1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
4 *
5 * Deterministic automata (DA) monitor functions, to be used together
6 * with automata models in C generated by the rvgen tool.
7 *
8 * The rvgen tool is available at tools/verification/rvgen/
9 *
10 * For further information, see:
11 * Documentation/trace/rv/monitor_synthesis.rst
12 */
13
14 #ifndef _RV_DA_MONITOR_H
15 #define _RV_DA_MONITOR_H
16
17 #include <rv/automata.h>
18 #include <linux/rv.h>
19 #include <linux/stringify.h>
20 #include <linux/bug.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/hashtable.h>
24
25 /*
26 * Per-cpu variables require a unique name although static in some
27 * configurations (e.g. CONFIG_DEBUG_FORCE_WEAK_PER_CPU or alpha modules).
28 */
29 #define DA_MON_NAME CONCATENATE(da_mon_, MONITOR_NAME)
30
31 static struct rv_monitor rv_this;
32
33 /*
34 * Hook to allow the implementation of hybrid automata: define it with a
35 * function that takes curr_state, event and next_state and returns true if the
36 * environment constraints (e.g. timing) are satisfied, false otherwise.
37 */
38 #ifndef da_monitor_event_hook
39 #define da_monitor_event_hook(...) true
40 #endif
41
42 /*
43 * Hook to allow the implementation of hybrid automata: define it with a
44 * function that takes the da_monitor and performs further initialisation
 * (e.g. set up timers).
46 */
47 #ifndef da_monitor_init_hook
48 #define da_monitor_init_hook(da_mon)
49 #endif
50
51 /*
52 * Hook to allow the implementation of hybrid automata: define it with a
53 * function that takes the da_monitor and performs further reset (e.g. reset
54 * all clocks).
55 */
56 #ifndef da_monitor_reset_hook
57 #define da_monitor_reset_hook(da_mon)
58 #endif
59
60 /*
61 * Type for the target id, default to int but can be overridden.
62 * A long type can work as hash table key (PER_OBJ) but will be downgraded to
63 * int in the event tracepoint.
64 * Unused for implicit monitors.
65 */
66 #ifndef da_id_type
67 #define da_id_type int
68 #endif
69
/*
 * react - report a model violation through the RV reactor
 *
 * Called when @event is not allowed in @curr_state; forwards a
 * human-readable description to the reactor attached to this monitor.
 */
static void react(enum states curr_state, enum events event)
{
	rv_react(&rv_this,
		 "rv: monitor %s does not allow event %s on state %s\n",
		 __stringify(MONITOR_NAME),
		 model_get_event_name(event),
		 model_get_state_name(curr_state));
}
78
/*
 * da_monitor_reset - reset a monitor, setting it back to the initial state
 *
 * Monitoring is turned off: events are ignored until da_monitor_start()
 * runs again.
 */
static inline void da_monitor_reset(struct da_monitor *da_mon)
{
	/* let hybrid automata reset their environment first (e.g. clocks) */
	da_monitor_reset_hook(da_mon);
	da_mon->monitoring = 0;
	da_mon->curr_state = model_get_initial_state();
}
88
/*
 * da_monitor_start - start monitoring
 *
 * The monitor will ignore all events until monitoring is set to true. This
 * function needs to be called to tell the monitor to start monitoring.
 */
static inline void da_monitor_start(struct da_monitor *da_mon)
{
	da_mon->curr_state = model_get_initial_state();
	da_mon->monitoring = 1;
	/* hybrid automata hook: further per-monitor initialisation */
	da_monitor_init_hook(da_mon);
}
101
/*
 * da_monitoring - returns true if the monitor is processing events
 */
static inline bool da_monitoring(struct da_monitor *da_mon)
{
	return da_mon->monitoring;
}
109
110 /*
111 * da_monitor_enabled - checks if the monitor is enabled
112 */
da_monitor_enabled(void)113 static inline bool da_monitor_enabled(void)
114 {
115 /* global switch */
116 if (unlikely(!rv_monitoring_on()))
117 return 0;
118
119 /* monitor enabled */
120 if (unlikely(!rv_this.enabled))
121 return 0;
122
123 return 1;
124 }
125
126 /*
127 * da_monitor_handling_event - checks if the monitor is ready to handle events
128 */
da_monitor_handling_event(struct da_monitor * da_mon)129 static inline bool da_monitor_handling_event(struct da_monitor *da_mon)
130 {
131 if (!da_monitor_enabled())
132 return 0;
133
134 /* monitor is actually monitoring */
135 if (unlikely(!da_monitoring(da_mon)))
136 return 0;
137
138 return 1;
139 }
140
141 #if RV_MON_TYPE == RV_MON_GLOBAL
142 /*
143 * Functions to define, init and get a global monitor.
144 */
145
/*
 * global monitor: a single static instance covering the whole system
 */
static struct da_monitor DA_MON_NAME;

/*
 * da_get_monitor - return the global monitor address
 */
static struct da_monitor *da_get_monitor(void)
{
	return &DA_MON_NAME;
}
158
/*
 * da_monitor_reset_all - reset the single (global) monitor
 */
static void da_monitor_reset_all(void)
{
	da_monitor_reset(da_get_monitor());
}
166
/*
 * da_monitor_init - initialize the global monitor
 *
 * Always succeeds; returns 0.
 */
static inline int da_monitor_init(void)
{
	da_monitor_reset_all();
	return 0;
}
175
/*
 * da_monitor_destroy - destroy the monitor
 *
 * Nothing to free for the global monitor; just leave it reset.
 */
static inline void da_monitor_destroy(void)
{
	da_monitor_reset_all();
}
183
184 #elif RV_MON_TYPE == RV_MON_PER_CPU
185 /*
186 * Functions to define, init and get a per-cpu monitor.
187 */
188
/*
 * per-cpu monitor variables
 */
static DEFINE_PER_CPU(struct da_monitor, DA_MON_NAME);

/*
 * da_get_monitor - return current CPU monitor address
 */
static struct da_monitor *da_get_monitor(void)
{
	return this_cpu_ptr(&DA_MON_NAME);
}
201
202 /*
203 * da_monitor_reset_all - reset all CPUs' monitor
204 */
da_monitor_reset_all(void)205 static void da_monitor_reset_all(void)
206 {
207 struct da_monitor *da_mon;
208 int cpu;
209
210 for_each_cpu(cpu, cpu_online_mask) {
211 da_mon = per_cpu_ptr(&DA_MON_NAME, cpu);
212 da_monitor_reset(da_mon);
213 }
214 }
215
/*
 * da_monitor_init - initialize all CPUs' monitors
 *
 * Always succeeds; returns 0.
 */
static inline int da_monitor_init(void)
{
	da_monitor_reset_all();
	return 0;
}
224
/*
 * da_monitor_destroy - destroy the monitor
 *
 * Per-cpu storage is static; just leave every monitor reset.
 */
static inline void da_monitor_destroy(void)
{
	da_monitor_reset_all();
}
232
233 #elif RV_MON_TYPE == RV_MON_PER_TASK
234 /*
235 * Functions to define, init and get a per-task monitor.
236 */
237
/*
 * The per-task monitor is stored in a vector in the task struct. This
 * variable stores the position in the vector reserved for this monitor.
 * RV_PER_TASK_MONITOR_INIT doubles as the "no slot allocated" sentinel.
 */
static int task_mon_slot = RV_PER_TASK_MONITOR_INIT;

/*
 * da_get_monitor - return the monitor in the allocated slot for tsk
 */
static inline struct da_monitor *da_get_monitor(struct task_struct *tsk)
{
	return &tsk->rv[task_mon_slot].da_mon;
}
251
/*
 * da_get_target - return the task associated to the monitor
 *
 * Inverse of da_get_monitor(): recover the task_struct embedding @da_mon.
 */
static inline struct task_struct *da_get_target(struct da_monitor *da_mon)
{
	return container_of(da_mon, struct task_struct, rv[task_mon_slot].da_mon);
}
259
/*
 * da_get_id - return the id associated to the monitor
 *
 * For per-task monitors, the id is the task's PID.
 */
static inline da_id_type da_get_id(struct da_monitor *da_mon)
{
	return da_get_target(da_mon)->pid;
}
269
/*
 * da_monitor_reset_all - reset the monitor of every task in the system
 *
 * Idle tasks are not on the tasklist, so they are walked separately,
 * one per present CPU.
 */
static void da_monitor_reset_all(void)
{
	struct task_struct *g, *p;
	int cpu;

	/* tasklist_lock keeps the thread list stable during the walk */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, p)
		da_monitor_reset(da_get_monitor(p));
	for_each_present_cpu(cpu)
		da_monitor_reset(da_get_monitor(idle_task(cpu)));
	read_unlock(&tasklist_lock);
}
282
/*
 * da_monitor_init - initialize the per-task monitor
 *
 * Try to allocate a slot in the task's vector of monitors. If there
 * is an available slot, use it and reset all tasks' monitors.
 *
 * Returns 0 on success or the (non-zero) value from
 * rv_get_task_monitor_slot() on failure.
 */
static int da_monitor_init(void)
{
	int slot;

	slot = rv_get_task_monitor_slot();
	/* defensively reject out-of-range slots as well as real errors */
	if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT)
		return slot;

	task_mon_slot = slot;

	da_monitor_reset_all();
	return 0;
}
302
/*
 * da_monitor_destroy - return the allocated slot
 *
 * Warns (once) if called while no slot is held, i.e. the monitor was
 * already disabled.
 */
static inline void da_monitor_destroy(void)
{
	if (task_mon_slot == RV_PER_TASK_MONITOR_INIT) {
		WARN_ONCE(1, "Disabling a disabled monitor: " __stringify(MONITOR_NAME));
		return;
	}
	rv_put_task_monitor_slot(task_mon_slot);
	/* mark the slot as released before resetting the tasks' monitors */
	task_mon_slot = RV_PER_TASK_MONITOR_INIT;

	da_monitor_reset_all();
}
317
318 #elif RV_MON_TYPE == RV_MON_PER_OBJ
319 /*
320 * Functions to define, init and get a per-object monitor.
321 */
322
/*
 * Per-object storage: one dynamically allocated entry per monitored object,
 * indexed by id in a global RCU-protected hash table.
 */
struct da_monitor_storage {
	da_id_type id;			/* hash key identifying the object */
	monitor_target target;		/* the monitored object itself */
	union rv_task_monitor rv;	/* embedded monitor state */
	struct hlist_node node;		/* hash table linkage */
	struct rcu_head rcu;		/* deferred free after a grace period */
};

#ifndef DA_MONITOR_HT_BITS
#define DA_MONITOR_HT_BITS 10
#endif
static DEFINE_HASHTABLE(da_monitor_ht, DA_MONITOR_HT_BITS);
335
336 /*
337 * da_create_empty_storage - pre-allocate an empty storage
338 */
da_create_empty_storage(da_id_type id)339 static inline struct da_monitor_storage *da_create_empty_storage(da_id_type id)
340 {
341 struct da_monitor_storage *mon_storage;
342
343 mon_storage = kmalloc_nolock(sizeof(struct da_monitor_storage),
344 __GFP_ZERO, NUMA_NO_NODE);
345 if (!mon_storage)
346 return NULL;
347
348 hash_add_rcu(da_monitor_ht, &mon_storage->node, id);
349 mon_storage->id = id;
350 return mon_storage;
351 }
352
/*
 * da_create_storage - create the per-object storage
 *
 * If @da_mon is already set, the storage exists and is returned as-is.
 *
 * The caller is responsible to synchronise writers, either with locks or
 * implicitly. For instance, if da_create_storage is only called from a single
 * event for target (e.g. sched_switch), it's safe to call this without locks.
 *
 * Returns the monitor embedded in the (new or existing) storage, or NULL
 * if the allocation failed.
 */
static inline struct da_monitor *da_create_storage(da_id_type id,
						   monitor_target target,
						   struct da_monitor *da_mon)
{
	struct da_monitor_storage *mon_storage;

	if (da_mon)
		return da_mon;

	mon_storage = da_create_empty_storage(id);
	if (!mon_storage)
		return NULL;

	mon_storage->target = target;
	return &mon_storage->rv.da_mon;
}
376
/*
 * __da_get_mon_storage - get the monitor storage from the hash table
 *
 * Must run inside an RCU read-side critical section (asserted below); the
 * returned pointer is only guaranteed valid while that section is held.
 * Returns NULL if no storage exists for @id.
 */
static inline struct da_monitor_storage *__da_get_mon_storage(da_id_type id)
{
	struct da_monitor_storage *mon_storage;

	lockdep_assert_in_rcu_read_lock();
	hash_for_each_possible_rcu(da_monitor_ht, mon_storage, node, id) {
		/* the bucket can hold colliding ids; match exactly */
		if (mon_storage->id == id)
			return mon_storage;
	}

	return NULL;
}
392
393 /*
394 * da_get_monitor - return the monitor for target
395 */
da_get_monitor(da_id_type id,monitor_target target)396 static struct da_monitor *da_get_monitor(da_id_type id, monitor_target target)
397 {
398 struct da_monitor_storage *mon_storage;
399
400 mon_storage = __da_get_mon_storage(id);
401 return mon_storage ? &mon_storage->rv.da_mon : NULL;
402 }
403
/*
 * da_get_target - return the object associated to the monitor
 *
 * May be unset (zero) if the storage was pre-allocated without a target;
 * see da_fill_empty_storage().
 */
static inline monitor_target da_get_target(struct da_monitor *da_mon)
{
	return container_of(da_mon, struct da_monitor_storage, rv.da_mon)->target;
}
411
/*
 * da_get_id - return the id associated to the monitor
 */
static inline da_id_type da_get_id(struct da_monitor *da_mon)
{
	return container_of(da_mon, struct da_monitor_storage, rv.da_mon)->id;
}
419
/*
 * da_create_or_get - create the per-object storage if not already there
 *
 * This needs a lookup so should be guarded by RCU, the condition is checked
 * directly in da_create_storage()
 */
static inline void da_create_or_get(da_id_type id, monitor_target target)
{
	guard(rcu)();
	/* da_create_storage() is a no-op when the lookup already succeeded */
	da_create_storage(id, target, da_get_monitor(id, target));
}
431
432 /*
433 * da_fill_empty_storage - store the target in a pre-allocated storage
434 *
435 * Can be used as a substitute of da_create_storage when starting a monitor in
436 * an environment where allocation is unsafe.
437 */
da_fill_empty_storage(da_id_type id,monitor_target target,struct da_monitor * da_mon)438 static inline struct da_monitor *da_fill_empty_storage(da_id_type id,
439 monitor_target target,
440 struct da_monitor *da_mon)
441 {
442 if (unlikely(da_mon && !da_get_target(da_mon)))
443 container_of(da_mon, struct da_monitor_storage, rv.da_mon)->target = target;
444 return da_mon;
445 }
446
447 /*
448 * da_get_target_by_id - return the object associated to the id
449 */
da_get_target_by_id(da_id_type id)450 static inline monitor_target da_get_target_by_id(da_id_type id)
451 {
452 struct da_monitor_storage *mon_storage;
453
454 guard(rcu)();
455 mon_storage = __da_get_mon_storage(id);
456
457 if (unlikely(!mon_storage))
458 return NULL;
459 return mon_storage->target;
460 }
461
/*
 * da_destroy_storage - destroy the per-object storage
 *
 * The caller is responsible to synchronise writers, either with locks or
 * implicitly. For instance, if da_destroy_storage is called at sched_exit and
 * da_create_storage can never occur after that, it's safe to call this without
 * locks.
 * This function includes an RCU read-side critical section to synchronise
 * against da_monitor_destroy().
 */
static inline void da_destroy_storage(da_id_type id)
{
	struct da_monitor_storage *mon_storage;

	guard(rcu)();
	mon_storage = __da_get_mon_storage(id);

	if (!mon_storage)
		return;
	/* hybrid automata may hold resources (e.g. timers): release them */
	da_monitor_reset_hook(&mon_storage->rv.da_mon);
	hash_del_rcu(&mon_storage->node);
	/* readers may still hold the entry: free after a grace period */
	kfree_rcu(mon_storage, rcu);
}
485
da_monitor_reset_all(void)486 static void da_monitor_reset_all(void)
487 {
488 struct da_monitor_storage *mon_storage;
489 int bkt;
490
491 rcu_read_lock();
492 hash_for_each_rcu(da_monitor_ht, bkt, mon_storage, node)
493 da_monitor_reset(&mon_storage->rv.da_mon);
494 rcu_read_unlock();
495 }
496
/*
 * da_monitor_init - initialize the per-object hash table
 *
 * Always succeeds; returns 0.
 */
static inline int da_monitor_init(void)
{
	hash_init(da_monitor_ht);
	return 0;
}
502
/*
 * da_monitor_destroy - free all remaining per-object storage
 */
static inline void da_monitor_destroy(void)
{
	struct da_monitor_storage *mon_storage;
	struct hlist_node *tmp;
	int bkt;

	/*
	 * This function is called after all probes are disabled, we need only
	 * worry about concurrency against old events.
	 */
	synchronize_rcu();
	hash_for_each_safe(da_monitor_ht, bkt, tmp, mon_storage, node) {
		da_monitor_reset_hook(&mon_storage->rv.da_mon);
		hash_del_rcu(&mon_storage->node);
		/* no readers left after synchronize_rcu(): free directly */
		kfree(mon_storage);
	}
}
520
521 /*
522 * Allow the per-object monitors to run allocation manually, necessary if the
523 * start condition is in a context problematic for allocation (e.g. scheduling).
524 * In such case, if the storage was pre-allocated without a target, set it now.
525 */
526 #ifdef DA_SKIP_AUTO_ALLOC
527 #define da_prepare_storage da_fill_empty_storage
528 #else
529 #define da_prepare_storage da_create_storage
530 #endif /* DA_SKIP_AUTO_ALLOC */
531
532 #endif /* RV_MON_TYPE */
533
534 #if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
535 /*
536 * Trace events for implicit monitors. Implicit monitor is the one which the
537 * handler does not need to specify which da_monitor to manipulate. Examples
538 * of implicit monitor are the per_cpu or the global ones.
539 */
540
/*
 * da_trace_event - emit the monitor's event tracepoint (implicit form)
 *
 * @da_mon and @id are accepted only to keep call sites uniform with the
 * per-task/per-object variants; they are not reported.
 */
static inline void da_trace_event(struct da_monitor *da_mon,
				  char *curr_state, char *event,
				  char *next_state, bool is_final,
				  da_id_type id)
{
	CONCATENATE(trace_event_, MONITOR_NAME)(curr_state, event, next_state,
						is_final);
}
549
/*
 * da_trace_error - emit the monitor's error tracepoint (implicit form)
 *
 * @da_mon and @id are unused; see da_trace_event() above.
 */
static inline void da_trace_error(struct da_monitor *da_mon,
				  char *curr_state, char *event,
				  da_id_type id)
{
	CONCATENATE(trace_error_, MONITOR_NAME)(curr_state, event);
}
556
/*
 * da_get_id - unused for implicit monitors
 *
 * Always returns 0; implicit monitors have no per-target id.
 */
static inline da_id_type da_get_id(struct da_monitor *da_mon)
{
	return 0;
}
564
565 #elif RV_MON_TYPE == RV_MON_PER_TASK || RV_MON_TYPE == RV_MON_PER_OBJ
566 /*
567 * Trace events for per_task/per_object monitors, report the target id.
568 */
569
/*
 * da_trace_event - emit the monitor's event tracepoint, reporting @id
 */
static inline void da_trace_event(struct da_monitor *da_mon,
				  char *curr_state, char *event,
				  char *next_state, bool is_final,
				  da_id_type id)
{
	CONCATENATE(trace_event_, MONITOR_NAME)(id, curr_state, event,
						next_state, is_final);
}
578
/*
 * da_trace_error - emit the monitor's error tracepoint, reporting @id
 */
static inline void da_trace_error(struct da_monitor *da_mon,
				  char *curr_state, char *event,
				  da_id_type id)
{
	CONCATENATE(trace_error_, MONITOR_NAME)(id, curr_state, event);
}
585 #endif /* RV_MON_TYPE */
586
/*
 * da_event - handle an event for the da_mon
 *
 * This function is valid for both implicit and id monitors.
 * Retry in case there is a race between getting and setting the next state,
 * warn and reset the monitor if it runs out of retries. The monitor should be
 * able to handle various orders.
 *
 * Returns true when a valid transition was taken; false on an invalid
 * transition, a failed environment hook, or an exhausted retry budget
 * (the caller resets the monitor in that case).
 */
static inline bool da_event(struct da_monitor *da_mon, enum events event, da_id_type id)
{
	enum states curr_state, next_state;

	curr_state = READ_ONCE(da_mon->curr_state);
	for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
		next_state = model_get_next_state(curr_state, event);
		if (next_state == INVALID_STATE) {
			/* the automaton forbids this event in curr_state */
			react(curr_state, event);
			da_trace_error(da_mon, model_get_state_name(curr_state),
				       model_get_event_name(event), id);
			return false;
		}
		/*
		 * On failure try_cmpxchg() reloads curr_state, so a racing
		 * event simply makes us recompute from the new state.
		 */
		if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
			/* hybrid automata may veto the transition (e.g. timing) */
			if (!da_monitor_event_hook(da_mon, curr_state, event, next_state, id))
				return false;
			da_trace_event(da_mon, model_get_state_name(curr_state),
				       model_get_event_name(event),
				       model_get_state_name(next_state),
				       model_is_final_state(next_state), id);
			return true;
		}
	}

	trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
	pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
		" retries reached for event %s, resetting monitor %s",
		model_get_event_name(event), __stringify(MONITOR_NAME));
	return false;
}
625
/*
 * __da_handle_event_common - process one event, resetting the monitor on
 * any failure reported by da_event()
 */
static inline void __da_handle_event_common(struct da_monitor *da_mon,
					    enum events event, da_id_type id)
{
	if (!da_event(da_mon, event, id))
		da_monitor_reset(da_mon);
}
632
/*
 * __da_handle_event - process one event, but only if the monitor is
 * enabled and already running
 */
static inline void __da_handle_event(struct da_monitor *da_mon,
				     enum events event, da_id_type id)
{
	if (da_monitor_handling_event(da_mon))
		__da_handle_event_common(da_mon, event, id);
}
639
__da_handle_start_event(struct da_monitor * da_mon,enum events event,da_id_type id)640 static inline bool __da_handle_start_event(struct da_monitor *da_mon,
641 enum events event, da_id_type id)
642 {
643 if (!da_monitor_enabled())
644 return 0;
645 if (unlikely(!da_monitoring(da_mon))) {
646 da_monitor_start(da_mon);
647 return 0;
648 }
649
650 __da_handle_event_common(da_mon, event, id);
651
652 return 1;
653 }
654
__da_handle_start_run_event(struct da_monitor * da_mon,enum events event,da_id_type id)655 static inline bool __da_handle_start_run_event(struct da_monitor *da_mon,
656 enum events event, da_id_type id)
657 {
658 if (!da_monitor_enabled())
659 return 0;
660 if (unlikely(!da_monitoring(da_mon)))
661 da_monitor_start(da_mon);
662
663 __da_handle_event_common(da_mon, event, id);
664
665 return 1;
666 }
667
668 #if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
669 /*
670 * Handle event for implicit monitor: da_get_monitor() will figure out
671 * the monitor.
672 */
673
/*
 * da_handle_event - handle an event
 *
 * Implicit form: the monitor is found via da_get_monitor(); no id.
 */
static inline void da_handle_event(enum events event)
{
	__da_handle_event(da_get_monitor(), event, 0);
}
681
/*
 * da_handle_start_event - start monitoring or handle event
 *
 * This function is used to notify the monitor that the system is returning
 * to the initial state, so the monitor can start monitoring in the next event.
 * Thus:
 *
 * If the monitor already started, handle the event.
 * If the monitor did not start yet, start the monitor but skip the event.
 */
static inline bool da_handle_start_event(enum events event)
{
	return __da_handle_start_event(da_get_monitor(), event, 0);
}
696
/*
 * da_handle_start_run_event - start monitoring and handle event
 *
 * This function is used to notify the monitor that the system is in the
 * initial state, so the monitor can start monitoring and handling event.
 */
static inline bool da_handle_start_run_event(enum events event)
{
	return __da_handle_start_run_event(da_get_monitor(), event, 0);
}
707
708 #elif RV_MON_TYPE == RV_MON_PER_TASK
709 /*
710 * Handle event for per task.
711 */
712
/*
 * da_handle_event - handle an event for @tsk's monitor
 *
 * The task's PID is reported as the event id.
 */
static inline void da_handle_event(struct task_struct *tsk, enum events event)
{
	__da_handle_event(da_get_monitor(tsk), event, tsk->pid);
}
720
/*
 * da_handle_start_event - start monitoring or handle event
 *
 * This function is used to notify the monitor that the system is returning
 * to the initial state, so the monitor can start monitoring in the next event.
 * Thus:
 *
 * If the monitor already started, handle the event.
 * If the monitor did not start yet, start the monitor but skip the event.
 */
static inline bool da_handle_start_event(struct task_struct *tsk,
					 enum events event)
{
	return __da_handle_start_event(da_get_monitor(tsk), event, tsk->pid);
}
736
/*
 * da_handle_start_run_event - start monitoring and handle event
 *
 * This function is used to notify the monitor that the system is in the
 * initial state, so the monitor can start monitoring and handling event.
 */
static inline bool da_handle_start_run_event(struct task_struct *tsk,
					     enum events event)
{
	return __da_handle_start_run_event(da_get_monitor(tsk), event, tsk->pid);
}
748
749 #elif RV_MON_TYPE == RV_MON_PER_OBJ
750 /*
751 * Handle event for per object.
752 */
753
/*
 * da_handle_event - handle an event for the object identified by @id
 *
 * Silently ignored when no storage exists for @id (the monitor for this
 * object was never started). The lookup is RCU-protected.
 */
static inline void da_handle_event(da_id_type id, monitor_target target, enum events event)
{
	struct da_monitor *da_mon;

	guard(rcu)();
	da_mon = da_get_monitor(id, target);
	if (likely(da_mon))
		__da_handle_event(da_mon, event, id);
}
766
/*
 * da_handle_start_event - start monitoring or handle event
 *
 * This function is used to notify the monitor that the system is returning
 * to the initial state, so the monitor can start monitoring in the next event.
 * Thus:
 *
 * If the monitor already started, handle the event.
 * If the monitor did not start yet, start the monitor but skip the event.
 *
 * Returns false when the storage does not exist and could not be created.
 */
static inline bool da_handle_start_event(da_id_type id, monitor_target target,
					 enum events event)
{
	struct da_monitor *da_mon;

	guard(rcu)();
	da_mon = da_get_monitor(id, target);
	/* create (or fill, see DA_SKIP_AUTO_ALLOC) the storage on first use */
	da_mon = da_prepare_storage(id, target, da_mon);
	if (unlikely(!da_mon))
		return 0;
	return __da_handle_start_event(da_mon, event, id);
}
789
/*
 * da_handle_start_run_event - start monitoring and handle event
 *
 * This function is used to notify the monitor that the system is in the
 * initial state, so the monitor can start monitoring and handling event.
 *
 * Returns false when the storage does not exist and could not be created.
 */
static inline bool da_handle_start_run_event(da_id_type id, monitor_target target,
					     enum events event)
{
	struct da_monitor *da_mon;

	guard(rcu)();
	da_mon = da_get_monitor(id, target);
	/* create (or fill, see DA_SKIP_AUTO_ALLOC) the storage on first use */
	da_mon = da_prepare_storage(id, target, da_mon);
	if (unlikely(!da_mon))
		return 0;
	return __da_handle_start_run_event(da_mon, event, id);
}
808
/*
 * da_reset - reset the monitor of the object identified by @id
 *
 * A no-op when no storage exists for @id.
 */
static inline void da_reset(da_id_type id, monitor_target target)
{
	struct da_monitor *da_mon;

	guard(rcu)();
	da_mon = da_get_monitor(id, target);
	if (unlikely(!da_mon))
		return;
	da_monitor_reset(da_mon);
}
818 #endif /* RV_MON_TYPE */
819
820 #endif
821