// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
        .disable_count = 0,
        .atomic_next = 0,
        .atomic_nest_count = 0,
        .in_flat_atomic = false,
        .access_mask = 0,
        .scoped_accesses = {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
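
/*
 * A worked instance of the example above (assuming NUM_SLOTS == 3, which
 * follows from KCSAN_CHECK_ADJACENT == 1): SLOT_IDX(9, 0) == 9 + (0+1)%3 == 10,
 * SLOT_IDX(9, 1) == 9 + (1+1)%3 == 11, SLOT_IDX(9, 2) == 9 + (2+1)%3 == 9,
 * matching the "slot=9: [10, 11, 9]" row.
 */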

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Counter of instructions to skip before setting up the next watchpoint; used
 * in should_watch(). We use a per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
                                                       size_t size,
                                                       bool expect_write,
                                                       long *encoded_watchpoint)
{
        const int slot = watchpoint_slot(addr);
        const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
        atomic_long_t *watchpoint;
        unsigned long wp_addr_masked;
        size_t wp_size;
        bool is_write;
        int i;

        BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

        for (i = 0; i < NUM_SLOTS; ++i) {
                watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
                *encoded_watchpoint = atomic_long_read(watchpoint);
                if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
                                       &wp_size, &is_write))
                        continue;

                if (expect_write && !is_write)
                        continue;

                /* Check if the watchpoint matches the access. */
                if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
                        return watchpoint;
        }

        return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
        const int slot = watchpoint_slot(addr);
        const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
        atomic_long_t *watchpoint;
        int i;

        /* Check slot index logic, ensuring we stay within array bounds. */
        BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
        BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

        for (i = 0; i < NUM_SLOTS; ++i) {
                long expect_val = INVALID_WATCHPOINT;

                /* Try to acquire this slot. */
                watchpoint = &watchpoints[SLOT_IDX(slot, i)];
                if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
                        return watchpoint;
        }

        return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 * 1. another thread already consumed the watchpoint;
 * 2. the thread that set up the watchpoint already removed it;
 * 3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
        return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
        return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
        atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
        /*
         * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
         * also result in calls that generate warnings in uaccess regions.
         */
        return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
        struct kcsan_ctx *ctx = get_ctx();
        struct list_head *prev_save = ctx->scoped_accesses.prev;
        struct kcsan_scoped_access *scoped_access;

        ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
        list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
                __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
        ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
        if (type & KCSAN_ACCESS_ATOMIC)
                return true;

        /*
         * Unless explicitly declared atomic, never consider an assertion access
         * as atomic. This allows using them also in atomic regions, such as
         * seqlocks, without implicitly changing their semantics.
         */
        if (type & KCSAN_ACCESS_ASSERT)
                return false;

        if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
            (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
            !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
                return true; /* Assume aligned writes up to word size are atomic. */

        if (ctx->atomic_next > 0) {
                /*
                 * Because we do not have separate contexts for nested
                 * interrupts, in case atomic_next is set, we simply assume that
                 * the outer interrupt set atomic_next. In the worst case, we
                 * will conservatively consider operations as atomic. This is a
                 * reasonable trade-off to make, since this case should be
                 * extremely rare; however, even if extremely rare, it could
                 * lead to false positives otherwise.
                 */
                if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
                        --ctx->atomic_next; /* in task, or outer interrupt */
                return true;
        }

        return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
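
/*
 * Illustrative instances of the rules above (a sketch, not exhaustive): with
 * CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y, an aligned plain write such as
 * "flag = 1;" (size <= sizeof(long)) is treated as atomic, whereas an access
 * larger than a word, or a compound access such as "var++" (marked
 * KCSAN_ACCESS_COMPOUND), is not.
 */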

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
        /*
         * Never set up watchpoints when memory operations are atomic.
         *
         * Need to check this first, before kcsan_skip check below: (1) atomics
         * should not count towards skipped instructions, and (2) to actually
         * decrement kcsan_atomic_next for consecutive instruction stream.
         */
        if (is_atomic(ptr, size, type, ctx))
                return false;

        if (this_cpu_dec_return(kcsan_skip) >= 0)
                return false;

        /*
         * NOTE: If we get here, kcsan_skip must always be reset in slow path
         * via reset_kcsan_skip() to avoid underflow.
         */

        /* this operation should be watched */
        return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
 * for more details.
 *
 * The open-coded version here uses only safe primitives for all contexts
 * where we can have KCSAN instrumentation. In particular, we cannot use
 * prandom_u32() directly, as its tracepoint could cause recursion.
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
        struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
        const u32 res = prandom_u32_state(state);

        put_cpu_var(kcsan_rand_state);
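        /*
         * Scale res into [0, ep_ro) with a fixed-point multiply: since
         * 0 <= res < 2^32, (res * ep_ro) >> 32 is approximately uniform over
         * [0, ep_ro), avoiding a modulo/division (same trick as
         * prandom_u32_max()).
         */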
        return (u32)(((u64) res * ep_ro) >> 32);
}

static inline void reset_kcsan_skip(void)
{
        long skip_count = kcsan_skip_watch -
                (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
                         kcsan_prandom_u32_max(kcsan_skip_watch) :
                         0);
        this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(void)
{
        return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
        unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
        /* For certain access types, skew the random delay to be longer. */
        unsigned int skew_delay_order =
                (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

        delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
                        kcsan_prandom_u32_max(delay >> skew_delay_order) :
                        0;
        udelay(delay);
}

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
                                            size_t size,
                                            int type,
                                            atomic_long_t *watchpoint,
                                            long encoded_watchpoint)
{
        unsigned long flags;
        bool consumed;

        if (!kcsan_is_enabled())
                return;

        /*
         * The access_mask check relies on value-change comparison. To avoid
         * reporting a race where e.g. the writer set up the watchpoint, but the
         * reader has access_mask!=0, we have to ignore the found watchpoint.
         */
        if (get_ctx()->access_mask != 0)
                return;

        /*
         * Consume the watchpoint as soon as possible, to minimize the chances
         * of !consumed. Consuming the watchpoint must always be guarded by the
         * kcsan_is_enabled() check, as otherwise we might erroneously trigger
         * reports when disabled.
         */
        consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

        /* keep this after try_consume_watchpoint */
        flags = user_access_save();

        if (consumed) {
                kcsan_save_irqtrace(current);
                kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
                             KCSAN_REPORT_CONSUMED_WATCHPOINT,
                             watchpoint - watchpoints);
                kcsan_restore_irqtrace(current);
        } else {
                /*
                 * The other thread may not print any diagnostics, as it has
                 * already removed the watchpoint, or another thread consumed
                 * the watchpoint before this thread.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
        }

        if ((type & KCSAN_ACCESS_ASSERT) != 0)
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
        else
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

        user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
        const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
        const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
        atomic_long_t *watchpoint;
        union {
                u8 _1;
                u16 _2;
                u32 _4;
                u64 _8;
        } expect_value;
        unsigned long access_mask;
        enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
        unsigned long ua_flags = user_access_save();
        unsigned long irq_flags = 0;

        /*
         * Always reset kcsan_skip counter in slow-path to avoid underflow; see
         * should_watch().
         */
        reset_kcsan_skip();

        if (!kcsan_is_enabled())
                goto out;

        /*
         * Special atomic rules: unlikely to be true, so we check them here in
         * the slow-path, and not in the fast-path in is_atomic(). Call after
         * kcsan_is_enabled(), as we may access memory that is not yet
         * initialized during early boot.
         */
        if (!is_assert && kcsan_is_atomic_special(ptr))
                goto out;

        if (!check_encodable((unsigned long)ptr, size)) {
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
                goto out;
        }

        /*
         * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
         * runtime is entered for every memory access, and potentially useful
         * information is lost if dirtied by KCSAN.
         */
        kcsan_save_irqtrace(current);
        if (!kcsan_interrupt_watcher)
                local_irq_save(irq_flags);

        watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
        if (watchpoint == NULL) {
                /*
                 * Out of capacity: the size of 'watchpoints', and the frequency
                 * with which should_watch() returns true should be tweaked so
                 * that this case happens very rarely.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
                goto out_unlock;
        }

        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

        /*
         * Read the current value, to later check and infer a race if the data
         * was modified via a non-instrumented access, e.g. from a device.
         */
        expect_value._8 = 0;
        switch (size) {
        case 1:
                expect_value._1 = READ_ONCE(*(const u8 *)ptr);
                break;
        case 2:
                expect_value._2 = READ_ONCE(*(const u16 *)ptr);
                break;
        case 4:
                expect_value._4 = READ_ONCE(*(const u32 *)ptr);
                break;
        case 8:
                expect_value._8 = READ_ONCE(*(const u64 *)ptr);
                break;
        default:
                break; /* ignore; we do not diff the values */
        }

        if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
                kcsan_disable_current();
                pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
                       is_write ? "write" : "read", size, ptr,
                       watchpoint_slot((unsigned long)ptr),
                       encode_watchpoint((unsigned long)ptr, size, is_write));
                kcsan_enable_current();
        }

        /*
         * Delay this thread, to increase probability of observing a racy
         * conflicting access.
         */
        delay_access(type);

        /*
         * Re-read value, and check if it is as expected; if not, we infer a
         * racy access.
         */
        access_mask = get_ctx()->access_mask;
        switch (size) {
        case 1:
                expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
                if (access_mask)
                        expect_value._1 &= (u8)access_mask;
                break;
        case 2:
                expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
                if (access_mask)
                        expect_value._2 &= (u16)access_mask;
                break;
        case 4:
                expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
                if (access_mask)
                        expect_value._4 &= (u32)access_mask;
                break;
        case 8:
                expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
                if (access_mask)
                        expect_value._8 &= (u64)access_mask;
                break;
        default:
                break; /* ignore; we do not diff the values */
        }

        /* Were we able to observe a value-change? */
        if (expect_value._8 != 0)
                value_change = KCSAN_VALUE_CHANGE_TRUE;

        /* Check if this access raced with another. */
        if (!consume_watchpoint(watchpoint)) {
                /*
                 * Depending on the access type, map a value_change of MAYBE to
                 * TRUE (always report) or FALSE (never report).
                 */
                if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
                        if (access_mask != 0) {
                                /*
                                 * For access with access_mask, we require a
                                 * value-change, as it is likely that races on
                                 * ~access_mask bits are expected.
                                 */
                                value_change = KCSAN_VALUE_CHANGE_FALSE;
                        } else if (size > 8 || is_assert) {
                                /* Always assume a value-change. */
                                value_change = KCSAN_VALUE_CHANGE_TRUE;
                        }
                }

                /*
                 * No need to increment 'data_races' counter, as the racing
                 * thread already did.
                 *
                 * Count 'assert_failures' for each failed ASSERT access,
                 * therefore both this thread and the racing thread may
                 * increment this counter.
                 */
                if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

                kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
                             watchpoint - watchpoints);
        } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
                /* Inferring a race, since the value should not have changed. */

                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
                if (is_assert)
                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

                if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
                        kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
                                     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
                                     watchpoint - watchpoints);
        }

        /*
         * Remove watchpoint; must be after reporting, since the slot may be
         * reused after this point.
         */
        remove_watchpoint(watchpoint);
        atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
        if (!kcsan_interrupt_watcher)
                local_irq_restore(irq_flags);
        kcsan_restore_irqtrace(current);
out:
        user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
                                         int type)
{
        const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
        atomic_long_t *watchpoint;
        long encoded_watchpoint;

        /*
         * Do nothing for zero-sized check; this comparison will be optimized out
         * for constant-sized instrumentation (__tsan_{read,write}N).
         */
        if (unlikely(size == 0))
                return;

        /*
         * Avoid user_access_save in fast-path: find_watchpoint is safe without
         * user_access_save, as the address that ptr points to is only used to
         * check if a watchpoint exists; ptr is never dereferenced.
         */
        watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
                                     &encoded_watchpoint);
        /*
         * It is safe to check kcsan_is_enabled() after find_watchpoint in the
         * slow-path, as long as no state changes that cause a race to be
         * detected and reported have occurred until kcsan_is_enabled() is
         * checked.
         */

        if (unlikely(watchpoint != NULL))
                kcsan_found_watchpoint(ptr, size, type, watchpoint,
                                       encoded_watchpoint);
        else {
                struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

                if (unlikely(should_watch(ptr, size, type, ctx)))
                        kcsan_setup_watchpoint(ptr, size, type);
                else if (unlikely(ctx->scoped_accesses.prev))
                        kcsan_check_scoped_accesses();
        }
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
        BUG_ON(!in_task());

        kcsan_debugfs_init();
        prandom_seed_full_state(&kcsan_rand_state);

        /*
         * We are in the init task, and no other tasks should be running;
         * WRITE_ONCE without memory barrier is sufficient.
         */
        if (kcsan_early_enable) {
                pr_info("enabled early\n");
                WRITE_ONCE(kcsan_enabled, true);
        }
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
        ++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
        if (get_ctx()->disable_count-- == 0) {
                /*
                 * Warn if kcsan_enable_current() calls are unbalanced with
                 * kcsan_disable_current() calls, which causes disable_count to
                 * become negative and should not happen.
                 */
                kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
                kcsan_disable_current(); /* disable to generate warning */
                WARN(1, "Unbalanced %s()", __func__);
                kcsan_enable_current();
        }
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
        if (get_ctx()->disable_count-- == 0)
                kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
        /*
         * Do *not* check and warn if we are in a flat atomic region: nestable
         * and flat atomic regions are independent from each other.
         * See include/linux/kcsan.h: struct kcsan_ctx comments for more
         * details.
         */

        ++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
        if (get_ctx()->atomic_nest_count-- == 0) {
                /*
                 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
                 * kcsan_nestable_atomic_begin() calls, which causes
                 * atomic_nest_count to become negative and should not happen.
                 */
                kcsan_nestable_atomic_begin(); /* restore to 0 */
                kcsan_disable_current(); /* disable to generate warning */
                WARN(1, "Unbalanced %s()", __func__);
                kcsan_enable_current();
        }
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
        get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
        get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
        get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
        get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
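
/*
 * Illustrative use of the access mask (a sketch; 'flags' and 'FLAG_MASK' are
 * hypothetical -- see the ASSERT_EXCLUSIVE_BITS() helpers in
 * <linux/kcsan-checks.h> for the canonical users):
 *
 *      kcsan_set_access_mask(FLAG_MASK);
 *      __kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *      kcsan_set_access_mask(0);
 */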

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
                          struct kcsan_scoped_access *sa)
{
        struct kcsan_ctx *ctx = get_ctx();

        __kcsan_check_access(ptr, size, type);

        ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

        INIT_LIST_HEAD(&sa->list);
        sa->ptr = ptr;
        sa->size = size;
        sa->type = type;

        if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
                INIT_LIST_HEAD(&ctx->scoped_accesses);
        list_add(&sa->list, &ctx->scoped_accesses);

        ctx->disable_count--;
        return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
        struct kcsan_ctx *ctx = get_ctx();

        if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
                return;

        ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

        list_del(&sa->list);
        if (list_empty(&ctx->scoped_accesses))
                /*
                 * Ensure we do not enter kcsan_check_scoped_accesses()
                 * slow-path if unnecessary, and avoid requiring list_empty()
                 * in the fast-path (to avoid a READ_ONCE() and potential
                 * uaccess warning).
                 */
                ctx->scoped_accesses.prev = NULL;

        ctx->disable_count--;

        __kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
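
/*
 * Illustrative use of the scoped-access API (a sketch; 'obj' and 'sa' are
 * hypothetical -- see the ASSERT_EXCLUSIVE_*_SCOPED() helpers in
 * <linux/kcsan-checks.h> for the canonical users):
 *
 *      struct kcsan_scoped_access sa;
 *
 *      kcsan_begin_scoped_access(&obj, sizeof(obj), KCSAN_ACCESS_ASSERT, &sa);
 *      ... critical section: each check_access() also re-checks 'obj' ...
 *      kcsan_end_scoped_access(&sa);
 */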

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
        check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */
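
/*
 * For example (a sketch of the instrumentation, not verbatim codegen): a plain
 * 32-bit load such as "val = x;" is compiled to roughly
 *
 *      __tsan_read4(&x);
 *      val = x;
 *
 * so check_access() runs for every instrumented plain access.
 */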

#define DEFINE_TSAN_READ_WRITE(size) \
        void __tsan_read##size(void *ptr); \
        void __tsan_read##size(void *ptr) \
        { \
                check_access(ptr, size, 0); \
        } \
        EXPORT_SYMBOL(__tsan_read##size); \
        void __tsan_unaligned_read##size(void *ptr) \
                __alias(__tsan_read##size); \
        EXPORT_SYMBOL(__tsan_unaligned_read##size); \
        void __tsan_write##size(void *ptr); \
        void __tsan_write##size(void *ptr) \
        { \
                check_access(ptr, size, KCSAN_ACCESS_WRITE); \
        } \
        EXPORT_SYMBOL(__tsan_write##size); \
        void __tsan_unaligned_write##size(void *ptr) \
                __alias(__tsan_write##size); \
        EXPORT_SYMBOL(__tsan_unaligned_write##size); \
        void __tsan_read_write##size(void *ptr); \
        void __tsan_read_write##size(void *ptr) \
        { \
                check_access(ptr, size, \
                             KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
        } \
        EXPORT_SYMBOL(__tsan_read_write##size); \
        void __tsan_unaligned_read_write##size(void *ptr) \
                __alias(__tsan_read_write##size); \
        EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
        check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
        check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1]; however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
        void __tsan_volatile_read##size(void *ptr); \
        void __tsan_volatile_read##size(void *ptr) \
        { \
                const bool is_atomic = size <= sizeof(long long) && \
                                       IS_ALIGNED((unsigned long)ptr, size); \
                if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
                        return; \
                check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
        } \
        EXPORT_SYMBOL(__tsan_volatile_read##size); \
        void __tsan_unaligned_volatile_read##size(void *ptr) \
                __alias(__tsan_volatile_read##size); \
        EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
        void __tsan_volatile_write##size(void *ptr); \
        void __tsan_volatile_write##size(void *ptr) \
        { \
                const bool is_atomic = size <= sizeof(long long) && \
                                       IS_ALIGNED((unsigned long)ptr, size); \
                if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
                        return; \
                check_access(ptr, size, \
                             KCSAN_ACCESS_WRITE | \
                             (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
        } \
        EXPORT_SYMBOL(__tsan_volatile_write##size); \
        void __tsan_unaligned_volatile_write##size(void *ptr) \
                __alias(__tsan_volatile_write##size); \
        EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */
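
/*
 * For example (a sketch): with builtin instrumentation enabled, a 32-bit
 * __atomic_fetch_add(&x, 1, __ATOMIC_RELAXED) is redirected to
 * __tsan_atomic32_fetch_add(&x, 1, __ATOMIC_RELAXED), which performs the
 * marked check_access() and then the atomic operation itself.
 */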

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
                } \
                return __atomic_load_n(ptr, memorder); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
                } \
                __atomic_store_n(ptr, v, memorder); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
                                     KCSAN_ACCESS_ATOMIC); \
                } \
                return __atomic_##op##suffix(ptr, v, memorder); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *      T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *      T1: if (__atomic_load_n(&p->flag, ...)) {
 *              modify *p;
 *              p->flag = 0;
 *          }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
                                                              u##bits val, int mo, int fail_mo); \
        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
                                                              u##bits val, int mo, int fail_mo) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
                                     KCSAN_ACCESS_ATOMIC); \
                } \
                return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
                                                           int mo, int fail_mo); \
        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
                                                           int mo, int fail_mo) \
        { \
                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                        check_access(ptr, bits / BITS_PER_BYTE, \
                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
                                     KCSAN_ACCESS_ATOMIC); \
                } \
                __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
                return exp; \
        } \
        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits) \
        DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
        DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
        DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
        DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
        __atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);