// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_DESCRIPTION("torture test facility for locking");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, acq_writer_lim, 0, "Write-acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter. If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;
	int ret;
	char *s;

	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Bad CPU range";
out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
	return ret;
}

// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;

	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

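// Does the specified cpumask contain at least one CPU?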
static bool cpumask_nonempty(cpumask_var_t mask)
{
	return cpumask_available(mask) && !cpumask_empty(mask);
}

static const struct kernel_param_ops lt_bind_ops = {
	.set = param_set_cpumask,
	.get = param_get_cpumask,
};

module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
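
/*
 * Example (illustrative values): confine writers to CPUs 0-3 and
 * readers to CPUs 4-7, using the cpulist format parsed above:
 *
 *	modprobe locktorture bind_writers=0-3 bind_readers=4-7
 */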

long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

struct call_rcu_chain {
	struct rcu_head crc_rh;
	bool crc_stop;
};
static struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0; /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then restored back to its original prio, and so
		 * forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
		j = jiffies;
		mdelay(long_hold);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock = torture_raw_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock = torture_raw_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_spin_lock_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_spin_lock_irq"
};

#ifdef CONFIG_BPF_SYSCALL

#include <asm/rqspinlock.h>
static rqspinlock_t rqspinlock;

static int torture_raw_res_spin_write_lock(int tid __maybe_unused)
{
	raw_res_spin_lock(&rqspinlock);
	return 0;
}

static void torture_raw_res_spin_write_unlock(int tid __maybe_unused)
{
	raw_res_spin_unlock(&rqspinlock);
}

static struct lock_torture_ops raw_res_spin_lock_ops = {
	.writelock = torture_raw_res_spin_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_res_spin_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_res_spin_lock"
};

static int torture_raw_res_spin_write_lock_irq(int tid __maybe_unused)
{
	unsigned long flags;

	raw_res_spin_lock_irqsave(&rqspinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_res_spin_write_unlock_irq(int tid __maybe_unused)
{
	raw_res_spin_unlock_irqrestore(&rqspinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_res_spin_lock_irq_ops = {
	.writelock = torture_raw_res_spin_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_res_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_res_spin_lock_irq"
};

#endif

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

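/*
 * Acquire the subset of the nested mutexes selected by the bits of
 * @lockset, always in ascending index order so that all writers agree
 * on the locking order.
 */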
static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init = torture_mutex_init,
	.nested_lock = torture_mutex_nested_lock,
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_mutex_unlock,
	.nested_unlock = torture_mutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

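/*
 * Acquire all three ww_mutexes under a single acquire context, using the
 * standard wait/wound backoff dance: on -EDEADLK, release the locks
 * already held, take the contended lock with ww_mutex_lock_slow(), and
 * retry the remainder in the new list order.
 */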
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init = torture_ww_mutex_init,
	.exit = torture_ww_mutex_exit,
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init = torture_rtmutex_init,
	.nested_lock = torture_rtmutex_nested_lock,
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rt_boost_rtmutex,
	.writeunlock = torture_rtmutex_unlock,
	.nested_unlock = torture_rtmutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.exit = torture_percpu_rwsem_exit,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread. Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread. Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics. Caller must ensure that there is only one
 * call to this function at a given time!!! This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}


static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight. These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier();
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops->exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
#ifdef CONFIG_BPF_SYSCALL
		&raw_res_spin_lock_ops, &raw_res_spin_lock_irq_ops,
#endif
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	firsterr = call_rcu_chain_init();
	if (torture_init_error(firsterr))
		goto unwind;

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers, true);

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers, true);
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);