Lines Matching +full:init +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0+
3 * Module-based torture test facility for locking
28 #include <linux/delay.h>
37 torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
40 torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
41 torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
45 "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
46 torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
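For context: torture_param() is a convenience macro from include/linux/torture.h. Roughly speaking (a simplified sketch, not the exact kernel definition), it declares a static variable with a default value and publishes it as a read-only module parameter:

#include <linux/moduleparam.h>

/*
 * Simplified sketch of what a torture_param() line boils down to:
 * a static variable plus a 0444 (read-only) module parameter.
 */
#define sketch_torture_param(type, name, init, msg)	\
        static type name = init;			\
        module_param(name, type, 0444);			\
        MODULE_PARM_DESC(name, msg)

sketch_torture_param(int, sketch_nwriters_stress, -1,
                     "Number of write-locking stress-test threads");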
68 cpumask_var_t *cm_bind = kp->arg; in param_set_cpumask()
74 ret = -ENOMEM; in param_set_cpumask()
82 pr_warn("%s: %s, all CPUs set\n", kp->name, s); in param_set_cpumask()
90 cpumask_var_t *cm_bind = kp->arg; in param_get_cpumask()
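The bind_readers/bind_writers parameters are parsed through custom kernel_param_ops so that a CPU-list string becomes a cpumask. A hedged sketch of that pattern (the sketch_* names are illustrative, not the module's own; the fall-back-to-all-CPUs behaviour mirrors the warning visible above):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/moduleparam.h>

static cpumask_var_t sketch_bind_mask;

static int sketch_param_set_cpumask(const char *val, const struct kernel_param *kp)
{
        cpumask_var_t *cm_bind = kp->arg;

        if (!alloc_cpumask_var(cm_bind, GFP_KERNEL))
                return -ENOMEM;
        if (cpulist_parse(val, *cm_bind)) {
                pr_warn("%s: bad CPU list, all CPUs set\n", kp->name);
                cpumask_setall(*cm_bind);
        }
        return 0;
}

static int sketch_param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
        cpumask_var_t *cm_bind = kp->arg;

        return sprintf(buffer, "%*pbl\n", cpumask_pr_args(*cm_bind));
}

static const struct kernel_param_ops sketch_cpumask_ops = {
        .set = sketch_param_set_cpumask,
        .get = sketch_param_get_cpumask,
};

module_param_cb(sketch_bind_writers, &sketch_cpumask_ops, &sketch_bind_mask, 0644);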
136 void (*init)(void); member
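This .init member is part of the per-lock-type operations table through which the test drives each lock flavor. An abridged, hedged reconstruction of that table's shape, inferred from the callbacks visible in this listing (the member list is approximated, not the verbatim kernel definition):

#include <linux/torture.h>	/* struct torture_random_state */

struct sketch_lock_torture_ops {
        void (*init)(void);
        void (*exit)(void);
        int (*nested_lock)(int tid, u32 lockset);
        int (*writelock)(int tid);
        void (*write_delay)(struct torture_random_state *trsp);
        void (*task_boost)(struct torture_random_state *trsp);
        void (*writeunlock)(int tid);
        void (*nested_unlock)(int tid, u32 lockset);
        int (*readlock)(int tid);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(int tid);
        unsigned long flags;	/* Stash for the irqsave/irqrestore variants. */
        const char *name;
};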
176 /* We want a long delay occasionally to force massive contention. */ in torture_lock_busted_write_delay()
196 * for the new priority, and do any corresponding pi-dance. in __torture_rt_boost()
209 * When @trsp is nil, we want to force-reset the task for in __torture_rt_boost()
253 /* We want a short delay mostly to emulate likely code, and in torture_spin_lock_write_delay()
254 * we want a long delay occasionally to force massive contention. in torture_spin_lock_write_delay()
259 pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j); in torture_spin_lock_write_delay()
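All of the write_delay/read_delay callbacks share one heuristic: usually a very short delay to emulate a realistic critical section, occasionally a much longer one to force massive contention. A hedged sketch of that heuristic (constants and scaling are illustrative; cxt.nrealwriters_stress is the module's effective writer count):

#include <linux/delay.h>
#include <linux/torture.h>

static void sketch_write_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 2;
        const unsigned long longdelay_ms = 100;

        /* Rarely: hold the lock long enough to pile up waiters. */
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);

        /* Much more often: a short busy delay emulating likely code. */
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
                udelay(shortdelay_us);

        /* Occasionally allow preemption so the test itself can be preempted. */
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                torture_preempt_schedule();
}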
290 cxt.cur_ops->flags = flags; in torture_spin_lock_write_lock_irq()
297 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); in torture_lock_spin_write_unlock_irq()
343 cxt.cur_ops->flags = flags; in torture_raw_spin_lock_write_lock_irq()
350 raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags); in torture_raw_spin_lock_write_unlock_irq()
377 /* We want a short delay mostly to emulate likely code, and in torture_rwlock_write_delay()
378 * we want a long delay occasionally to force massive contention. in torture_rwlock_write_delay()
403 /* We want a short delay mostly to emulate likely code, and in torture_rwlock_read_delay()
404 * we want a long delay occasionally to force massive contention. in torture_rwlock_read_delay()
435 cxt.cur_ops->flags = flags; in torture_rwlock_write_lock_irq()
442 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); in torture_rwlock_write_unlock_irq()
451 cxt.cur_ops->flags = flags; in torture_rwlock_read_lock_irq()
458 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); in torture_rwlock_read_unlock_irq()
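The *_lock_irq variants save the IRQ flags into cxt.cur_ops->flags because the lock and unlock halves run in separate callbacks selected through the ops table, so the flags cannot live on one stack frame. A minimal sketch of the same pattern with a plain spinlock (the static sketch_* variables stand in for the flags field):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_lock);
static unsigned long sketch_saved_flags;

static int sketch_write_lock_irq(int tid)
{
        unsigned long flags;

        spin_lock_irqsave(&sketch_lock, flags);
        /* The unlock happens in a different callback, so stash the flags. */
        sketch_saved_flags = flags;
        return 0;
}

static void sketch_write_unlock_irq(int tid)
{
        spin_unlock_irqrestore(&sketch_lock, sketch_saved_flags);
}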
505 /* We want a long delay occasionally to force massive contention. */ in torture_mutex_delay()
523 for (i = nested_locks - 1; i >= 0; i--) in torture_mutex_nested_unlock()
529 .init = torture_mutex_init,
596 err = ww_mutex_lock(ll->lock, ctx); in torture_ww_mutex_lock()
602 ww_mutex_unlock(ln->lock); in torture_ww_mutex_lock()
604 if (err != -EDEADLK) in torture_ww_mutex_lock()
607 ww_mutex_lock_slow(ll->lock, ctx); in torture_ww_mutex_lock()
608 list_move(&ll->link, &list); in torture_ww_mutex_lock()
628 .init = torture_ww_mutex_init,
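The ww_mutex test exercises the wait/wound back-off protocol: when ww_mutex_lock() returns -EDEADLK, every lock already taken is dropped, the contended lock is acquired with ww_mutex_lock_slow(), and the pass is retried. A hedged sketch of that protocol over a list of locks, modeled on the fragments above (names are illustrative):

#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(sketch_ww_class);

struct sketch_ww_obj {
        struct ww_mutex lock;		/* Assumed ww_mutex_init()'d with sketch_ww_class. */
        struct list_head link;
};

/* Take every lock on @locks, backing off on -EDEADLK as ww_mutex requires. */
static int sketch_ww_lock_all(struct list_head *locks, struct ww_acquire_ctx *ctx)
{
        struct sketch_ww_obj *ll, *ln;
        int err;

        ww_acquire_init(ctx, &sketch_ww_class);
        list_for_each_entry(ll, locks, link) {
                err = ww_mutex_lock(&ll->lock, ctx);
                if (!err)
                        continue;

                /* Contention: drop every lock taken earlier in this pass. */
                ln = ll;
                list_for_each_entry_continue_reverse(ln, locks, link)
                        ww_mutex_unlock(&ln->lock);

                if (err != -EDEADLK) {
                        ww_acquire_fini(ctx);
                        return err;
                }

                /*
                 * Sleep until the contended lock is free, take it, and move
                 * it to the head of the list; iteration then resumes over
                 * the remaining (now released) entries without revisiting it.
                 */
                ww_mutex_lock_slow(&ll->lock, ctx);
                list_move(&ll->link, locks);
        }
        ww_acquire_done(ctx);
        return 0;
}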
677 * We want a short delay mostly to emulate likely code, and in torture_rtmutex_delay()
678 * we want a long delay occasionally to force massive contention. in torture_rtmutex_delay()
708 for (i = nested_locks - 1; i >= 0; i--) in torture_rtmutex_nested_unlock()
714 .init = torture_rtmutex_init,
738 /* We want a long delay occasionally to force massive contention. */ in torture_rwsem_write_delay()
760 /* We want a long delay occasionally to force massive contention. */ in torture_rwsem_read_delay()
786 #include <linux/percpu-rwsem.h>
826 .init = torture_percpu_rwsem_init,
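percpu-rwsem embeds per-CPU state that must be allocated with percpu_init_rwsem() and released with percpu_free_rwsem(), which is why this lock type has init/exit hooks. A hedged usage sketch (the variable name is illustrative):

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore sketch_pcpu_rwsem;

static void sketch_percpu_rwsem_demo(void)
{
        BUG_ON(percpu_init_rwsem(&sketch_pcpu_rwsem));

        percpu_down_write(&sketch_pcpu_rwsem);	/* Exclusive writer side. */
        percpu_up_write(&sketch_pcpu_rwsem);

        percpu_down_read(&sketch_pcpu_rwsem);	/* Cheap per-CPU reader side. */
        percpu_up_read(&sketch_pcpu_rwsem);

        percpu_free_rwsem(&sketch_pcpu_rwsem);	/* Release the per-CPU counters. */
}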
850 int tid = lwsp - cxt.lwsa; in lock_torture_writer()
872 cxt.cur_ops->task_boost(&rand); in lock_torture_writer()
873 if (cxt.cur_ops->nested_lock) in lock_torture_writer()
874 cxt.cur_ops->nested_lock(tid, lockset_mask); in lock_torture_writer()
879 cxt.cur_ops->writelock(tid); in lock_torture_writer()
881 lwsp->n_lock_fail++; in lock_torture_writer()
884 lwsp->n_lock_fail++; /* rare, but... */ in lock_torture_writer()
889 __func__, j1 - j); in lock_torture_writer()
891 lwsp->n_lock_acquired++; in lock_torture_writer()
893 cxt.cur_ops->write_delay(&rand); in lock_torture_writer()
897 cxt.cur_ops->writeunlock(tid); in lock_torture_writer()
899 if (cxt.cur_ops->nested_unlock) in lock_torture_writer()
900 cxt.cur_ops->nested_unlock(tid, lockset_mask); in lock_torture_writer()
905 cxt.cur_ops->task_boost(NULL); /* reset prio */ in lock_torture_writer()
917 int tid = lrsp - cxt.lrsa; in lock_torture_reader()
927 cxt.cur_ops->readlock(tid); in lock_torture_reader()
930 lrsp->n_lock_fail++; /* rare, but... */ in lock_torture_reader()
932 lrsp->n_lock_acquired++; in lock_torture_reader()
933 cxt.cur_ops->read_delay(&rand); in lock_torture_reader()
935 cxt.cur_ops->readunlock(tid); in lock_torture_reader()
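Both kthread bodies follow the standard torture-test loop: optionally boost priority, take the lock through the ops table, check exclusion, delay, release, then stutter and test for stop. A condensed, hedged sketch of the writer loop (error paths, nested locking, and the acquisition-limit logic are omitted; cxt, lwsa, and lock_is_write_held are the module's own globals):

static int sketch_lock_torture_writer(void *arg)
{
        struct lock_stress_stats *lwsp = arg;
        int tid = lwsp - cxt.lwsa;	/* Writer index doubles as the lock "tid". */
        DEFINE_TORTURE_RANDOM(rand);

        set_user_nice(current, MAX_NICE);

        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);	/* Occasional sleep. */

                cxt.cur_ops->task_boost(&rand);
                cxt.cur_ops->writelock(tid);
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;	/* Mutual exclusion was violated. */
                lock_is_write_held = true;
                lwsp->n_lock_acquired++;

                cxt.cur_ops->write_delay(&rand);

                lock_is_write_held = false;
                cxt.cur_ops->writeunlock(tid);

                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());

        cxt.cur_ops->task_boost(NULL);	/* Reset priority before exiting. */
        torture_kthread_stopping("lock_torture_writer");
        return 0;
}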
944 * Create a lock-torture-statistics message in the specified buffer.
981 * (or the init/cleanup functions when lock_torture_stats thread is not
989 if (cxt.cur_ops->readlock) in lock_torture_stats_print()
1003 if (cxt.cur_ops->readlock) { in lock_torture_stats_print()
1047 …"--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d… in lock_torture_print_module_parms()
1064 if (!smp_load_acquire(&crcp->crc_stop)) { in call_rcu_chain_cb()
1066 call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another. in call_rcu_chain_cb()
1079 return -ENOMEM; in call_rcu_chain_init()
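call_rcu_chains adds background RCU callback load alongside the lock test: each chain is an rcu_head whose callback re-posts itself until a stop flag is observed. A hedged sketch of one chain and its shutdown handshake (field names follow the fragments above; the stop path is approximated):

#include <linux/rcupdate.h>

struct sketch_call_rcu_chain {
        struct rcu_head crc_rh;
        bool crc_stop;
};

/* Each invocation re-posts the callback ... and later starts another. */
static void sketch_call_rcu_chain_cb(struct rcu_head *rhp)
{
        struct sketch_call_rcu_chain *crcp =
                container_of(rhp, struct sketch_call_rcu_chain, crc_rh);

        if (!smp_load_acquire(&crcp->crc_stop))
                call_rcu(&crcp->crc_rh, sketch_call_rcu_chain_cb);
}

static void sketch_call_rcu_chain_start(struct sketch_call_rcu_chain *crcp)
{
        crcp->crc_stop = false;
        call_rcu(&crcp->crc_rh, sketch_call_rcu_chain_cb);
}

/* Stop the chain, then wait for any in-flight callback to finish. */
static void sketch_call_rcu_chain_stop(struct sketch_call_rcu_chain *crcp)
{
        smp_store_release(&crcp->crc_stop, true);
        rcu_barrier();
}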
1111 * However, cxt.cur_ops->init() may have been invoked, so besides in lock_torture_cleanup()
1112 * performing the underlying torture-specific cleanups, cur_ops->exit() in lock_torture_cleanup()
1134 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ in lock_torture_cleanup()
1155 if (cxt.cur_ops->exit) in lock_torture_cleanup()
1156 cxt.cur_ops->exit(); in lock_torture_cleanup()
1181 return -EBUSY; in lock_torture_init()
1186 if (strcmp(torture_type, cxt.cur_ops->name) == 0) in lock_torture_init()
1190 pr_alert("lock-torture: invalid torture type: \"%s\"\n", in lock_torture_init()
1192 pr_alert("lock-torture types:"); in lock_torture_init()
1194 pr_alert(" %s", torture_ops[i]->name); in lock_torture_init()
1196 firsterr = -EINVAL; in lock_torture_init()
1201 (!cxt.cur_ops->readlock || nreaders_stress == 0)) { in lock_torture_init()
1202 pr_alert("lock-torture: must run at least one locking thread\n"); in lock_torture_init()
1203 firsterr = -EINVAL; in lock_torture_init()
1212 if (cxt.cur_ops->init) { in lock_torture_init()
1213 cxt.cur_ops->init(); in lock_torture_init()
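lock_torture_init() selects the ops table by matching the torture_type module parameter against each entry's name, then invokes the type's init hook if it has one. A condensed sketch of that selection (the ops[] contents are abridged and the error path is simplified):

static int sketch_pick_lock_type(const char *torture_type)
{
        static struct lock_torture_ops *ops[] = {
                &lock_busted_ops, &spin_lock_ops, &mutex_lock_ops, /* ... */
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(ops); i++) {
                cxt.cur_ops = ops[i];
                if (strcmp(torture_type, cxt.cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(ops)) {
                pr_alert("lock-torture: invalid torture type: \"%s\"\n", torture_type);
                return -EINVAL;
        }
        if (cxt.cur_ops->init)
                cxt.cur_ops->init();	/* e.g. percpu_init_rwsem() for percpu-rwsem. */
        return 0;
}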
1239 firsterr = -ENOMEM; in lock_torture_init()
1249 if (cxt.cur_ops->readlock) { in lock_torture_init()
1256 * of threads as the writer-only locks default. in lock_torture_init()
1269 firsterr = -ENOMEM; in lock_torture_init()
1318 firsterr = -ENOMEM; in lock_torture_init()
1327 if (cxt.cur_ops->readlock) { in lock_torture_init()
1335 firsterr = -ENOMEM; in lock_torture_init()
1360 torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers); in lock_torture_init()
1363 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) in lock_torture_init()
1371 torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers); in lock_torture_init()