Lines Matching +full:wait +full:-delay (lib/test_objpool.c)

1 // SPDX-License-Identifier: GPL-2.0
16 #include <linux/delay.h>
37 struct completion wait; member
53 int delay; /* ms */ member
61 /* per-cpu worker */
73 int delay; member
90 atomic_long_add(size, &test->data.kmalloc.alloc); in ot_kzalloc()
98 atomic_long_add(size, &test->data.kmalloc.free); in ot_kfree()
106 pr_info("memory allocation summary for %s\n", test->name); in ot_mem_report()
108 alloc = atomic_long_read(&test->data.kmalloc.alloc); in ot_mem_report()
109 free = atomic_long_read(&test->data.kmalloc.free); in ot_mem_report()
110 pr_info(" kmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free); in ot_mem_report()
112 alloc = atomic_long_read(&test->data.vmalloc.alloc); in ot_mem_report()
113 free = atomic_long_read(&test->data.vmalloc.free); in ot_mem_report()
114 pr_info(" vmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free); in ot_mem_report()
139 init_rwsem(&data->start); in ot_init_data()
140 init_completion(&data->wait); in ot_init_data()
141 init_completion(&data->rcu); in ot_init_data()
142 atomic_set(&data->nthreads, 1); in ot_init_data()
152 on->owner = &sop->pool; in ot_init_node()
159 struct ot_test *test = item->test; in ot_hrtimer_handler()
161 if (atomic_read_acquire(&test->data.stop)) in ot_hrtimer_handler()
164 /* do bulk testing of object pop/push */ in ot_hrtimer_handler()
165 item->worker(item, 1); in ot_hrtimer_handler()
167 hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle); in ot_hrtimer_handler()
173 if (!item->test->hrtimer) in ot_start_hrtimer()
175 hrtimer_start(&item->hrtimer, item->hrtcycle, HRTIMER_MODE_REL); in ot_start_hrtimer()
180 if (!item->test->hrtimer) in ot_stop_hrtimer()
182 hrtimer_cancel(&item->hrtimer); in ot_stop_hrtimer()
187 struct hrtimer *hrt = &item->hrtimer; in ot_init_hrtimer()
190 return -ENOENT; in ot_init_hrtimer()
192 item->hrtcycle = ktime_set(0, hrtimer * 1000000UL); in ot_init_hrtimer()
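
The hrtimer lines above implement the irq-context half of the test: when a test case sets a non-zero hrtimer period, each per-CPU item arms an hrtimer whose handler runs one bulk pop/push pass and re-arms itself until the stop flag is observed. Below is a minimal sketch of that periodic-handler pattern using only core hrtimer/atomic primitives; all demo_* names are made up for illustration, and newer kernels would use hrtimer_setup() instead of hrtimer_init() plus a .function assignment.

#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static ktime_t demo_period;
static atomic_t demo_stop;

/* runs in hard-irq context: do one bulk pass, then re-arm until stop is set */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *hrt)
{
	if (atomic_read_acquire(&demo_stop))
		return HRTIMER_NORESTART;

	/* the real test calls item->worker(item, 1) here (irq-context bulk pass) */

	hrtimer_forward_now(hrt, demo_period);
	return HRTIMER_RESTART;
}

static void demo_timer_begin(unsigned long period_ms)
{
	demo_period = ktime_set(0, period_ms * NSEC_PER_MSEC);
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, demo_period, HRTIMER_MODE_REL);
}

static void demo_timer_end(void)
{
	atomic_set_release(&demo_stop, 1);
	hrtimer_cancel(&demo_timer);
}
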
203 item->pool = pool; in ot_init_cpu_item()
204 item->test = test; in ot_init_cpu_item()
205 item->worker = worker; in ot_init_cpu_item()
207 item->bulk[0] = test->bulk_normal; in ot_init_cpu_item()
208 item->bulk[1] = test->bulk_irq; in ot_init_cpu_item()
209 item->delay = test->delay; in ot_init_cpu_item()
212 ot_init_hrtimer(item, item->test->hrtimer); in ot_init_cpu_item()
219 struct ot_test *test = item->test; in ot_thread_worker()
222 atomic_inc(&test->data.nthreads); in ot_thread_worker()
223 down_read(&test->data.start); in ot_thread_worker()
224 up_read(&test->data.start); in ot_thread_worker()
228 if (atomic_read_acquire(&test->data.stop)) in ot_thread_worker()
230 /* do bulk testing of object pop/push */ in ot_thread_worker()
231 item->worker(item, 0); in ot_thread_worker()
234 item->duration = (u64) ktime_us_delta(ktime_get(), start); in ot_thread_worker()
235 if (atomic_dec_and_test(&test->data.nthreads)) in ot_thread_worker()
236 complete(&test->data.wait); in ot_thread_worker()
247 pr_info("Testing summary for %s\n", test->name); in ot_perf_report()
251 if (!item->duration) in ot_perf_report()
253 normal.nhits += item->stat[0].nhits; in ot_perf_report()
254 normal.nmiss += item->stat[0].nmiss; in ot_perf_report()
255 irq.nhits += item->stat[1].nhits; in ot_perf_report()
256 irq.nmiss += item->stat[1].nmiss; in ot_perf_report()
257 pr_info("CPU: %d duration: %lluus\n", cpu, item->duration); in ot_perf_report()
259 item->stat[0].nhits, item->stat[0].nmiss); in ot_perf_report()
261 item->stat[1].nhits, item->stat[1].nmiss); in ot_perf_report()
263 item->stat[0].nhits + item->stat[1].nhits, in ot_perf_report()
264 item->stat[0].nmiss + item->stat[1].nmiss); in ot_perf_report()
275 test->data.objects = total; in ot_perf_report()
276 test->data.duration = duration; in ot_perf_report()
293 sop->test = test; in ot_init_sync_m0()
294 if (test->objsz < 512) in ot_init_sync_m0()
297 if (objpool_init(&sop->pool, max, test->objsz, in ot_init_sync_m0()
302 WARN_ON(max != sop->pool.nr_objs); in ot_init_sync_m0()
309 objpool_fini(&sop->pool); in ot_fini_sync()
310 ot_kfree(sop->test, sop, sizeof(*sop)); in ot_fini_sync()
329 for (i = 0; i < item->bulk[irq]; i++) in ot_bulk_sync()
330 nods[i] = objpool_pop(item->pool); in ot_bulk_sync()
332 if (!irq && (item->delay || !(++(item->niters) & 0x7FFF))) in ot_bulk_sync()
333 msleep(item->delay); in ot_bulk_sync()
335 while (i-- > 0) { in ot_bulk_sync()
338 on->refs++; in ot_bulk_sync()
339 objpool_push(on, item->pool); in ot_bulk_sync()
340 item->stat[irq].nhits++; in ot_bulk_sync()
342 item->stat[irq].nmiss++; in ot_bulk_sync()
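
ot_bulk_sync() above is the thread-context worker: pop up to bulk[irq] objects, optionally msleep(item->delay) to emulate slow paths, then push everything back while counting hits (object obtained) and misses (pool empty). A rough self-contained illustration of the same pop/delay/push loop against the objpool API follows; the demo_* names and pool sizing are assumptions for the sketch, and the objpool_init() signature should be checked against include/linux/objpool.h in your tree.

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/objpool.h>
#include <linux/slab.h>

struct demo_obj {
	struct objpool_head *owner;
	unsigned long refs;
};

/* per-object initializer passed to objpool_init() */
static int demo_obj_init(void *obj, void *context)
{
	struct demo_obj *node = obj;

	node->owner = context;
	node->refs = 0;
	return 0;
}

/* one bulk pass: pop up to @bulk objects, sleep @delay_ms, push them back */
static void demo_bulk(struct objpool_head *pool, int bulk, int delay_ms)
{
	struct demo_obj *nodes[16];
	int i;

	if (bulk > (int)ARRAY_SIZE(nodes))
		bulk = ARRAY_SIZE(nodes);

	for (i = 0; i < bulk; i++)
		nodes[i] = objpool_pop(pool);

	if (delay_ms)
		msleep(delay_ms);	/* hold the objects for a while, like item->delay */

	while (i-- > 0) {
		if (!nodes[i])
			continue;	/* pool was empty: counted as a miss in the test */
		nodes[i]->refs++;
		objpool_push(nodes[i], pool);
	}
}

static int demo_pool_smoke_test(void)
{
	struct objpool_head pool;
	int ret;

	ret = objpool_init(&pool, 32, sizeof(struct demo_obj),
			   GFP_KERNEL, &pool, demo_obj_init, NULL);
	if (ret)
		return ret;

	demo_bulk(&pool, 16, 1);
	objpool_fini(&pool);
	return 0;
}
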
356 sop = g_ot_sync_ops[test->mode].init(test); in ot_start_sync()
358 return -ENOMEM; in ot_start_sync()
361 down_write(&test->data.start); in ot_start_sync()
367 ot_init_cpu_item(item, test, &sop->pool, ot_bulk_sync); in ot_start_sync()
379 /* wait a while to make sure all threads are waiting at the start line */ in ot_start_sync()
383 if (atomic_dec_and_test(&test->data.nthreads)) in ot_start_sync()
384 complete(&test->data.wait); in ot_start_sync()
390 up_write(&test->data.start); in ot_start_sync()
393 timeout = msecs_to_jiffies(test->duration); in ot_start_sync()
397 atomic_set_release(&test->data.stop, 1); in ot_start_sync()
399 /* wait for all worker threads to finish and quit */ in ot_start_sync()
400 wait_for_completion(&test->data.wait); in ot_start_sync()
404 g_ot_sync_ops[test->mode].fini(sop); in ot_start_sync()
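
ot_start_sync() above shows how the test synchronizes its workers around the wait/start/stop primitives declared near the top of the file: the control path holds data.start for write while the per-CPU threads are created, each worker blocks on down_read() until up_write() opens the start line, the stop flag is set with release semantics after the test duration, and the last worker to exit completes data.wait. A minimal sketch of that pattern, with hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/sched.h>

static struct rw_semaphore demo_start;	/* write-locked until all workers exist */
static struct completion demo_wait;	/* completed by the last worker to finish */
static atomic_t demo_nthreads;		/* live workers, plus one for the controller */
static atomic_t demo_stop;

static int demo_worker(void *arg)
{
	atomic_inc(&demo_nthreads);
	down_read(&demo_start);		/* park here until the controller calls up_write() */
	up_read(&demo_start);

	while (!atomic_read_acquire(&demo_stop))
		cond_resched();		/* the real test runs bulk pop/push passes here */

	if (atomic_dec_and_test(&demo_nthreads))
		complete(&demo_wait);	/* last one out signals the controller */
	return 0;
}

static void demo_run(unsigned int nr_workers, unsigned int duration_ms)
{
	unsigned int i;

	init_rwsem(&demo_start);
	init_completion(&demo_wait);
	atomic_set(&demo_nthreads, 1);	/* the controller's own reference */
	atomic_set(&demo_stop, 0);

	down_write(&demo_start);	/* keep the start line closed */
	for (i = 0; i < nr_workers; i++)
		kthread_run(demo_worker, NULL, "demo_worker/%u", i);

	msleep(20);			/* let the workers reach down_read() */
	if (atomic_dec_and_test(&demo_nthreads))
		complete(&demo_wait);	/* covers the "no worker was created" case */
	up_write(&demo_start);		/* release every worker at once */

	msleep(duration_ms);		/* let the test run for its duration */
	atomic_set_release(&demo_stop, 1);

	wait_for_completion(&demo_wait);	/* wait until every worker has exited its loop */
}
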
422 struct ot_test *test = sop->test; in ot_fini_async_rcu()
424 /* here all CPUs are aware of the stop event: test->data.stop = 1 */ in ot_fini_async_rcu()
425 WARN_ON(!atomic_read_acquire(&test->data.stop)); in ot_fini_async_rcu()
427 objpool_fini(&sop->pool); in ot_fini_async_rcu()
428 complete(&test->data.rcu); in ot_fini_async_rcu()
434 call_rcu(&sop->rcu, ot_fini_async_rcu); in ot_fini_async()
441 WARN_ON(!head || !sop || head != &sop->pool); in ot_objpool_release()
445 ot_kfree(sop->test, sop, sizeof(*sop)); in ot_objpool_release()
459 sop->test = test; in ot_init_async_m0()
460 if (test->objsz < 512) in ot_init_async_m0()
463 if (objpool_init(&sop->pool, max, test->objsz, gfp, sop, in ot_init_async_m0()
468 WARN_ON(max != sop->pool.nr_objs); in ot_init_async_m0()
485 on->refs++; in ot_nod_recycle()
494 WARN_ON(sop != pool->context); in ot_nod_recycle()
502 struct ot_test *test = item->test; in ot_bulk_async()
506 for (i = 0; i < item->bulk[irq]; i++) in ot_bulk_async()
507 nods[i] = objpool_pop(item->pool); in ot_bulk_async()
510 if (item->delay || !(++(item->niters) & 0x7FFF)) in ot_bulk_async()
511 msleep(item->delay); in ot_bulk_async()
515 stop = atomic_read_acquire(&test->data.stop); in ot_bulk_async()
518 while (i-- > 0) { in ot_bulk_async()
522 on->refs++; in ot_bulk_async()
523 ot_nod_recycle(on, item->pool, stop); in ot_bulk_async()
524 item->stat[irq].nhits++; in ot_bulk_async()
526 item->stat[irq].nmiss++; in ot_bulk_async()
543 sop = g_ot_async_ops[test->mode].init(test); in ot_start_async()
545 return -ENOMEM; in ot_start_async()
548 down_write(&test->data.start); in ot_start_async()
554 ot_init_cpu_item(item, test, &sop->pool, ot_bulk_async); in ot_start_async()
565 /* wait a while to make sure all threads are waiting at the start line */ in ot_start_async()
569 if (atomic_dec_and_test(&test->data.nthreads)) in ot_start_async()
570 complete(&test->data.wait); in ot_start_async()
574 up_write(&test->data.start); in ot_start_async()
577 timeout = msecs_to_jiffies(test->duration); in ot_start_async()
581 atomic_set_release(&test->data.stop, 1); in ot_start_async()
583 /* do the async finalization */ in ot_start_async()
584 g_ot_async_ops[test->mode].fini(sop); in ot_start_async()
586 /* wait for all worker threads to finish and quit */ in ot_start_async()
587 wait_for_completion(&test->data.wait); in ot_start_async()
591 wait_for_completion(&test->data.rcu); in ot_start_async()
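
The asynchronous variant cannot free the pool synchronously, because objects may still be travelling through irq context when the stop flag is set. Instead, ot_fini_async() defers the teardown with call_rcu(); by the time the callback runs, a grace period guarantees every CPU has observed test->data.stop, so objpool_fini() is safe, and data.rcu is completed so ot_start_async() can wait for it after the workers have drained. A generic sketch of that defer-then-wait shape (demo_* names are illustrative only):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ctx {
	struct rcu_head rcu;
	struct completion rcu_done;
	/* the shared resources to tear down would live here */
};

static struct demo_ctx *demo_ctx_alloc(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		init_completion(&ctx->rcu_done);
	return ctx;
}

/* runs after a grace period: every CPU has seen the stop flag by now */
static void demo_fini_rcu(struct rcu_head *rcu)
{
	struct demo_ctx *ctx = container_of(rcu, struct demo_ctx, rcu);

	/* safe to release resources that irq-side users might have touched */
	complete(&ctx->rcu_done);
}

static void demo_teardown(struct demo_ctx *ctx)
{
	/* defer the actual teardown past any in-flight readers */
	call_rcu(&ctx->rcu, demo_fini_rcu);

	/* ...stop and reap the worker threads here, as ot_start_async() does... */

	wait_for_completion(&ctx->rcu_done);
	kfree(ctx);
}
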
615 * delay: int, delay (in ms) between each iteration
670 return -EAGAIN; in ot_mod_init()