/linux-6.15/Documentation/core-api/ |
D | workqueue.rst |
     34  number of workers as the number of CPUs. The kernel grew a lot of MT
     82  For threaded workqueues, special purpose threads, called [k]workers, execute
    126  number of the currently runnable workers. Generally, work items are
    130  workers on the CPU, the worker-pool doesn't start execution of a new
    133  are pending work items. This allows using a minimal number of workers
    136  Keeping idle workers around doesn't cost other than the memory space
    148  Forward progress guarantee relies on that workers can be created when
    150  through the use of rescue workers. All work items which might be used
    188  worker-pools which host workers which are not bound to any
    197  of mostly unused workers across different CPUs as the issuer
    [all …]
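The workqueue.rst excerpts above describe work items being queued onto per-CPU worker pools, executed by shared kworkers, with rescue workers guaranteeing forward progress. A minimal sketch of the API those passages document; my_dev, my_work_fn, my_dev_init and my_dev_fini are hypothetical names for illustration, not kernel code.

    /* Minimal sketch of the workqueue API described in the excerpts above.
     * All names here are hypothetical.
     */
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct workqueue_struct *wq;
            struct work_struct work;
    };

    static void my_work_fn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);

            /* runs in process context on one of the worker pool's kworkers */
            (void)dev;
    }

    static int my_dev_init(struct my_dev *dev)
    {
            /* WQ_MEM_RECLAIM attaches a rescuer thread to the queue, which is
             * how the forward-progress guarantee mentioned above is provided */
            dev->wq = alloc_workqueue("my_dev_wq", WQ_MEM_RECLAIM, 0);
            if (!dev->wq)
                    return -ENOMEM;

            INIT_WORK(&dev->work, my_work_fn);
            queue_work(dev->wq, &dev->work);
            return 0;
    }

    static void my_dev_fini(struct my_dev *dev)
    {
            flush_work(&dev->work);
            destroy_workqueue(dev->wq);
    }

Work items submitted this way share the pool's kworkers rather than spawning one thread each, which is the concurrency-management behaviour the excerpt describes.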
|
/linux-6.15/Documentation/translations/zh_CN/core-api/ |
D | workqueue.rst |
    577  pool[00] ref= 1 nice=  0 idle/workers= 4/ 4 cpu= 0
    578  pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
    579  pool[02] ref= 1 nice=  0 idle/workers= 4/ 4 cpu= 1
    580  pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
    581  pool[04] ref= 1 nice=  0 idle/workers= 4/ 4 cpu= 2
    582  pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
    583  pool[06] ref= 1 nice=  0 idle/workers= 3/ 3 cpu= 3
    584  pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
    585  pool[08] ref=42 nice=  0 idle/workers= 6/ 6 cpus=0000000f
    586  pool[09] ref=28 nice=  0 idle/workers= 3/ 3 cpus=00000003
    [all …]
|
/linux-6.15/drivers/gpu/drm/xe/ |
D | xe_gt_sriov_pf_types.h |
     39  * struct xe_gt_sriov_pf_workers - GT level workers used by the PF.
     48  * @workers: workers data.
     57  struct xe_gt_sriov_pf_workers workers;    member
|
D | xe_gt_sriov_pf.c |
     50  INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);    in pf_init_workers()
    189  struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);    in pf_worker_restart_func()
    200  if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))    in pf_queue_restart()
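The xe fragments above show the common pattern of embedding a work_struct in a larger object, recovering the object with container_of() in the handler, and using queue_work()'s return value (false when the item is already pending) to coalesce repeated requests. A self-contained sketch of that pattern; my_gt, my_restart_fn, my_queue_restart and my_init_workers are hypothetical names, not the xe driver's code.

    /* Generic sketch of the embed + container_of() + queue_work() pattern
     * seen in the xe PF fragments above; all names here are hypothetical.
     */
    #include <linux/printk.h>
    #include <linux/workqueue.h>

    struct my_gt {
            struct workqueue_struct *wq;
            struct work_struct restart;
    };

    static void my_restart_fn(struct work_struct *w)
    {
            struct my_gt *gt = container_of(w, struct my_gt, restart);

            /* ... perform the restart on behalf of gt ... */
            (void)gt;
    }

    static void my_queue_restart(struct my_gt *gt)
    {
            /* queue_work() returns false if the item is already pending,
             * so concurrent restart requests collapse into one execution */
            if (!queue_work(gt->wq, &gt->restart))
                    pr_debug("restart already queued\n");
    }

    static void my_init_workers(struct my_gt *gt)
    {
            INIT_WORK(&gt->restart, my_restart_fn);
    }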
|
/linux-6.15/tools/testing/selftests/powerpc/math/ |
D | fpu_preempt.c |
     24  /* Time to wait for workers to get preempted (seconds) */
     69  printf("\tWaiting for all workers to start...");    in test_preempt_fpu()
     74  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);    in test_preempt_fpu()
     78  printf("\tStopping workers...");    in test_preempt_fpu()
|
D | vmx_preempt.c |
     24  /* Time to wait for workers to get preempted (seconds) */
     78  printf("\tWaiting for all workers to start...");    in test_preempt_vmx()
     83  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);    in test_preempt_vmx()
     87  printf("\tStopping workers...");    in test_preempt_vmx()
|
D | vsx_preempt.c |
     23  /* Time to wait for workers to get preempted (seconds) */
    110  printf("\tWaiting for %d workers to start...", threads_starting);    in test_preempt_vsx()
    115  printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);    in test_preempt_vsx()
    119  printf("\tStopping workers...");    in test_preempt_vsx()
|
D | fpu_signal.c |
     89  printf("\tWaiting for all workers to start...");    in test_signal_fpu()
    103  printf("\tStopping workers...");    in test_signal_fpu()
|
D | vmx_signal.c |
    114  printf("\tWaiting for %d workers to start... %d", threads, threads_starting);    in test_signal_vmx()
    131  printf("\tKilling workers...");    in test_signal_vmx()
|
/linux-6.15/fs/erofs/ |
D | Kconfig |
    164  bool "EROFS per-cpu decompression kthread workers"
    167  Saying Y here enables per-CPU kthread worker pools to carry out
    173  bool "EROFS high priority per-CPU kthread workers"
    177  This permits EROFS to configure per-CPU kthread workers to run
|
/linux-6.15/tools/testing/selftests/bpf/ |
D | test_progs.c |
    534  if (verbose() && !env.workers)    in test__end_subtest()
    886  { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
    887    "Number of workers to run in parallel, default to number of cpus." },
   1086  env->workers = atoi(arg);    in parse_arg()
   1087  if (!env->workers) {    in parse_arg()
   1092  env->workers = get_nprocs();    in parse_arg()
   1303  for (i = 0; i < env.workers; i++)    in sigint_handler()
   1681  dispatcher_threads = calloc(sizeof(pthread_t), env.workers);    in server_main()
   1682  data = calloc(sizeof(struct dispatch_data), env.workers);    in server_main()
   1684  env.worker_current_test = calloc(sizeof(int), env.workers);    in server_main()
   [all …]
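The parse_arg() fragments above show the default: an explicit --workers count is used as given, while a zero or missing value falls back to one worker per online CPU via get_nprocs(). A standalone userspace sketch of that fallback logic; resolve_workers() is a hypothetical helper, not part of test_progs.c.

    /* Sketch of the worker-count default shown in the parse_arg() fragments
     * above; resolve_workers() is a hypothetical helper for illustration. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>

    static int resolve_workers(const char *arg)
    {
            int workers = arg ? atoi(arg) : 0;

            /* 0 or no argument: one worker per online CPU */
            return workers > 0 ? workers : get_nprocs();
    }

    int main(int argc, char **argv)
    {
            printf("workers = %d\n", resolve_workers(argc > 1 ? argv[1] : NULL));
            return 0;
    }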
|
/linux-6.15/include/linux/ |
D | padata.h |
     71  * struct padata_cpumask - The cpumasks for the parallel/serial workers
     73  * @pcpu: cpumask for the parallel workers.
     74  * @cbcpu: cpumask for the serial (callback) workers.
     92  * @cpumask: The cpumasks in use for parallel and serial workers.
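The padata.h lines above document the two cpumasks a padata instance keeps: pcpu for the parallel workers and cbcpu for the serial (callback) workers. A hedged sketch of how a job flows across them; my_req, my_parallel, my_serial and my_submit are illustrative names, and the padata signatures shown follow recent kernels but may differ between versions. Setup via padata_alloc()/padata_alloc_shell() and teardown are omitted.

    /* Illustrative sketch of a padata job; not taken from any in-tree user. */
    #include <linux/errno.h>
    #include <linux/padata.h>
    #include <linux/slab.h>

    struct my_req {
            struct padata_priv pd;          /* embedded padata bookkeeping */
            /* ... job payload ... */
    };

    static void my_parallel(struct padata_priv *pd)
    {
            /* runs on a CPU from the 'pcpu' (parallel) cpumask */
            padata_do_serial(pd);           /* hand off to the serial stage */
    }

    static void my_serial(struct padata_priv *pd)
    {
            /* runs on a CPU from the 'cbcpu' (serial/callback) cpumask,
             * with completions delivered in submission order */
            kfree(container_of(pd, struct my_req, pd));
    }

    static int my_submit(struct padata_shell *ps)
    {
            struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
            int cb_cpu = 0;         /* padata falls back to the cbcpu mask */

            if (!req)
                    return -ENOMEM;
            req->pd.parallel = my_parallel;
            req->pd.serial = my_serial;
            return padata_do_parallel(ps, &req->pd, &cb_cpu);
    }

The instance-wide masks themselves can be changed at runtime with padata_set_cpumask(); the exact selector constants and signatures should be checked against the kernel version in use.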
|
/linux-6.15/tools/workqueue/ |
D | wq_dump.py |
     28  idle        number of idle workers
     29  workers     number of all workers
     31  cpus        CPUs the workers in the pool can run on (unbound pool)
    160  print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
|
/linux-6.15/tools/testing/selftests/mm/ |
D | test_vmalloc.sh |
     63  echo "available test cases are run by NUM_CPUS workers simultaneously."
     96  echo "# Runs 1 test(id_1), repeats it 5 times by NUM_CPUS workers"
    104  echo -n "# Runs all tests by NUM_CPUS workers, shuffled order, repeats "
|
/linux-6.15/lib/ |
D | test_vmalloc.c |
     27  "Number of workers to perform tests(min: 1 max: USHRT_MAX)");
     62  * phase that is done in main thread and workers.
    507  * A maximum number of workers is defined as hard-coded    in init_test_configuration()
    539  * Put on hold all workers.    in do_concurrent_test()
    556  * Now let the workers do their job.    in do_concurrent_test()
    561  * Sleep quiet until all workers are done with 1 second    in do_concurrent_test()
|
D | test_objpool.c |
    396  /* tell worker threads to quit */    in ot_start_sync()
    399  /* wait for all worker threads to finish and quit */    in ot_start_sync()
    580  /* tell worker threads to quit */    in ot_start_async()
    586  /* wait for all worker threads to finish and quit */    in ot_start_async()
|
/linux-6.15/tools/testing/selftests/kvm/x86/ |
D | hyperv_tlb_flush.c |
     57  * Pass the following info to 'workers' and 'sender'
    156  * Prepare to test: 'disable' workers by setting the expectation to '0',
    164  /* 'Disable' workers */    in prepare_to_test()
    168  /* Make sure workers are 'disabled' before we swap PTEs. */    in prepare_to_test()
    171  /* Make sure workers have enough time to notice */    in prepare_to_test()
    187  /* Set the expectation for workers, '0' means don't test */    in post_test()
    191  /* Make sure workers have enough time to test */    in post_test()
    630  * for 'workers' and issues TLB flush hypercalls.    in main()
|
/linux-6.15/kernel/ |
D | workqueue.c |
     66  * While associated (!DISASSOCIATED), all workers are bound to the
     70  * While DISASSOCIATED, the cpu may be offline and all workers have
     83  POOL_DISASSOCIATED = 1 << 2,    /* cpu can't serve workers */
    121  * Rescue workers are used only in emergencies and shared by
    204  int nr_workers;                 /* L: total number of workers */
    205  int nr_idle;                    /* L: currently idle workers */
    207  struct list_head idle_list;     /* L: list of idle workers */
    211  struct timer_list mayday_timer; /* L: SOS timer for workers */
    213  /* a worker is either on busy_hash or idle_list, or the manager */
    215  /* L: hash of busy workers */
    [all …]
|
D | workqueue_internal.h |
     18  * The poor guys doing the actual heavy lifting. All on-duty workers are
     47  struct list_head node;    /* A: anchored at pool->workers */
|
/linux-6.15/io_uring/ |
D | io-wq.c |
     89  * The list of free workers. Protected by #workers_lock
     95  * The list of all workers. Protected by #workers_lock
    312  * below the max number of workers, create one.
    318  * wasn't set up with any unbounded workers.    in io_wq_create_worker()
    321  pr_warn_once("io-wq is not configured for unbound workers");    in io_wq_create_worker()
    727  * Called when worker is going to sleep. If there are no workers currently
   1397  * Set max number of unbounded workers, returns old value. If new_count is 0,
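The last fragment above refers to the per-ring cap on bounded/unbounded io-wq workers. From userspace that cap is reachable through io_uring_register(); a hedged sketch using liburing's io_uring_register_iowq_max_workers() helper, assuming liburing 2.1+ and a kernel with IORING_REGISTER_IOWQ_MAX_WORKERS. An entry of 0 leaves that limit unchanged and only reports the current value, matching the "If new_count is 0" comment quoted above.

    /* Userspace sketch of adjusting io-wq worker limits via liburing.
     * counts[0] = bounded workers, counts[1] = unbounded workers. */
    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            unsigned int counts[2] = { 0, 16 };
            int ret;

            ret = io_uring_queue_init(8, &ring, 0);
            if (ret < 0)
                    return 1;

            /* cap unbounded io-wq workers at 16; the previous limits are
             * written back into counts[] on success */
            ret = io_uring_register_iowq_max_workers(&ring, counts);
            if (ret == 0)
                    printf("previous limits: bounded=%u unbounded=%u\n",
                           counts[0], counts[1]);

            io_uring_queue_exit(&ring);
            return ret ? 1 : 0;
    }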
|
/linux-6.15/tools/perf/trace/beauty/include/uapi/linux/ |
D | vhost.h |
     63  * virtqueue. If userspace is not able to call this for workers it has created,
     64  * the kernel will free all the device's workers when the device is closed.
|
/linux-6.15/Documentation/devicetree/bindings/media/ |
D | mediatek,vcodec-subdev-decoder.yaml |
     46  Its workers take input bitstream and LAT buffer, enable the hardware for
     50  Its workers take LAT buffer and output buffer, enable the hardware for
|
/linux-6.15/include/uapi/linux/ |
D | vhost.h |
     63  * virtqueue. If userspace is not able to call this for workers it has created,
     64  * the kernel will free all the device's workers when the device is closed.
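The comment above belongs to the vhost worker lifecycle ioctls. A hedged userspace sketch of that lifecycle, assuming the VHOST_NEW_WORKER / VHOST_ATTACH_VRING_WORKER / VHOST_FREE_WORKER ioctls and the vhost_worker_state / vhost_vring_worker argument structs as found in recent uapi headers; attach_new_worker() and the already-open vhost device fd are illustrative assumptions, not code from this tree.

    /* Hypothetical sketch: create a dedicated vhost worker, attach a
     * virtqueue to it, and fall back to freeing it on failure. If the
     * worker is never freed explicitly, the kernel reclaims it when the
     * device fd is closed, as the quoted comment says. */
    #include <linux/vhost.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int attach_new_worker(int dev_fd, unsigned int vring_index)
    {
            struct vhost_worker_state state = { 0 };
            struct vhost_vring_worker vring_worker = { 0 };

            /* create a dedicated worker thread; the kernel returns its id */
            if (ioctl(dev_fd, VHOST_NEW_WORKER, &state) < 0) {
                    perror("VHOST_NEW_WORKER");
                    return -1;
            }

            /* bind the virtqueue to the new worker */
            vring_worker.index = vring_index;
            vring_worker.worker_id = state.worker_id;
            if (ioctl(dev_fd, VHOST_ATTACH_VRING_WORKER, &vring_worker) < 0) {
                    perror("VHOST_ATTACH_VRING_WORKER");
                    ioctl(dev_fd, VHOST_FREE_WORKER, &state);
                    return -1;
            }
            return 0;
    }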
|
/linux-6.15/fs/btrfs/ |
D | async-thread.c |
     38  /* Upper limit of concurrent workers */
     41  /* Current number of concurrent workers */
|
/linux-6.15/net/l2tp/ |
D | Kconfig | 23 with home workers to connect to their offices.
|