// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static inline void padata_get_pd(struct parallel_data *pd)
{
	refcount_inc(&pd->refcnt);
}

static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
{
	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

static inline void padata_put_pd(struct parallel_data *pd)
{
	padata_put_pd_cnt(pd, 1);
}
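
/*
 * Reference counting sketch, as used in this file: a parallel_data starts
 * with one base reference from padata_alloc_pd(). Each object submitted
 * through padata_do_parallel() takes one additional reference, and
 * padata_serial_worker() drops one reference per serialized object via
 * padata_put_pd_cnt(). The base reference is dropped when the pd is
 * replaced or its shell is freed, so a pd outlives all in-flight objects.
 */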

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of CPUs in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return cpumask_nth(cpu_index, pd->cpumask.pcpu);
}
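
/*
 * For example (illustrative numbers only): with pd->cpumask.pcpu = {2, 5, 9}
 * and seq_nr = 7, cpu_index = 7 % 3 = 1, and cpumask_nth(1, ...) selects
 * CPU 5. Consecutive sequence numbers therefore round-robin across the
 * CPUs in the parallel cpumask.
 */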

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because it may be optimized in such a way
 * that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with
 * clang LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other
 * work_fn values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;
	struct padata_work *pw;
	int cpu_index, err;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select a fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
		*cb_cpu = cpumask_nth(cpu_index, pd->cpumask.cbcpu);
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	padata_get_pd(pd);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
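
/*
 * Example usage (an illustrative sketch, not part of this file; the struct
 * and the names my_req, my_parallel() and my_serial() are hypothetical):
 *
 *	struct my_req {
 *		struct padata_priv padata;	// must embed padata_priv
 *		// ... caller's own data ...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_req *req = container_of(padata, struct my_req, padata);
 *
 *		// ... do the CPU-intensive work; BHs are off here ...
 *		padata_do_serial(padata);	// every object must reach this
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs in submission order on the chosen callback CPU
 *	}
 *
 *	// Submission:
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	// on -EBUSY or -EINVAL, fall back to synchronous processing
 */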

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
					    unsigned int processed)
{
	struct padata_priv *padata;
	struct padata_list *reorder;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list))
		goto notfound;

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != processed)
		goto notfound;

	list_del_init(&padata->list);
	spin_unlock(&reorder->lock);
	return padata;

notfound:
	pd->processed = processed;
	pd->cpu = cpu;
	spin_unlock(&reorder->lock);
	return NULL;
}
static void padata_reorder(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	struct padata_instance *pinst = pd->ps->pinst;
	unsigned int processed;
	int cpu;

	processed = pd->processed;
	cpu = pd->cpu;

	do {
		struct padata_serial_queue *squeue;
		int cb_cpu;

		processed++;
		/* When the sequence number wraps around, reset to the first CPU. */
		if (unlikely(processed == 0))
			cpu = cpumask_first(pd->cpumask.pcpu);
		else
			cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu);

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, end the loop.
		 */
		padata = padata_find_next(pd, cpu, processed);
		spin_unlock(&squeue->serial.lock);
	} while (padata);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	padata_put_pd_cnt(pd, cnt);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;
	bool gotit = true;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/* Compare by difference to consider integer wrap around */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	if (padata->seq_nr != pd->processed) {
		gotit = false;
		list_add(&padata->list, pos);
	}
	spin_unlock(&reorder->lock);

	if (gotit)
		padata_reorder(padata);
}
EXPORT_SYMBOL(padata_do_serial);
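
/*
 * A worked example of the wraparound-safe comparison above (illustrative
 * values only): with cur->seq_nr = 0xfffffffe and padata->seq_nr = 0x1, the
 * unsigned difference 0xfffffffe - 0x1 = 0xfffffffd reinterpreted as signed
 * is -3, so cur still sorts before padata even though it is numerically
 * larger. A plain "cur->seq_nr < padata->seq_nr" would order the two
 * incorrectly across the 32-bit wrap.
 */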

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function. Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Ensure chunk_size is at least 1 to prevent divide-by-0
	 * panic in padata_mt_helper().
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
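
	/*
	 * For example (illustrative numbers only): a job of size 1048576
	 * units split over ps.nworks = 8 helpers gives 1048576 / (8 * 4) =
	 * 32768 units per chunk; with min_chunk = 65536 the chunk grows to
	 * 65536, and roundup() then keeps it aligned to job->align.
	 */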

	list_for_each_entry(pw, &works, pw_list)
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_dfl_wq, &pw->pw_work);
		} else {
			queue_work(system_dfl_wq, &pw->pw_work);
		}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
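
/*
 * Example usage (an illustrative sketch, not part of this file; the helper
 * my_init_range() and the range variables are hypothetical). A boot-time
 * caller describes the whole range once and padata splits it across helpers:
 *
 *	static void __init my_init_range(unsigned long start, unsigned long end,
 *					 void *arg)
 *	{
 *		// initialize items in [start, end); called once per chunk
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = my_init_range,
 *		.fn_arg      = NULL,
 *		.start       = first_item,
 *		.size        = nr_items,
 *		.align       = 1,
 *		.min_chunk   = 1024,
 *		.max_threads = num_online_cpus(),
 *		.numa_aware  = false,
 *	};
 *
 *	padata_do_multithreaded(&job);	// returns when the whole job is done
 */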

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		INIT_LIST_HEAD(&squeue->serial.list);
		spin_lock_init(&squeue->serial.lock);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		INIT_LIST_HEAD(&list->list);
		spin_lock_init(&list->lock);
	}
}
/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps,
					     int offlining_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc_obj(struct parallel_data);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
	if (offlining_cpu >= 0) {
		__cpumask_clear_cpu(offlining_cpu, pd->cpumask.pcpu);
		__cpumask_clear_cpu(offlining_cpu, pd->cpumask.cbcpu);
	}

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps, int offlining_cpu)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps, offlining_cpu);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst, int offlining_cpu)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps, offlining_cpu);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		padata_put_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask,
				    int offlining_cpu)
{
	cpumask_copy(pinst->validate_cpumask, cpu_online_mask);

	/*
	 * @offlining_cpu is still in cpu_online_mask, so remove it here for
	 * validation. Using a sub-CPUHP_TEARDOWN_CPU hotplug state where
	 * @offlining_cpu wouldn't be in the online mask doesn't work because
	 * padata_cpu_offline() can fail but such a state doesn't allow failure.
	 */
	if (offlining_cpu >= 0)
		__cpumask_clear_cpu(offlining_cpu, pinst->validate_cpumask);

	if (!cpumask_intersects(cpumask, pinst->validate_cpumask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask, -1);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask, -1);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
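
/*
 * Example usage (an illustrative sketch, not part of this file): restrict
 * the parallel workers of an instance to CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	cpumask_set_cpu(2, mask);
 *	cpumask_set_cpu(3, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */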

#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
	       cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);

	ret = padata_replace(pinst, -1);

	if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu, -1) &&
	    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, -1))
		__padata_start(pinst);

	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);

	if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu, cpu) ||
	    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, cpu))
		__padata_stop(pinst);

	ret = padata_replace(pinst, cpu);

	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpuhp_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	free_cpumask_var(pinst->validate_cpumask);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of_const(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, const struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, const struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    const struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     const struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static const struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static const struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static const struct attribute *const padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
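
/*
 * Example (an illustrative sketch; the exact sysfs path depends on where the
 * instance's user adds the kobject, e.g. pcrypt registers its instances
 * under /sys/kernel/pcrypt/):
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The stored value is parsed by bitmap_parse() above, so it takes the usual
 * hexadecimal cpumask format.
 */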

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	const struct padata_sysfs_entry *pentry;
	struct padata_instance *pinst;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	const struct padata_sysfs_entry *pentry;
	struct padata_instance *pinst;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc_obj(struct padata_instance);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial",
					   WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE | WQ_PERCPU,
					   1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_p_mask;
	if (!alloc_cpumask_var(&pinst->validate_cpumask, GFP_KERNEL))
		goto err_free_cb_mask;

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_v_mask;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpuhp_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_v_mask:
	free_cpumask_var(pinst->validate_cpumask);
err_free_cb_mask:
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_p_mask:
	free_cpumask_var(pinst->cpumask.pcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc_obj(*ps);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps, -1);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	padata_put_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
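
/*
 * Example lifecycle (an illustrative sketch, not part of this file):
 *
 *	struct padata_instance *pinst = padata_alloc("my_inst");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	// submit work through padata_do_parallel(ps, ...); each object
 *	// eventually calls padata_do_serial()
 *
 *	padata_free_shell(ps);	// after all submitted objects are serialized
 *	padata_free(pinst);
 */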

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, padata_cpu_offline);
	if (ret < 0)
		goto err;
	hp_online = ret;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_objs(struct padata_work, possible_cpus);
	if (!padata_works)
		goto remove_online_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_online_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}