Lines matching full:pd in kernel/padata.c (pd is the per-shell struct parallel_data carrying padata's cpumasks, reorder state, and refcount)

60 static void padata_free_pd(struct parallel_data *pd);
63 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) in padata_index_to_cpu() argument
67 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
69 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
74 static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr) in padata_cpu_hash() argument
80 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
82 return padata_index_to_cpu(pd, cpu_index); in padata_cpu_hash()
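
The hits at 60-82 cover the sequence-number-to-CPU mapping. Below is a hedged reconstruction of the two helpers, assuming kernel/padata.c from roughly the v5.11 to v5.15 era (which these line numbers and the atomic_t refcnt elsewhere in the listing fit); bodies may differ slightly in other releases:

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	/* Walk cpu_index steps into the parallel cpumask. */
	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/* Hash sequence numbers to CPUs: seq_nr mod number of CPUs in use. */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

Every sequence number therefore lands deterministically on one pcpu CPU, which is what lets padata_do_serial find the same reorder list again later.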
185 struct parallel_data *pd; in padata_do_parallel() local
190 pd = rcu_dereference_bh(ps->pd); in padata_do_parallel()
196 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) { in padata_do_parallel()
197 if (!cpumask_weight(pd->cpumask.cbcpu)) in padata_do_parallel()
201 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu); in padata_do_parallel()
203 cpu = cpumask_first(pd->cpumask.cbcpu); in padata_do_parallel()
205 cpu = cpumask_next(cpu, pd->cpumask.cbcpu); in padata_do_parallel()
214 atomic_inc(&pd->refcnt); in padata_do_parallel()
215 padata->pd = pd; in padata_do_parallel()
219 padata->seq_nr = ++pd->seq_nr; in padata_do_parallel()
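
The padata_do_parallel() hits (185-219) show the submission path: pick up the live pd under RCU, fall back to a valid callback CPU if needed, then pin pd and stamp the job. A condensed, hedged sketch of the surrounding body (elisions marked; the flag names and the work handoff are filled in from the same assumed source):

int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	/* Fall back to a valid callback CPU and report it to the caller. */
	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	/* Pin pd for the lifetime of this job; dropped after serialization. */
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	padata->seq_nr = ++pd->seq_nr;	/* under padata_works_lock upstream */

	/*
	 * Elided (hedged): padata->parallel() is handed off to
	 * pinst->parallel_wq, or run inline if the works limit is hit.
	 */
	err = 0;
out:
	rcu_read_unlock_bh();
	return err;
}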
251 static struct padata_priv *padata_find_next(struct parallel_data *pd, in padata_find_next() argument
256 int cpu = pd->cpu; in padata_find_next()
258 reorder = per_cpu_ptr(pd->reorder_list, cpu); in padata_find_next()
272 if (padata->seq_nr != pd->processed) { in padata_find_next()
279 ++pd->processed; in padata_find_next()
280 pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); in padata_find_next()
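
padata_find_next() (251-280) is the heart of in-order completion: it only ever inspects the reorder list of pd->cpu, the CPU that must hold the next sequence number. Hedged reconstruction from the same assumed source:

static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Handles the rare case where two or more parallel jobs have hashed
	 * to the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}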
287 static void padata_reorder(struct parallel_data *pd) in padata_reorder() argument
289 struct padata_instance *pinst = pd->ps->pinst; in padata_reorder()
305 if (!spin_trylock_bh(&pd->lock)) in padata_reorder()
309 padata = padata_find_next(pd, true); in padata_reorder()
320 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
329 spin_unlock_bh(&pd->lock); in padata_reorder()
335 * Ensure reorder queue is read after pd->lock is dropped so we see in padata_reorder()
341 reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); in padata_reorder()
342 if (!list_empty(&reorder->list) && padata_find_next(pd, false)) in padata_reorder()
343 queue_work(pinst->serial_wq, &pd->reorder_work); in padata_reorder()
348 struct parallel_data *pd; in invoke_padata_reorder() local
351 pd = container_of(work, struct parallel_data, reorder_work); in invoke_padata_reorder()
352 padata_reorder(pd); in invoke_padata_reorder()
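
padata_reorder() (287-343) drains jobs in sequence order and hands each to its callback CPU's serial queue; invoke_padata_reorder() is the handler for the reorder_work queued at 343 when new objects raced in after the lock was dropped. Hedged reconstruction:

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	struct padata_serial_queue *squeue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cb_cpu;

	/*
	 * Only one CPU dequeues from the reorder lists at a time; on a
	 * contended trylock the current holder also handles anything
	 * enqueued while it holds pd->lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);
		if (!padata)	/* next object still in flight elsewhere */
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial.  Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}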
359 struct parallel_data *pd; in padata_serial_worker() local
365 pd = squeue->pd; in padata_serial_worker()
386 if (atomic_sub_and_test(cnt, &pd->refcnt)) in padata_serial_worker()
387 padata_free_pd(pd); in padata_serial_worker()
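
padata_serial_worker() (359-387) runs each completed job's serial callback and drops one pd reference per job; the final drop frees pd, which is why this worker must be able to call padata_free_pd(). Hedged sketch:

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	/* Detach the whole batch under the lock, then work lock-free. */
	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	/* Drop one reference per serialized job; last one frees pd. */
	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}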
400 struct parallel_data *pd = padata->pd; in padata_do_serial() local
401 int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); in padata_do_serial()
402 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); in padata_do_serial()
415 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb in padata_do_serial()
420 padata_reorder(pd); in padata_do_serial()
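
padata_do_serial() (400-420) re-hashes the finished job to the reorder list chosen by its sequence number and inserts it in ascending seq_nr order before kicking the reorderer. Hedged reconstruction:

void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;

	spin_lock(&reorder->lock);
	/* Keep the list sorted in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &reorder->list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}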
433 /* Restrict parallel_wq workers to pd->cpumask.pcpu. */ in padata_setup_cpumasks()
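
The lone hit at 433 is a comment inside padata_setup_cpumasks(). A hedged sketch of how that restriction is applied; the workqueue_attrs calls are an assumption from the same era's source (note alloc_workqueue_attrs() took a gfp argument before v5.3):

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}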
540 static void padata_init_squeues(struct parallel_data *pd) in padata_init_squeues() argument
545 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_init_squeues()
546 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
547 squeue->pd = pd; in padata_init_squeues()
554 static void padata_init_reorder_list(struct parallel_data *pd) in padata_init_reorder_list() argument
559 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_init_reorder_list()
560 list = per_cpu_ptr(pd->reorder_list, cpu); in padata_init_reorder_list()
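
The two init helpers (540-560) wire each per-CPU structure back to pd: serial queues for every callback CPU, reorder lists for every parallel CPU. Hedged reconstruction, including the small __padata_list_init() helper assumed from the same source:

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}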
569 struct parallel_data *pd; in padata_alloc_pd() local
571 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); in padata_alloc_pd()
572 if (!pd) in padata_alloc_pd()
575 pd->reorder_list = alloc_percpu(struct padata_list); in padata_alloc_pd()
576 if (!pd->reorder_list) in padata_alloc_pd()
579 pd->squeue = alloc_percpu(struct padata_serial_queue); in padata_alloc_pd()
580 if (!pd->squeue) in padata_alloc_pd()
583 pd->ps = ps; in padata_alloc_pd()
585 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) in padata_alloc_pd()
587 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) in padata_alloc_pd()
590 cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); in padata_alloc_pd()
591 cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); in padata_alloc_pd()
593 padata_init_reorder_list(pd); in padata_alloc_pd()
594 padata_init_squeues(pd); in padata_alloc_pd()
595 pd->seq_nr = -1; in padata_alloc_pd()
596 atomic_set(&pd->refcnt, 1); in padata_alloc_pd()
597 spin_lock_init(&pd->lock); in padata_alloc_pd()
598 pd->cpu = cpumask_first(pd->cpumask.pcpu); in padata_alloc_pd()
599 INIT_WORK(&pd->reorder_work, invoke_padata_reorder); in padata_alloc_pd()
601 return pd; in padata_alloc_pd()
604 free_cpumask_var(pd->cpumask.pcpu); in padata_alloc_pd()
606 free_percpu(pd->squeue); in padata_alloc_pd()
608 free_percpu(pd->reorder_list); in padata_alloc_pd()
610 kfree(pd); in padata_alloc_pd()
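
The padata_alloc_pd() hits (569-610) trace both the happy path and the unwind; note how 604-608 free in reverse allocation order. Hedged reconstruction with the goto labels filled in (the label names are an assumption):

static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	/* Only online CPUs from the instance masks are actually used. */
	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	atomic_set(&pd->refcnt, 1);	/* base reference, dropped on replace */
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

padata_free_pd() at 615-621 then mirrors this, releasing the masks, per-CPU areas, and pd itself.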
615 static void padata_free_pd(struct parallel_data *pd) in padata_free_pd() argument
617 free_cpumask_var(pd->cpumask.pcpu); in padata_free_pd()
618 free_cpumask_var(pd->cpumask.cbcpu); in padata_free_pd()
619 free_percpu(pd->reorder_list); in padata_free_pd()
620 free_percpu(pd->squeue); in padata_free_pd()
621 kfree(pd); in padata_free_pd()
648 ps->opd = rcu_dereference_protected(ps->pd, 1); in padata_replace_one()
649 rcu_assign_pointer(ps->pd, pd_new); in padata_replace_one()
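
padata_replace_one() (648-649) is the RCU pointer swap performed during a cpumask change: the old pd is parked in ps->opd so the caller can release it after readers drain. Hedged reconstruction:

static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	/* Park the old pd and publish the new one to RCU readers. */
	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

The caller (padata_replace(), hedged) then does synchronize_rcu() and drops the base reference on each ps->opd via atomic_dec_and_test(); jobs still in flight keep the old pd alive until padata_serial_worker() releases the last count.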
1068 struct parallel_data *pd; in padata_alloc_shell() local
1078 pd = padata_alloc_pd(ps); in padata_alloc_shell()
1081 if (!pd) in padata_alloc_shell()
1085 RCU_INIT_POINTER(ps->pd, pd); in padata_alloc_shell()
1110 padata_free_pd(rcu_dereference_protected(ps->pd, 1)); in padata_free_shell()
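
Finally, the shell lifecycle (1068-1110): padata_alloc_shell() creates the first pd and publishes it with RCU_INIT_POINTER (safe because no readers can exist yet), while padata_free_shell() tears it down under the instance mutex, where rcu_dereference_protected(ps->pd, 1) is legitimate. Hedged reconstruction; the CPU-hotplug locking around the pd allocation is an assumption from the same era's source:

struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}

void padata_free_shell(struct padata_shell *ps)
{
	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}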