Lines Matching +refs:get +refs:addr +refs:attrs (kernel/workqueue.c)

222 	struct workqueue_attrs	*attrs;		/* I: worker attributes */  member
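
For context, the attrs member above is a struct workqueue_attrs. Roughly, for recent kernels (the exact field set varies by version), these are the fields the matches below keep referring to:

struct workqueue_attrs {
	int		nice;		/* nice level */
	cpumask_var_t	cpumask;	/* allowed CPUs */
	cpumask_var_t	__pod_cpumask;	/* internal: CPUs within the same pod */
	bool		affn_strict;	/* affinity scope is strict */
	enum wq_affn_scope affn_scope;	/* internal: unbound CPU affinity scope */
	bool		ordered;	/* internal: work items must run one by one */
};
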
606 static void *work_debug_hint(void *addr) in work_debug_hint() argument
608 return ((struct work_struct *) addr)->func; in work_debug_hint()
611 static bool work_is_static_object(void *addr) in work_is_static_object() argument
613 struct work_struct *work = addr; in work_is_static_object()
622 static bool work_fixup_init(void *addr, enum debug_obj_state state) in work_fixup_init() argument
624 struct work_struct *work = addr; in work_fixup_init()
640 static bool work_fixup_free(void *addr, enum debug_obj_state state) in work_fixup_free() argument
642 struct work_struct *work = addr; in work_fixup_free()
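
The four work_* callbacks above are the CONFIG_DEBUG_OBJECTS_WORK hooks for work items; they plug into a debug_obj_descr that looks roughly like this in workqueue.c:

static const struct debug_obj_descr work_debug_descr = {
	.name			= "work_struct",
	.debug_hint		= work_debug_hint,
	.is_static_object	= work_is_static_object,
	.fixup_init		= work_fixup_init,
	.fixup_free		= work_fixup_free,
};
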
748 return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask; in unbound_effective_cpumask()
1205 int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0; in bh_pool_irq_work()
1220 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in kick_bh_pool()
1267 if (!pool->attrs->affn_strict && in kick_pool()
1268 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { in kick_pool()
1271 int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask, in kick_pool()
2660 if (pool->cpu < 0 && pool->attrs->affn_strict) in pool_allowed_cpus()
2661 return pool->attrs->__pod_cpumask; in pool_allowed_cpus()
2663 return pool->attrs->cpumask; in pool_allowed_cpus()
2756 pool->attrs->nice < 0 ? "H" : ""); in format_worker_id()
2815 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
3646 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in drain_dead_softirq_workfn()
3678 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in workqueue_softirq_dead()
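
The nice == HIGHPRI_NICE_LEVEL tests above all recover a pool's priority class from its attrs; HIGHPRI_NICE_LEVEL is MIN_NICE (-20) in current kernels. A hypothetical helper (not in the source) capturing that pattern:

static inline bool pool_is_highpri(const struct worker_pool *pool)
{
	return pool->attrs->nice == HIGHPRI_NICE_LEVEL;	/* MIN_NICE */
}
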
4613 void free_workqueue_attrs(struct workqueue_attrs *attrs) in free_workqueue_attrs() argument
4615 if (attrs) { in free_workqueue_attrs()
4616 free_cpumask_var(attrs->cpumask); in free_workqueue_attrs()
4617 free_cpumask_var(attrs->__pod_cpumask); in free_workqueue_attrs()
4618 kfree(attrs); in free_workqueue_attrs()
4632 struct workqueue_attrs *attrs; in alloc_workqueue_attrs() local
4634 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); in alloc_workqueue_attrs()
4635 if (!attrs) in alloc_workqueue_attrs()
4637 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) in alloc_workqueue_attrs()
4639 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) in alloc_workqueue_attrs()
4642 cpumask_copy(attrs->cpumask, cpu_possible_mask); in alloc_workqueue_attrs()
4643 attrs->affn_scope = WQ_AFFN_DFL; in alloc_workqueue_attrs()
4644 return attrs; in alloc_workqueue_attrs()
4646 free_workqueue_attrs(attrs); in alloc_workqueue_attrs()
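
alloc_workqueue_attrs() hands back attrs already initialized to cpu_possible_mask and WQ_AFFN_DFL, and free_workqueue_attrs() tolerates NULL, so callers can funnel errors through a single cleanup path. A minimal sketch with a hypothetical caller (name not from the source):

static struct workqueue_attrs *example_prep_attrs(int nice)
{
	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs();	/* GFP_KERNEL; may return NULL */
	if (attrs)
		attrs->nice = nice;
	return attrs;	/* caller releases with free_workqueue_attrs() */
}
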
4671 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) in wqattrs_clear_for_pool() argument
4673 attrs->affn_scope = WQ_AFFN_NR_TYPES; in wqattrs_clear_for_pool()
4674 attrs->ordered = false; in wqattrs_clear_for_pool()
4675 if (attrs->affn_strict) in wqattrs_clear_for_pool()
4676 cpumask_copy(attrs->cpumask, cpu_possible_mask); in wqattrs_clear_for_pool()
4680 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) in wqattrs_hash() argument
4684 hash = jhash_1word(attrs->nice, hash); in wqattrs_hash()
4685 hash = jhash_1word(attrs->affn_strict, hash); in wqattrs_hash()
4686 hash = jhash(cpumask_bits(attrs->__pod_cpumask), in wqattrs_hash()
4688 if (!attrs->affn_strict) in wqattrs_hash()
4689 hash = jhash(cpumask_bits(attrs->cpumask), in wqattrs_hash()
4710 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, in wqattrs_actualize_cpumask() argument
4718 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); in wqattrs_actualize_cpumask()
4719 if (unlikely(cpumask_empty(attrs->cpumask))) in wqattrs_actualize_cpumask()
4720 cpumask_copy(attrs->cpumask, unbound_cpumask); in wqattrs_actualize_cpumask()
4725 wqattrs_pod_type(const struct workqueue_attrs *attrs) in wqattrs_pod_type() argument
4733 if (attrs->affn_scope == WQ_AFFN_DFL) in wqattrs_pod_type()
4736 scope = attrs->affn_scope; in wqattrs_pod_type()
4740 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && in wqattrs_pod_type()
4787 pool->attrs = alloc_workqueue_attrs(); in init_worker_pool()
4788 if (!pool->attrs) in init_worker_pool()
4791 wqattrs_clear_for_pool(pool->attrs); in init_worker_pool()
4912 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
5009 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) in get_unbound_pool() argument
5012 u32 hash = wqattrs_hash(attrs); in get_unbound_pool()
5020 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
5028 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { in get_unbound_pool()
5040 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
5041 wqattrs_clear_for_pool(pool->attrs); in get_unbound_pool()
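
get_unbound_pool() pairs wqattrs_hash() with wqattrs_equal(): the hash narrows the search in unbound_pool_hash, and the full comparison decides whether an existing pool can be reused instead of creating a new one. The lookup loop is roughly:

	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
		if (wqattrs_equal(pool->attrs, attrs)) {
			pool->refcnt++;		/* reuse the matching pool */
			return pool;
		}
	}
	/* no match: allocate a new pool, copy the attrs into it, hash it */
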
5156 const struct workqueue_attrs *attrs) in alloc_unbound_pwq() argument
5163 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
5201 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu) in wq_calc_pod_cpumask() argument
5203 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); in wq_calc_pod_cpumask()
5207 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); in wq_calc_pod_cpumask()
5209 if (!cpumask_intersects(attrs->__pod_cpumask, wq_online_cpumask)) { in wq_calc_pod_cpumask()
5210 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); in wq_calc_pod_cpumask()
5236 struct workqueue_attrs *attrs; /* attrs to apply */ member
5252 free_workqueue_attrs(ctx->attrs); in apply_wqattrs_cleanup()
5261 const struct workqueue_attrs *attrs, in apply_wqattrs_prepare() argument
5270 if (WARN_ON(attrs->affn_scope < 0 || in apply_wqattrs_prepare()
5271 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) in apply_wqattrs_prepare()
5285 copy_workqueue_attrs(new_attrs, attrs); in apply_wqattrs_prepare()
5305 copy_workqueue_attrs(new_attrs, attrs); in apply_wqattrs_prepare()
5308 ctx->attrs = new_attrs; in apply_wqattrs_prepare()
5336 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); in apply_wqattrs_commit()
5356 const struct workqueue_attrs *attrs) in apply_workqueue_attrs_locked() argument
5364 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); in apply_workqueue_attrs_locked()
5391 const struct workqueue_attrs *attrs) in apply_workqueue_attrs() argument
5396 ret = apply_workqueue_attrs_locked(wq, attrs); in apply_workqueue_attrs()
5443 if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) in unbound_wq_update_pwq()
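
apply_workqueue_attrs() copies the attrs it is given (note the copy_workqueue_attrs() calls above), so the caller may free them right after the call; it also rejects workqueues that are not WQ_UNBOUND. A minimal usage sketch, assuming a hypothetical caller in process context:

#include <linux/workqueue.h>

static int example_lower_wq_nice(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();	/* cpumask defaults to cpu_possible_mask */
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -10;			/* illustrative value */
	ret = apply_workqueue_attrs(wq, attrs);	/* -EINVAL unless wq is WQ_UNBOUND */
	free_workqueue_attrs(attrs);		/* attrs were copied; safe to free */
	return ret;
}
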
6162 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
6168 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); in pr_cont_pool_info()
6170 pr_cont(" nice=%d", pool->attrs->nice); in pr_cont_pool_info()
6179 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); in pr_cont_worker_id()
6620 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
6623 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
6668 struct workqueue_attrs *attrs = wq->unbound_attrs; in workqueue_online_cpu() local
6670 if (attrs) { in workqueue_online_cpu()
6671 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); in workqueue_online_cpu()
6703 struct workqueue_attrs *attrs = wq->unbound_attrs; in workqueue_offline_cpu() local
6705 if (attrs) { in workqueue_offline_cpu()
6706 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); in workqueue_offline_cpu()
7013 .get = wq_affn_dfl_get,
7101 struct workqueue_attrs *attrs; in wq_sysfs_prep_attrs() local
7105 attrs = alloc_workqueue_attrs(); in wq_sysfs_prep_attrs()
7106 if (!attrs) in wq_sysfs_prep_attrs()
7109 copy_workqueue_attrs(attrs, wq->unbound_attrs); in wq_sysfs_prep_attrs()
7110 return attrs; in wq_sysfs_prep_attrs()
7117 struct workqueue_attrs *attrs; in wq_nice_store() local
7122 attrs = wq_sysfs_prep_attrs(wq); in wq_nice_store()
7123 if (!attrs) in wq_nice_store()
7126 if (sscanf(buf, "%d", &attrs->nice) == 1 && in wq_nice_store()
7127 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) in wq_nice_store()
7128 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_nice_store()
7134 free_workqueue_attrs(attrs); in wq_nice_store()
7156 struct workqueue_attrs *attrs; in wq_cpumask_store() local
7161 attrs = wq_sysfs_prep_attrs(wq); in wq_cpumask_store()
7162 if (!attrs) in wq_cpumask_store()
7165 ret = cpumask_parse(buf, attrs->cpumask); in wq_cpumask_store()
7167 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_cpumask_store()
7171 free_workqueue_attrs(attrs); in wq_cpumask_store()
7199 struct workqueue_attrs *attrs; in wq_affn_scope_store() local
7207 attrs = wq_sysfs_prep_attrs(wq); in wq_affn_scope_store()
7208 if (attrs) { in wq_affn_scope_store()
7209 attrs->affn_scope = affn; in wq_affn_scope_store()
7210 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_affn_scope_store()
7213 free_workqueue_attrs(attrs); in wq_affn_scope_store()
7231 struct workqueue_attrs *attrs; in wq_affinity_strict_store() local
7238 attrs = wq_sysfs_prep_attrs(wq); in wq_affinity_strict_store()
7239 if (attrs) { in wq_affinity_strict_store()
7240 attrs->affn_strict = (bool)v; in wq_affinity_strict_store()
7241 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_affinity_strict_store()
7244 free_workqueue_attrs(attrs); in wq_affinity_strict_store()
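
The wq_*_store handlers above back the nice, cpumask, affinity_scope and affinity_strict files that WQ_SYSFS workqueues expose under /sys/bus/workqueue/devices/<wq>/. They share one shape: copy the current unbound_attrs via wq_sysfs_prep_attrs(), change one field, re-apply, free the copy. A condensed sketch of that pattern as it would look inside workqueue.c (hypothetical helper; the real handlers also parse the user buffer and validate the value):

static int example_set_wq_nice(struct workqueue_struct *wq, int nice)
{
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	apply_wqattrs_lock();
	attrs = wq_sysfs_prep_attrs(wq);	/* copy of wq->unbound_attrs */
	if (attrs) {
		attrs->nice = nice;
		ret = apply_workqueue_attrs_locked(wq, attrs);
	}
	apply_wqattrs_unlock();
	free_workqueue_attrs(attrs);		/* NULL-safe */
	return ret;
}
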
7669 .get = param_get_ulong,
7712 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in init_cpu_worker_pool()
7713 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); in init_cpu_worker_pool()
7714 pool->attrs->nice = nice; in init_cpu_worker_pool()
7715 pool->attrs->affn_strict = true; in init_cpu_worker_pool()
7802 struct workqueue_attrs *attrs; in workqueue_init_early() local
7804 BUG_ON(!(attrs = alloc_workqueue_attrs())); in workqueue_init_early()
7805 attrs->nice = std_nice[i]; in workqueue_init_early()
7806 unbound_std_wq_attrs[i] = attrs; in workqueue_init_early()
7812 BUG_ON(!(attrs = alloc_workqueue_attrs())); in workqueue_init_early()
7813 attrs->nice = std_nice[i]; in workqueue_init_early()
7814 attrs->ordered = true; in workqueue_init_early()
7815 ordered_wq_attrs[i] = attrs; in workqueue_init_early()
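
The workqueue_init_early() fragments above come from the loop that builds the default attribute sets for unbound and ordered workqueues, one pair per standard nice level. Reconstructed roughly for context (exact form varies by kernel version):

	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
		struct workqueue_attrs *attrs;

		/* default attrs for unbound workqueues at this nice level */
		BUG_ON(!(attrs = alloc_workqueue_attrs()));
		attrs->nice = std_nice[i];
		unbound_std_wq_attrs[i] = attrs;

		/* ordered variant: same nice level, ordered execution */
		BUG_ON(!(attrs = alloc_workqueue_attrs()));
		attrs->nice = std_nice[i];
		attrs->ordered = true;
		ordered_wq_attrs[i] = attrs;
	}
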