// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 */

#include <linux/cpu.h>

/*
 * Excerpt from bw_validate(): check a user-supplied memory-bandwidth value
 * against the resource's limits. Elided code is marked with "...".
 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}
	/* ... */
	if (bw < r->membw.min_bw || bw > r->membw.max_bw) {
		rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
				    bw, r->membw.min_bw, r->membw.max_bw);
		return false;
	}

	/* Round the accepted value up to the bandwidth granularity. */
	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
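
/*
 * Aside: a minimal user-space sketch (not kernel code) of the same
 * validate-and-round step. The limits min_bw=10, max_bw=100, bw_gran=10 are
 * illustrative values, and ROUNDUP mirrors the kernel's roundup() macro for
 * positive granularities.
 */
#include <stdbool.h>
#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static bool bw_validate_sketch(unsigned int bw, unsigned int *out)
{
	const unsigned int min_bw = 10, max_bw = 100, bw_gran = 10;

	if (bw < min_bw || bw > max_bw)
		return false;			/* out of range */
	*out = ROUNDUP(bw, bw_gran);		/* e.g. 37 -> 40 */
	return true;
}

int main(void)
{
	unsigned int v;

	if (bw_validate_sketch(37, &v))
		printf("37 rounds up to %u\n", v);	/* prints 40 */
	return 0;
}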

/*
 * Excerpt from parse_bw(): stage a memory-bandwidth value for one control
 * domain.
 */
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	u32 bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	/* The MBA software controller keeps MBps values per domain instead. */
	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
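
/*
 * Aside: the stage-then-commit pattern above, reduced to a user-space sketch
 * (not kernel code). A value is first staged per domain, a duplicate domain
 * in the same request is rejected, and a separate commit step (see
 * resctrl_arch_update_domains() below) applies everything at once. All names
 * here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct staged_config {
	unsigned int new_ctrl;
	bool have_new_ctrl;
};

static int stage(struct staged_config *cfg, unsigned int val, int dom_id)
{
	if (cfg->have_new_ctrl) {
		fprintf(stderr, "Duplicate domain %d\n", dom_id);
		return -1;
	}
	cfg->new_ctrl = val;
	cfg->have_new_ctrl = true;
	return 0;
}

int main(void)
{
	struct staged_config cfg = { 0 };

	stage(&cfg, 0xf, 0);	/* ok: stages 0xf for domain 0 */
	stage(&cfg, 0x3, 0);	/* rejected: domain 0 already staged */
	return 0;
}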

/*
 * Check whether a cache bit mask is valid.
 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
 *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
 *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
 *
 * Haswell does not support a non-contiguous 1s value and additionally
 * requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
/* Excerpt from cbm_validate(), which the comment above documents. */
	u32 supported_bits = BIT_MASK(r->cache.cbm_len) - 1;
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit, val;

	if (kstrtoul(buf, 16, &val)) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > supported_bits) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmasks &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}
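
/*
 * Aside: a user-space sketch (not kernel code) that mirrors the two checks
 * above: CPUID.0x10.1:ECX[3] for L3 non-contiguous (sparse) bitmask support,
 * and a contiguity test equivalent to the find_first_bit/find_next_zero_bit
 * logic. Assumes GCC/clang on x86; the function names are illustrative.
 */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool l3_sparse_masks_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x10, 1, &eax, &ebx, &ecx, &edx))
		return false;
	return ecx & (1u << 3);		/* CPUID.0x10.1:ECX[3] */
}

static bool cbm_is_contiguous(unsigned long val)
{
	if (!val)
		return false;
	val >>= __builtin_ctzl(val);	/* drop trailing zeros */
	return (val & (val + 1)) == 0;	/* 2^k - 1 means contiguous 1s */
}

int main(void)
{
	printf("L3 sparse masks: %s\n",
	       l3_sparse_masks_supported() ? "yes" : "no");
	printf("0xf0 contiguous: %d\n", cbm_is_contiguous(0xf0));	/* 1 */
	printf("0xf0f contiguous: %d\n", cbm_is_contiguous(0xf0f));	/* 0 */
	return 0;
}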

/*
 * Excerpt from parse_cbm(): stage a cache bit mask for one control domain
 * after checking it against pseudo-locked and exclusive regions.
 */
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
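
/*
 * Aside: at its core, the overlap test that rdtgroup_cbm_overlaps() performs
 * is an intersection of candidate bitmasks. A simplified user-space sketch
 * (not kernel code; the real check also distinguishes exclusive groups and
 * CDP peer types), with a hypothetical table of per-closid CBMs:
 */
#include <stdbool.h>
#include <stdio.h>

static bool cbm_overlaps(unsigned int cbm, const unsigned int *ctrl_val,
			 int num_closid, int self)
{
	for (int i = 0; i < num_closid; i++) {
		if (i == self)
			continue;	/* a group may overlap itself */
		if (cbm & ctrl_val[i])
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int ctrl_val[] = { 0xff0, 0x00f };	/* closid 0 and 1 */

	/*
	 * 0x0f0 intersects closid 0's 0xff0, so an exclusive group asking
	 * for it would be rejected.
	 */
	printf("%d\n", cbm_overlaps(0x0f0, ctrl_val, 2, 1));	/* 1 */
	return 0;
}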

/*
 * Excerpt from parse_line(): parse one "<domain>=<value>[;<domain>=<value>]"
 * schema line, staging each value via parse_cbm() or parse_bw().
 */
	enum resctrl_conf_type t = s->conf_type;
	ctrlval_parser_t *parse_ctrlval = NULL;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	/* ... */

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	switch (r->schema_fmt) {
	case RESCTRL_SCHEMA_BITMAP:
		parse_ctrlval = &parse_cbm;
		break;
	case RESCTRL_SCHEMA_RANGE:
		parse_ctrlval = &parse_bw;
		break;
	}
	if (WARN_ON_ONCE(!parse_ctrlval))
		return -EINVAL;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		if (d->hdr.id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * In pseudo-locking setup mode and just
				 * parsed a valid CBM that should be
				 * pseudo-locked. Only one locked region per
				 * resource group and domain so just do
				 * the required initialization for single
				 * usage of a pseudo-locked region.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
			}
			goto next;
		}
	}
	return -EINVAL;
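
/*
 * Aside: the "<dom>=<val>;<dom>=<val>" tokenizing that parse_line() performs
 * with strsep()/kstrtoul(), redone as a user-space sketch (not kernel code).
 * An empty token ends the walk, mirroring the line[0] == '\0' check above.
 */
#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[] = "0=f;1=3f";
	char *rest = line, *dom, *id;

	while ((dom = strsep(&rest, ";")) && dom[0] != '\0') {
		id = strsep(&dom, "=");
		if (!dom) {
			fprintf(stderr, "Missing '=' or non-numeric domain\n");
			return 1;
		}
		printf("domain %lu -> value 0x%lx\n",
		       strtoul(id, NULL, 10), strtoul(dom, NULL, 16));
	}
	return 0;
}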

/*
 * Excerpt from resctrl_arch_update_one(): apply a single control value; the
 * caller must run this on a CPU belonging to the domain.
 */
	if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;
	/* ... */
	hw_res->msr_update(&msr_param);

	return 0;

/*
 * Excerpt from resctrl_arch_update_domains(): commit every staged config for
 * the given closid, programming the hardware once per changed domain.
 */
	struct resctrl_staged_config *cfg;
	/* ... */

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		hw_dom = resctrl_to_arch_ctrl_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = resctrl_get_config_index(closid, t);
			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
				continue;
			hw_dom->ctrl_val[idx] = cfg->new_ctrl;
			/* ... */
		}
		/* Program the MSRs on one CPU of the domain. */
		smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
	}

/*
 * Excerpt from rdtgroup_parse_resource(): match the resource name against
 * each schema, then hand the rest of the line to parse_line().
 */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;

/*
 * Excerpt from rdtgroup_schemata_write(): the handler for writes to the
 * schemata file.
 */
	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		/* ... */
		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
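
/*
 * Usage aside: writing a schemata line from user space (not kernel code).
 * Assumes the resctrl filesystem is mounted at /sys/fs/resctrl, an L3 schema
 * exists, and a group named "grp1" (illustrative) has been created. The
 * trailing newline is required, as the nbytes check above enforces.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/resctrl/grp1/schemata";
	const char *line = "L3:0=ff;1=ff\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, line, strlen(line)) < 0)
		perror("write");	/* see info/last_cmd_status for details */
	close(fd);
	return 0;
}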

/* Excerpt from resctrl_arch_get_config(): return the cached control value. */
	return hw_dom->ctrl_val[idx];

/*
 * Excerpt from show_doms(): print one "<name>:<dom>=<val>;..." line of the
 * schemata file.
 */
	struct rdt_resource *r = schema->res;
	/* ... */

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
		/* ... */
		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, schema->fmt_str, dom->hdr.id, ctrl_val);
	}

/*
 * Excerpt from rdtgroup_schemata_show(): the handler for reads of the
 * schemata file.
 */
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list)
				seq_printf(s, "%s:uninitialized\n", schema->name);
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->hdr.id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;

/*
 * Excerpt from rdtgroup_mba_mbps_event_write(): select which MBM event the
 * mba_MBps software controller uses for this group.
 */
	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (!strcmp(buf, "mbm_local_bytes")) {
		if (resctrl_arch_is_mbm_local_enabled())
			rdtgrp->mba_mbps_event = QOS_L3_MBM_LOCAL_EVENT_ID;
		else
			ret = -EINVAL;
	} else if (!strcmp(buf, "mbm_total_bytes")) {
		if (resctrl_arch_is_mbm_total_enabled())
			rdtgrp->mba_mbps_event = QOS_L3_MBM_TOTAL_EVENT_ID;
		else
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;

/*
 * Excerpt from rdtgroup_mba_mbps_event_show(): report the selected MBM event.
 */
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		switch (rdtgrp->mba_mbps_event) {
		case QOS_L3_MBM_LOCAL_EVENT_ID:
			seq_puts(s, "mbm_local_bytes\n");
			break;
		case QOS_L3_MBM_TOTAL_EVENT_ID:
			seq_puts(s, "mbm_total_bytes\n");
			break;
		default:
			pr_warn_once("Bad event %d\n", rdtgrp->mba_mbps_event);
			ret = -EINVAL;
			break;
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;

/*
 * Excerpt from resctrl_find_domain(): walk a sorted domain list looking for
 * an id; stopping early doubles as finding the insertion position.
 */
		if (id == d->id)
			return d;
		/* The list is sorted: once id is passed, it cannot be present. */
		if (id < d->id)
			break;
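
/*
 * Aside: the same find-or-report-position pattern as a user-space sketch
 * (not kernel code), with a plain sorted array standing in for the kernel's
 * list_head walk. Returns the index of id, or -(insert_pos + 1) if absent.
 */
#include <stdio.h>

static int find_domain(const int *ids, int n, int id)
{
	for (int i = 0; i < n; i++) {
		if (id == ids[i])
			return i;
		if (id < ids[i])	/* sorted: id cannot appear later */
			return -(i + 1);
	}
	return -(n + 1);
}

int main(void)
{
	int ids[] = { 0, 1, 3 };

	printf("%d\n", find_domain(ids, 3, 3));	/* 2: found at index 2 */
	printf("%d\n", find_domain(ids, 3, 2));	/* -3: would insert at 2 */
	return 0;
}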

/*
 * Excerpt from mon_event_read(): read one monitoring event counter on a CPU
 * belonging to the domain.
 */
	int cpu;

	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/* Set up the parameters that mon_event_count() will read. */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->first = first;
	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	if (IS_ERR(rr->arch_mon_ctx)) {
		rr->err = -EINVAL;
		return;
	}

	cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);

	/*
	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but
	 * are all the CPUs nohz_full? If yes, pick a CPU to IPI.
	 */
	if (tick_nohz_full_cpu(cpu))
		smp_call_function_any(cpumask, mon_event_count, rr, 1);
	else
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);

	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);

/*
 * Excerpt from rdtgroup_mondata_show(): the handler for reads of the files
 * under mon_data/.
 */
	struct kernfs_open_file *of = m->private;
	/* ... */

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	/* ... */

	if (md.u.sum) {
		/*
		 * Sum the event across all monitor domains sharing the L3
		 * cache id given in domid.
		 */
		list_for_each_entry(d, &r->mon_domains, hdr.list) {
			if (d->ci->id == domid) {
				rr.ci = d->ci;
				mon_event_read(&rr, r, NULL, rdtgrp,
					       &d->ci->shared_cpu_map, evtid, false);
				goto checkresult;
			}
		}
		ret = -ENOENT;
		goto out;
	} else {
		/* A single domain: find the one with this domid. */
		hdr = resctrl_find_domain(&r->mon_domains, domid, NULL);
		if (!hdr || WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN)) {
			ret = -ENOENT;
			goto out;
		}
		d = container_of(hdr, struct rdt_mon_domain, hdr);
		mon_event_read(&rr, r, d, rdtgrp, &d->hdr.cpu_mask, evtid, false);
	}

checkresult:
	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;