Lines Matching +full:hardware +full:- +full:wise

1 // SPDX-License-Identifier: GPL-2.0-only
4 * - Monitoring code
34 * @rmid_free_lru - A least recently used list of free RMIDs
41 * @rmid_limbo_count - count of currently unused but (potentially)
50 * @rmid_entry - The entry in the limbo and free lists.
67 * RMID available for re-allocation.
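The fragments above describe the RMID lifecycle: free RMIDs sit on an LRU list, and freed but possibly still dirty RMIDs wait in limbo until their cache occupancy drops below a threshold. A minimal userspace sketch of that lifecycle, with simplified stand-ins for the kernel's types and list handling (the kernel uses list_head and appends at the tail to preserve LRU order):

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct rmid_entry. */
struct rmid_entry {
	unsigned int rmid;
	unsigned int busy;	/* domains that still show occupancy */
	struct rmid_entry *next;
};

static struct rmid_entry *rmid_free_lru;	/* clean, free RMIDs */
static unsigned int rmid_limbo_count;		/* freed but possibly dirty */

static int alloc_rmid(void)
{
	struct rmid_entry *e = rmid_free_lru;

	/* Distinguish "all in limbo, retry later" from "truly exhausted". */
	if (!e)
		return rmid_limbo_count ? -EBUSY : -ENOSPC;
	rmid_free_lru = e->next;
	return (int)e->rmid;
}

/* A limbo scan saw this entry's occupancy drop below the threshold. */
static void limbo_release(struct rmid_entry *e)
{
	if (--e->busy == 0) {
		rmid_limbo_count--;
		e->next = rmid_free_lru;
		rmid_free_lru = e;
	}
}

int main(void)
{
	struct rmid_entry e = { .rmid = 1, .busy = 1 };

	rmid_limbo_count = 1;
	printf("%d\n", alloc_rmid());	/* -EBUSY: RMID still in limbo */
	limbo_release(&e);
	printf("%d\n", alloc_rmid());	/* 1: RMID is clean again */
	return 0;
}
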
85 * 1. The threshold 0 is changed to rmid count - 1 so that no correction
88 * equal to (x86_cache_max_rmid + 1) / 8 - 1 and is from 0 up to 27.
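The index arithmetic in that comment can be checked with ordinary integer math. A small self-contained example (the concrete max-RMID values are illustrative, not tied to a particular CPU model):

#include <stdio.h>

/* cf_index = (x86_cache_max_rmid + 1) / 8 - 1, per the comment above. */
static unsigned int cf_index(unsigned int x86_cache_max_rmid)
{
	return (x86_cache_max_rmid + 1) / 8 - 1;
}

int main(void)
{
	printf("%u\n", cf_index(7));	/* 8 RMIDs   -> index 0  */
	printf("%u\n", cf_index(223));	/* 224 RMIDs -> index 27 */
	return 0;
}
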
144 WARN_ON(entry->rmid != rmid); in __rmid_entry()
165 return -EIO; in __rmid_read()
167 return -EINVAL; in __rmid_read()
181 return &hw_dom->arch_mbm_total[rmid]; in get_arch_mbm_state()
183 return &hw_dom->arch_mbm_local[rmid]; in get_arch_mbm_state()
202 /* Record any initial, non-zero count value. */ in resctrl_arch_reset_rmid()
203 __rmid_read(rmid, eventid, &am->prev_msr); in resctrl_arch_reset_rmid()
208 * Assumes that hardware counters are also reset and thus that there is
209 * no need to record initial non-zero counts.
216 memset(hw_dom->arch_mbm_total, 0, in resctrl_arch_reset_rmid_all()
217 sizeof(*hw_dom->arch_mbm_total) * r->num_rmid); in resctrl_arch_reset_rmid_all()
220 memset(hw_dom->arch_mbm_local, 0, in resctrl_arch_reset_rmid_all()
221 sizeof(*hw_dom->arch_mbm_local) * r->num_rmid); in resctrl_arch_reset_rmid_all()
226 u64 shift = 64 - width, chunks; in mbm_overflow_count()
228 chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
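mbm_overflow_count() handles MSR counters narrower than 64 bits: shifting both values left by (64 - width) makes the unsigned subtraction wrap at the hardware counter width, so one wraparound between reads is still counted correctly. A self-contained check of that trick (the 24-bit width and the sample values are just an example):

#include <stdint.h>
#include <stdio.h>

/* Delta of a width-bit counter, correct across one wraparound. */
static uint64_t overflow_count(uint64_t prev, uint64_t cur,
			       unsigned int width)
{
	uint64_t shift = 64 - width;

	/* Unsigned wraparound in the top bits yields the modular delta. */
	return ((cur << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	/* A 24-bit counter that wrapped: 0xfffff0 -> 0x000010 is 0x20. */
	printf("%llu\n",
	       (unsigned long long)overflow_count(0xfffff0, 0x10, 24));
	return 0;
}
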
241 if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) in resctrl_arch_rmid_read()
242 return -EINVAL; in resctrl_arch_rmid_read()
250 am->chunks += mbm_overflow_count(am->prev_msr, msr_val, in resctrl_arch_rmid_read()
251 hw_res->mbm_width); in resctrl_arch_rmid_read()
252 chunks = get_corrected_mbm_count(rmid, am->chunks); in resctrl_arch_rmid_read()
253 am->prev_msr = msr_val; in resctrl_arch_rmid_read()
258 *val = chunks * hw_res->mon_scale; in resctrl_arch_rmid_read()
284 nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid); in __check_limbo()
285 if (nrmid >= r->num_rmid) in __check_limbo()
290 if (resctrl_arch_rmid_read(r, d, entry->rmid, in __check_limbo()
298 clear_bit(entry->rmid, d->rmid_busy_llc); in __check_limbo()
299 if (!--entry->busy) { in __check_limbo()
300 rmid_limbo_count--; in __check_limbo()
301 list_add_tail(&entry->list, &rmid_free_lru); in __check_limbo()
310 return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid; in has_busy_rmid()
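__check_limbo() and has_busy_rmid() both walk a per-domain bitmap of busy RMIDs, relying on find_next_bit()/find_first_bit() returning the bitmap size when no bit is set. A userspace approximation of that scan; find_next_rmid() here is a hypothetical single-word stand-in for the kernel's find_next_bit():

#include <stdio.h>

#define NUM_RMID 64U

static unsigned long rmid_busy_llc;	/* one word covers 64 RMIDs */

/* Simplified find_next_bit() over a single word. */
static unsigned int find_next_rmid(unsigned long map, unsigned int start)
{
	for (unsigned int i = start; i < NUM_RMID; i++)
		if (map & (1UL << i))
			return i;
	return NUM_RMID;	/* "not found", as find_next_bit() does */
}

static int has_busy_rmid(void)
{
	return find_next_rmid(rmid_busy_llc, 0) != NUM_RMID;
}

int main(void)
{
	rmid_busy_llc = (1UL << 3) | (1UL << 17);

	for (unsigned int r = find_next_rmid(rmid_busy_llc, 0);
	     r < NUM_RMID; r = find_next_rmid(rmid_busy_llc, r + 1))
		printf("RMID %u still busy\n", r);

	printf("any busy? %d\n", has_busy_rmid());
	return 0;
}
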
325 return rmid_limbo_count ? -EBUSY : -ENOSPC; in alloc_rmid()
329 list_del(&entry->list); in alloc_rmid()
331 return entry->rmid; in alloc_rmid()
341 entry->busy = 0; in add_rmid_to_limbo()
343 list_for_each_entry(d, &r->domains, list) { in add_rmid_to_limbo()
344 if (cpumask_test_cpu(cpu, &d->cpu_mask)) { in add_rmid_to_limbo()
345 err = resctrl_arch_rmid_read(r, d, entry->rmid, in add_rmid_to_limbo()
358 set_bit(entry->rmid, d->rmid_busy_llc); in add_rmid_to_limbo()
359 entry->busy++; in add_rmid_to_limbo()
363 if (entry->busy) in add_rmid_to_limbo()
366 list_add_tail(&entry->list, &rmid_free_lru); in add_rmid_to_limbo()
383 list_add_tail(&entry->list, &rmid_free_lru); in free_rmid()
391 return &d->mbm_total[rmid]; in get_mbm_state()
393 return &d->mbm_local[rmid]; in get_mbm_state()
404 if (rr->first) { in __mon_event_count()
405 resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid); in __mon_event_count()
406 m = get_mbm_state(rr->d, rmid, rr->evtid); in __mon_event_count()
412 rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval); in __mon_event_count()
413 if (rr->err) in __mon_event_count()
414 return rr->err; in __mon_event_count()
416 rr->val += tval; in __mon_event_count()
422 * mbm_bw_count() - Update bw count from values previously read by
434 struct mbm_state *m = &rr->d->mbm_local[rmid]; in mbm_bw_count()
437 cur_bytes = rr->val; in mbm_bw_count()
438 bytes = cur_bytes - m->prev_bw_bytes; in mbm_bw_count()
439 m->prev_bw_bytes = cur_bytes; in mbm_bw_count()
443 if (m->delta_comp) in mbm_bw_count()
444 m->delta_bw = abs(cur_bw - m->prev_bw); in mbm_bw_count()
445 m->delta_comp = false; in mbm_bw_count()
446 m->prev_bw = cur_bw; in mbm_bw_count()
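mbm_bw_count() turns two successive byte counts into a bandwidth figure by differencing against the previous reading. A sketch of the same arithmetic, assuming the once-per-second poll that the overflow handler provides (the MB/s unit and the sample numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MB (1024ULL * 1024ULL)

struct bw_state {
	uint64_t prev_bytes;	/* byte count at the previous poll */
	uint64_t prev_bw;	/* last computed bandwidth, MB/s */
};

/* Called once per second, mirroring the MBM overflow handler cadence. */
static uint64_t bw_count(struct bw_state *s, uint64_t cur_bytes)
{
	uint64_t bytes = cur_bytes - s->prev_bytes;

	s->prev_bytes = cur_bytes;
	s->prev_bw = bytes / MB;	/* bytes per 1s interval -> MB/s */
	return s->prev_bw;
}

int main(void)
{
	struct bw_state s = { 0 };

	bw_count(&s, 0);		/* establish a baseline */
	printf("%llu MB/s\n",
	       (unsigned long long)bw_count(&s, 512 * MB));
	return 0;
}
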
460 rdtgrp = rr->rgrp; in mon_event_count()
462 ret = __mon_event_count(rdtgrp->mon.rmid, rr); in mon_event_count()
469 head = &rdtgrp->mon.crdtgrp_list; in mon_event_count()
471 if (rdtgrp->type == RDTCTRL_GROUP) { in mon_event_count()
473 if (__mon_event_count(entry->mon.rmid, rr) == 0) in mon_event_count()
480 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. in mon_event_count()
484 rr->err = 0; in mon_event_count()
505 * the L2 <-> L3 traffic.
511 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
512 * phases -> mba_sc kicks in and reduces the bandwidth percentage values -> but
513 * after some time rdtgroup has mostly L2 <-> L3 traffic.
515 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
534 closid = rgrp->closid; in update_mba_bw()
535 rmid = rgrp->mon.rmid; in update_mba_bw()
536 pmbm_data = &dom_mbm->mbm_local[rmid]; in update_mba_bw()
544 cur_bw = pmbm_data->prev_bw; in update_mba_bw()
545 user_bw = dom_mba->mbps_val[closid]; in update_mba_bw()
546 delta_bw = pmbm_data->delta_bw; in update_mba_bw()
554 head = &rgrp->mon.crdtgrp_list; in update_mba_bw()
556 cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; in update_mba_bw()
557 cur_bw += cmbm_data->prev_bw; in update_mba_bw()
558 delta_bw += cmbm_data->delta_bw; in update_mba_bw()
564 * hardware. in update_mba_bw()
571 * bandwidth step is 20MBps (> user_bw - cur_bw), we would keep in update_mba_bw()
575 if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { in update_mba_bw()
576 new_msr_val = cur_msr_val - r_mba->membw.bw_gran; in update_mba_bw()
579 new_msr_val = cur_msr_val + r_mba->membw.bw_gran; in update_mba_bw()
587 * Delta values are updated dynamically package-wise for each in update_mba_bw()
591 * linear and only "approximately" linear even when the hardware in update_mba_bw()
596 pmbm_data->delta_comp = true; in update_mba_bw()
598 cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; in update_mba_bw()
599 cmbm_data->delta_comp = true; in update_mba_bw()
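The update_mba_bw() fragments above form a simple linear controller: step the throttle value down by one granularity while measured bandwidth exceeds the user limit, and step it back up only when there is at least delta_bw of headroom, to avoid oscillating around the limit. A standalone sketch of that decision; mba_step(), min_bw, bw_gran and the MBps figures are illustrative stand-ins, not the kernel's exact code:

#include <stdio.h>

#define MAX_MBA_BW 100U		/* throttle value is a percentage here */

/* Illustrative parameters; real values come from the MBA resource. */
static const unsigned int min_bw = 10, bw_gran = 10;

/* One controller step for the current throttle value. */
static unsigned int mba_step(unsigned int cur_msr_val, unsigned int cur_bw,
			     unsigned int user_bw, unsigned int delta_bw)
{
	if (cur_msr_val > min_bw && user_bw < cur_bw)
		return cur_msr_val - bw_gran;	/* over the limit: throttle */
	if (cur_msr_val < MAX_MBA_BW && user_bw > cur_bw + delta_bw)
		return cur_msr_val + bw_gran;	/* safe headroom: relax */
	return cur_msr_val;			/* otherwise hold steady */
}

int main(void)
{
	/* 90% throttle, measuring 120 MBps against a 100 MBps limit. */
	printf("%u\n", mba_step(90, 120, 100, 20));	/* -> 80 */
	/* 80% throttle, measuring 60 MBps, 20 MBps delta guard. */
	printf("%u\n", mba_step(80, 60, 100, 20));	/* -> 90 */
	return 0;
}
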
654 schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); in cqm_handle_limbo()
664 cpu = cpumask_any(&dom->cpu_mask); in cqm_setup_limbo_handler()
665 dom->cqm_work_cpu = cpu; in cqm_setup_limbo_handler()
667 schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); in cqm_setup_limbo_handler()
688 mbm_update(r, d, prgrp->mon.rmid); in mbm_handle_overflow()
690 head = &prgrp->mon.crdtgrp_list; in mbm_handle_overflow()
692 mbm_update(r, d, crgrp->mon.rmid); in mbm_handle_overflow()
698 schedule_delayed_work_on(cpu, &d->mbm_over, delay); in mbm_handle_overflow()
711 cpu = cpumask_any(&dom->cpu_mask); in mbm_setup_overflow_handler()
712 dom->mbm_work_cpu = cpu; in mbm_setup_overflow_handler()
713 schedule_delayed_work_on(cpu, &dom->mbm_over, delay); in mbm_setup_overflow_handler()
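Both the limbo and overflow handlers use the same kernel pattern: a delayed work item pinned to a CPU in the domain, which re-arms itself at the end of each run. A minimal sketch of that pattern using the standard workqueue API (the work name, callback body and one-second interval are placeholders):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... read counters, update state ... */

	/* Re-arm so the poll keeps running once per second. */
	schedule_delayed_work(&poll_work, msecs_to_jiffies(1000));
}

static void poll_start(int cpu)
{
	/* Pin the first run to a CPU in the domain, as resctrl does. */
	schedule_delayed_work_on(cpu, &poll_work, msecs_to_jiffies(1000));
}
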
721 nr_rmids = r->num_rmid; in dom_data_init()
724 return -ENOMEM; in dom_data_init()
728 INIT_LIST_HEAD(&entry->list); in dom_data_init()
730 entry->rmid = i; in dom_data_init()
731 list_add_tail(&entry->list, &rmid_free_lru); in dom_data_init()
739 list_del(&entry->list); in dom_data_init()
768 INIT_LIST_HEAD(&r->evt_list); in l3_mon_evt_init()
771 list_add_tail(&llc_occupancy_event.list, &r->evt_list); in l3_mon_evt_init()
773 list_add_tail(&mbm_total_event.list, &r->evt_list); in l3_mon_evt_init()
775 list_add_tail(&mbm_local_event.list, &r->evt_list); in l3_mon_evt_init()
786 hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale; in rdt_get_mon_l3_config()
787 r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1; in rdt_get_mon_l3_config()
788 hw_res->mbm_width = MBM_CNTR_WIDTH_BASE; in rdt_get_mon_l3_config()
791 hw_res->mbm_width += mbm_offset; in rdt_get_mon_l3_config()
802 threshold = resctrl_rmid_realloc_limit / r->num_rmid; in rdt_get_mon_l3_config()
806 * to the nearest multiple of hw_res->mon_scale so it matches a in rdt_get_mon_l3_config()
807 * value the hardware will measure. mon_scale may not be a power of 2. in rdt_get_mon_l3_config()
828 r->mon_capable = true; in rdt_get_mon_l3_config()
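The reallocation threshold is an occupancy in bytes, but the hardware reports occupancy in units of mon_scale, so the threshold is clamped to a multiple of mon_scale to stay comparable with measured values. A sketch of that clamping; round_mon_val() is a hypothetical stand-in (integer divide-and-multiply rounds down to a multiple) and all values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Clamp a byte threshold to a multiple of the hardware's mon_scale. */
static uint32_t round_mon_val(uint32_t val, uint32_t mon_scale)
{
	return val / mon_scale * mon_scale;
}

int main(void)
{
	uint32_t limit = 31457280;	/* e.g. a 30 MB LLC */
	uint32_t num_rmid = 224, mon_scale = 57344;	/* not a power of 2 */
	uint32_t threshold = limit / num_rmid;

	printf("%u -> %u\n", threshold,
	       round_mon_val(threshold, mon_scale));
	return 0;
}
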
837 cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1; in intel_rdt_mbm_apply_quirk()