// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the perf-based cqm.c, but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	"resctrl: " fmt

#include <linux/cpu.h>
#include <linux/resctrl.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "internal.h"

#define CREATE_TRACE_POINTS

#include "monitor_trace.h"

/**
 * struct rmid_entry - dirty tracking for all RMID.
 * @closid:	The CLOSID for this entry.
 * @rmid:	The RMID for this entry.
 * @busy:	The number of domains with cached data using this RMID.
 * @list:	Member of the rmid_free_lru list when busy == 0.
 *
 * Depending on the architecture the correct monitor is accessed using
 * both @closid and @rmid, or @rmid only.
 *
 * Take the rdtgroup_mutex when accessing.
 */
struct rmid_entry {
	u32				closid;
	u32				rmid;
	int				busy;
	struct list_head		list;
};

/*
 * @rmid_free_lru - A least recently used list of free RMIDs
 *     These RMIDs are guaranteed to have an occupancy less than the
 *     threshold occupancy
 */
static LIST_HEAD(rmid_free_lru);

/*
 * @closid_num_dirty_rmid - The number of dirty RMID each CLOSID has.
 *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
 *     Indexed by CLOSID. Protected by rdtgroup_mutex.
 */
static u32 *closid_num_dirty_rmid;

/*
 * @rmid_limbo_count - count of currently unused but (potentially)
 *     dirty RMIDs.
 *     This counts RMIDs that no one is currently using but that
 *     may have an occupancy value > resctrl_rmid_realloc_threshold. Users can
 *     change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;

/*
 * @rmid_ptrs - Array of struct rmid_entry used by the limbo and free lists.
 */
static struct rmid_entry	*rmid_ptrs;

/*
 * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_rmid_realloc_threshold;

/*
 * This is the maximum value for the reallocation threshold, in bytes.
 */
unsigned int resctrl_rmid_realloc_limit;

/*
 * x86 and arm64 differ in their handling of monitoring.
 * x86's RMID are independent numbers, so there is only one source of traffic
 * with an RMID value of '1'.
 * arm64's PMG extends the PARTID/CLOSID space, so there are multiple sources
 * of traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
 * value is no longer unique.
 * To account for this, resctrl uses an index. On x86 this is just the RMID,
 * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
 *
 * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
 * must accept an attempt to read every index.
 */
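/*
 * Purely illustrative (the real encoding is private to the arch code):
 * on x86 resctrl_arch_rmid_idx_encode(closid, rmid) can simply return
 * rmid, while an MPAM-style implementation might pack the CLOSID into
 * the upper bits, e.g. (closid << shift) | rmid, so that each
 * (CLOSID, RMID) pair maps to a unique index.
 */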
static inline struct rmid_entry *__rmid_entry(u32 idx)
{
	struct rmid_entry *entry;
	u32 closid, rmid;

	entry = &rmid_ptrs[idx];
	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);

	WARN_ON_ONCE(entry->closid != closid);
	WARN_ON_ONCE(entry->rmid != rmid);

	return entry;
}

static void limbo_release_entry(struct rmid_entry *entry)
{
	lockdep_assert_held(&rdtgroup_mutex);

	rmid_limbo_count--;
	list_add_tail(&entry->list, &rmid_free_lru);

	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
		closid_num_dirty_rmid[entry->closid]--;
}

/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold, clear the busy bit and
 * decrement the count. If the busy count reaches zero on an RMID, free
 * the RMID. When @force_free is true, clear the busy bit regardless of
 * the reported occupancy.
 */
void __check_limbo(struct rdt_mon_domain *d, bool force_free)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
	struct rmid_entry *entry;
	u32 idx, cur_idx = 1;
	void *arch_mon_ctx;
	bool rmid_dirty;
	u64 val = 0;

	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
	if (IS_ERR(arch_mon_ctx)) {
		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
				    PTR_ERR(arch_mon_ctx));
		return;
	}

	/*
	 * Skip RMID 0 and start from RMID 1: check all the RMIDs that
	 * are marked as busy for occupancy < threshold. If the occupancy
	 * is less than the threshold, decrement the busy counter of the
	 * RMID and move it to the free list when the counter reaches 0.
	 */
	for (;;) {
		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
		if (idx >= idx_limit)
			break;

		entry = __rmid_entry(idx);
		if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
					   QOS_L3_OCCUP_EVENT_ID, &val,
					   arch_mon_ctx)) {
			rmid_dirty = true;
		} else {
			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);

			/*
			 * x86's CLOSID and RMID are independent numbers, so the entry's
			 * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
			 * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
			 * used to select the configuration. It is thus necessary to track both
			 * CLOSID and RMID because there may be dependencies between them
			 * on some architectures.
			 */
			trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
		}

		if (force_free || !rmid_dirty) {
			clear_bit(idx, d->rmid_busy_llc);
			if (!--entry->busy)
				limbo_release_entry(entry);
		}
		cur_idx = idx + 1;
	}

	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
}

bool has_busy_rmid(struct rdt_mon_domain *d)
{
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();

	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
}

static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
{
	struct rmid_entry *itr;
	u32 itr_idx, cmp_idx;

	if (list_empty(&rmid_free_lru))
		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);

	list_for_each_entry(itr, &rmid_free_lru, list) {
		/*
		 * Get the index of this free RMID, and the index it would need
		 * to be if it were used with this CLOSID.
		 * If the CLOSID is irrelevant on this architecture, the two
		 * index values are always the same on every entry and thus the
		 * very first entry will be returned.
		 */
		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);

		if (itr_idx == cmp_idx)
			return itr;
	}

	return ERR_PTR(-ENOSPC);
}

/**
 * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
 *                                  RMID are clean, or the CLOSID that has
 *                                  the most clean RMID.
 *
 * MPAM's equivalent of RMID is per-CLOSID, meaning a freshly allocated CLOSID
 * may not be able to allocate a clean RMID. To avoid this the allocator will
 * choose the CLOSID with the most clean RMID.
 *
 * When the CLOSID and RMID are independent numbers, the first free CLOSID will
 * be returned.
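 *
 * Return: A CLOSID on success, -EIO when CLOSID and RMID are independent
 *         numbers, or -ENOSPC when no suitable CLOSID can be found.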
 */
int resctrl_find_cleanest_closid(void)
{
	u32 cleanest_closid = ~0;
	int i = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
		return -EIO;

	for (i = 0; i < closids_supported(); i++) {
		int num_dirty;

		if (closid_allocated(i))
			continue;

		num_dirty = closid_num_dirty_rmid[i];
		if (num_dirty == 0)
			return i;

		if (cleanest_closid == ~0)
			cleanest_closid = i;

		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
			cleanest_closid = i;
	}

	if (cleanest_closid == ~0)
		return -ENOSPC;

	return cleanest_closid;
}

/*
 * For MPAM the RMID value is not unique, and has to be considered with
 * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
 * allows all domains to be managed by a single free list.
 * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
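 * On such architectures the RMID returned by alloc_rmid() is only meaningful
 * in combination with the @closid it was allocated for.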
 */
int alloc_rmid(u32 closid)
{
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	entry = resctrl_find_free_rmid(closid);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	list_del(&entry->list);
	return entry->rmid;
}

static void add_rmid_to_limbo(struct rmid_entry *entry)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	struct rdt_mon_domain *d;
	u32 idx;

	lockdep_assert_held(&rdtgroup_mutex);

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);

	entry->busy = 0;
	list_for_each_entry(d, &r->mon_domains, hdr.list) {
		/*
		 * For the first limbo RMID in the domain,
		 * set up the limbo worker.
		 */
		if (!has_busy_rmid(d))
			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
						RESCTRL_PICK_ANY_CPU);
		set_bit(idx, d->rmid_busy_llc);
		entry->busy++;
	}

	rmid_limbo_count++;
	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
		closid_num_dirty_rmid[entry->closid]++;
}

void free_rmid(u32 closid, u32 rmid)
{
	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	/*
	 * Do not allow the default RMID to be freed. Comparing by index
	 * allows architectures that ignore the closid parameter to avoid an
	 * unnecessary check.
	 */
	if (!resctrl_arch_mon_capable() ||
	    idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
						RESCTRL_RESERVED_RMID))
		return;

	entry = __rmid_entry(idx);

	if (resctrl_arch_is_llc_occupancy_enabled())
		add_rmid_to_limbo(entry);
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}

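/*
 * Return the cached mbm_state for an MBM event, or NULL for events that
 * have no MBM state (e.g. llc_occupancy). Callers are expected to check
 * for NULL.
 */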
static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
				       u32 rmid, enum resctrl_event_id evtid)
{
	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);

	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return &d->mbm_total[idx];
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return &d->mbm_local[idx];
	default:
		return NULL;
	}
}

static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
{
	int cpu = smp_processor_id();
	struct rdt_mon_domain *d;
	struct mbm_state *m;
	int err, ret;
	u64 tval = 0;

	if (rr->first) {
		resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
		m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
		if (m)
			memset(m, 0, sizeof(struct mbm_state));
		return 0;
	}

	if (rr->d) {
		/* Reading a single domain, must be on a CPU in that domain. */
		if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
			return -EINVAL;
		rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
						 rr->evtid, &tval, rr->arch_mon_ctx);
		if (rr->err)
			return rr->err;

		rr->val += tval;

		return 0;
	}

	/* Summing domains that share a cache, must be on a CPU for that cache. */
	if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
		return -EINVAL;

	/*
	 * Legacy files must report the sum of an event across all
	 * domains that share the same L3 cache instance.
	 * Report success if a read from any domain succeeds, -EINVAL
	 * (translated to "Unavailable" for user space) if reading from
	 * all domains fails for any reason.
	 */
	ret = -EINVAL;
	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
		if (d->ci->id != rr->ci->id)
			continue;
		err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
					     rr->evtid, &tval, rr->arch_mon_ctx);
		if (!err) {
			rr->val += tval;
			ret = 0;
		}
	}

	if (ret)
		rr->err = ret;

	return ret;
}

/*
 * mbm_bw_count() - Update bw count from values previously read by
 *		    __mon_event_count().
 * @closid:	The closid used to identify the cached mbm_state.
 * @rmid:	The rmid used to identify the cached mbm_state.
 * @rr:		The struct rmid_read populated by __mon_event_count().
 *
 * Supporting function to calculate the memory bandwidth in MBps. The
 * bytes value previously read by __mon_event_count() is compared with the
 * value from the previous invocation. This must be called once per second
 * to maintain values in MBps.
 */
static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
{
	u64 cur_bw, bytes, cur_bytes;
	struct mbm_state *m;

	m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
	if (WARN_ON_ONCE(!m))
		return;

	cur_bytes = rr->val;
	bytes = cur_bytes - m->prev_bw_bytes;
	m->prev_bw_bytes = cur_bytes;

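	/*
	 * With the 1s call interval the byte delta is the bandwidth, e.g. a
	 * delta of 500 * SZ_1M bytes since the last call gives a prev_bw of
	 * 500 MBps.
	 */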
	cur_bw = bytes / SZ_1M;

	m->prev_bw = cur_bw;
}

/*
 * This is scheduled by mon_event_read() to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;
	int ret;

	rdtgrp = rr->rgrp;

	ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);

	/*
	 * For Ctrl groups read data from child monitor groups and
	 * add them together. Count events which are read successfully.
	 * Discard the rmid_read's reporting errors.
	 */
	head = &rdtgrp->mon.crdtgrp_list;

	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
			if (__mon_event_count(entry->closid, entry->mon.rmid,
					      rr) == 0)
				ret = 0;
		}
	}

	/*
	 * __mon_event_count() calls for newly created monitor groups may
	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
	 * Discard error if any of the monitor event reads succeeded.
	 */
	if (ret == 0)
		rr->err = 0;
}

static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
							struct rdt_resource *r)
{
	struct rdt_ctrl_domain *d;

	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
			return d;
	}

	return NULL;
}

/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we piggyback on the MBM overflow
 * timer. Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in its initial
 * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
 * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values.  To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
{
	u32 closid, rmid, cur_msr_val, new_msr_val;
	struct mbm_state *pmbm_data, *cmbm_data;
	struct rdt_ctrl_domain *dom_mba;
	enum resctrl_event_id evt_id;
	struct rdt_resource *r_mba;
	struct list_head *head;
	struct rdtgroup *entry;
	u32 cur_bw, user_bw;

	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
	evt_id = rgrp->mba_mbps_event;

	closid = rgrp->closid;
	rmid = rgrp->mon.rmid;
	pmbm_data = get_mbm_state(dom_mbm, closid, rmid, evt_id);
	if (WARN_ON_ONCE(!pmbm_data))
		return;

	dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
	if (!dom_mba) {
		pr_warn_once("Failure to get domain for MBA update\n");
		return;
	}

	cur_bw = pmbm_data->prev_bw;
	user_bw = dom_mba->mbps_val[closid];

	/* MBA resource doesn't support CDP */
	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);

	/*
	 * For Ctrl groups read data from child monitor groups.
	 */
	head = &rgrp->mon.crdtgrp_list;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id);
		if (WARN_ON_ONCE(!cmbm_data))
			return;
		cur_bw += cmbm_data->prev_bw;
	}

	/*
	 * Scale up/down the bandwidth linearly for the ctrl group.  The
	 * bandwidth step is the bandwidth granularity specified by the
	 * hardware.
	 * Always increase throttling if current bandwidth is above the
	 * target set by user.
	 * But avoid thrashing up and down on every poll by checking
	 * whether a decrease in throttling is likely to push the group
	 * back over target. E.g. if currently throttling to 30% of bandwidth
	 * on a system with 10% granularity steps, check whether moving to
	 * 40% would go past the limit by multiplying current bandwidth by
	 * "(30 + 10) / 30".
	 */
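	/*
	 * Worked example (assuming min_bw == bw_gran == 10): with
	 * cur_msr_val = 30 and cur_bw = 600 MBps, the predicted bandwidth
	 * one step up is 600 * (30 + 10) / 30 = 800 MBps, so throttling is
	 * only relaxed when user_bw exceeds that prediction.
	 */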
	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
	} else if (cur_msr_val < MAX_MBA_BW &&
		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
	} else {
		return;
	}

	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
}

static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
				 u32 closid, u32 rmid, enum resctrl_event_id evtid)
{
	struct rmid_read rr = {0};

	rr.r = r;
	rr.d = d;
	rr.evtid = evtid;
	rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
	if (IS_ERR(rr.arch_mon_ctx)) {
		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
				    PTR_ERR(rr.arch_mon_ctx));
		return;
	}

	__mon_event_count(closid, rmid, &rr);

	/*
	 * If the software controller is enabled, compute the
	 * bandwidth for this event id.
	 */
	if (is_mba_sc(NULL))
		mbm_bw_count(closid, rmid, &rr);

	resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
}

static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
		       u32 closid, u32 rmid)
{
	/*
	 * This is protected from concurrent reads from user as both
	 * the user and overflow handler hold the global mutex.
	 */
	if (resctrl_arch_is_mbm_total_enabled())
		mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);

	if (resctrl_arch_is_mbm_local_enabled())
		mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
}

/*
 * Handler to scan the limbo list and move to the free list any RMIDs
 * whose occupancy is below the reallocation threshold.
 */
void cqm_handle_limbo(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
	struct rdt_mon_domain *d;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	d = container_of(work, struct rdt_mon_domain, cqm_limbo.work);

	__check_limbo(d, false);

	if (has_busy_rmid(d)) {
		d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
							   RESCTRL_PICK_ANY_CPU);
		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
					 delay);
	}

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

/**
 * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
 *                             domain.
 * @dom:           The domain the limbo handler should run for.
 * @delay_ms:      How far in the future the handler should run.
 * @exclude_cpu:   Which CPU the handler should not run on,
 *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
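 *
 * If no eligible CPU is available (cpu >= nr_cpu_ids) the work is not
 * scheduled; dom->cqm_work_cpu still records the chosen value.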
 */
void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
			     int exclude_cpu)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
	dom->cqm_work_cpu = cpu;

	if (cpu < nr_cpu_ids)
		schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}

void mbm_handle_overflow(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
	struct rdtgroup *prgrp, *crgrp;
	struct rdt_mon_domain *d;
	struct list_head *head;
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	/*
	 * If the filesystem has been unmounted this work no longer needs to
	 * run.
	 */
	if (!resctrl_mounted || !resctrl_arch_mon_capable())
		goto out_unlock;

	r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	d = container_of(work, struct rdt_mon_domain, mbm_over.work);

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
			mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);

		if (is_mba_sc(NULL))
			update_mba_bw(prgrp, d);
	}

	/*
	 * Re-check for housekeeping CPUs. This allows the overflow handler to
	 * move off a nohz_full CPU quickly.
	 */
	d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
						   RESCTRL_PICK_ANY_CPU);
	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

/**
 * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
 *                                domain.
 * @dom:           The domain the overflow handler should run for.
 * @delay_ms:      How far in the future the handler should run.
 * @exclude_cpu:   Which CPU the handler should not run on,
 *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
 */
void mbm_setup_overflow_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
				int exclude_cpu)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	/*
	 * When a domain comes online there is no guarantee the filesystem is
	 * mounted. If not, there is no need to catch counter overflow.
	 */
	if (!resctrl_mounted || !resctrl_arch_mon_capable())
		return;
	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
	dom->mbm_work_cpu = cpu;

	if (cpu < nr_cpu_ids)
		schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}

static int dom_data_init(struct rdt_resource *r)
{
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rmid_entry *entry = NULL;
	int err = 0, i;
	u32 idx;

	mutex_lock(&rdtgroup_mutex);
	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
		u32 *tmp;

		/*
		 * If the architecture hasn't provided a sanitised value here,
		 * this may result in larger arrays than necessary. Resctrl will
		 * use a smaller system wide value based on the resources in
		 * use.
		 */
		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
		if (!tmp) {
			err = -ENOMEM;
			goto out_unlock;
		}

		closid_num_dirty_rmid = tmp;
	}

	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
	if (!rmid_ptrs) {
		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
			kfree(closid_num_dirty_rmid);
			closid_num_dirty_rmid = NULL;
		}
		err = -ENOMEM;
		goto out_unlock;
	}

	for (i = 0; i < idx_limit; i++) {
		entry = &rmid_ptrs[i];
		INIT_LIST_HEAD(&entry->list);

		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
		list_add_tail(&entry->list, &rmid_free_lru);
	}

	/*
	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
	 * are always allocated. These are used for the rdtgroup_default
	 * control group, which will be set up later in resctrl_init().
	 */
	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
					   RESCTRL_RESERVED_RMID);
	entry = __rmid_entry(idx);
	list_del(&entry->list);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);

	return err;
}

static void dom_data_exit(struct rdt_resource *r)
{
	mutex_lock(&rdtgroup_mutex);

	if (!r->mon_capable)
		goto out_unlock;

	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
		kfree(closid_num_dirty_rmid);
		closid_num_dirty_rmid = NULL;
	}

	kfree(rmid_ptrs);
	rmid_ptrs = NULL;

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}

static struct mon_evt llc_occupancy_event = {
	.name		= "llc_occupancy",
	.evtid		= QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
	.name		= "mbm_total_bytes",
	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
	.name		= "mbm_local_bytes",
	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
};

/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of the RDT_RESOURCE_L3 resource
 * because, as per the SDM, the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
	INIT_LIST_HEAD(&r->evt_list);

	if (resctrl_arch_is_llc_occupancy_enabled())
		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
	if (resctrl_arch_is_mbm_total_enabled())
		list_add_tail(&mbm_total_event.list, &r->evt_list);
	if (resctrl_arch_is_mbm_local_enabled())
		list_add_tail(&mbm_local_event.list, &r->evt_list);
}

/**
 * resctrl_mon_resource_init() - Initialise global monitoring structures.
 *
 * Allocate and initialise global monitor resources that do not belong to a
 * specific domain, i.e. the rmid_ptrs[] used for the limbo and free lists.
 * Called once during boot after the struct rdt_resource's have been configured
 * but before the filesystem is mounted.
 * Resctrl's cpuhp callbacks may be called before this point to bring a domain
 * online.
 *
 * Returns 0 for success, or -ENOMEM.
 */
int resctrl_mon_resource_init(void)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	int ret;

	if (!r->mon_capable)
		return 0;

	ret = dom_data_init(r);
	if (ret)
		return ret;

	l3_mon_evt_init(r);

	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
		mbm_total_event.configurable = true;
		resctrl_file_fflags_init("mbm_total_bytes_config",
					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
	}
	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
		mbm_local_event.configurable = true;
		resctrl_file_fflags_init("mbm_local_bytes_config",
					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
	}

	if (resctrl_arch_is_mbm_local_enabled())
		mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
	else if (resctrl_arch_is_mbm_total_enabled())
		mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;

	return 0;
}

void resctrl_mon_resource_exit(void)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);

	dom_data_exit(r);
}