xref: /linux/include/linux/resctrl.h (revision dcb49710189d104d4edc07709615748dab61341b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _RESCTRL_H
3 #define _RESCTRL_H
4 
5 #include <linux/cacheinfo.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/pid.h>
9 #include <linux/resctrl_types.h>
10 
11 #ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
12 #include <asm/resctrl.h>
13 #endif
14 
15 /* CLOSID, RMID value used by the default control group */
16 #define RESCTRL_RESERVED_CLOSID		0
17 #define RESCTRL_RESERVED_RMID		0
18 
19 #define RESCTRL_PICK_ANY_CPU		-1
20 
21 #ifdef CONFIG_PROC_CPU_RESCTRL
22 
23 int proc_resctrl_show(struct seq_file *m,
24 		      struct pid_namespace *ns,
25 		      struct pid *pid,
26 		      struct task_struct *tsk);
27 
28 #endif
29 
30 /* max value for struct rdt_domain's mbps_val */
31 #define MBA_MAX_MBPS   U32_MAX
32 
/* Walk all possible resources, with variants for only controls or monitors. */
#define for_each_rdt_resource(_r)						\
	for ((_r) = resctrl_arch_get_resource(0);				\
	     (_r) && (_r)->rid < RDT_NUM_RESOURCES;				\
	     (_r) = resctrl_arch_get_resource((_r)->rid + 1))

/* Walk resources that support allocation and/or monitoring. */
#define for_each_capable_rdt_resource(r)				      \
	for_each_rdt_resource((r))					      \
		if ((r)->alloc_capable || (r)->mon_capable)

/* Walk resources that support allocation. */
#define for_each_alloc_capable_rdt_resource(r)				      \
	for_each_rdt_resource((r))					      \
		if ((r)->alloc_capable)

/* Walk resources that support monitoring. */
#define for_each_mon_capable_rdt_resource(r)				      \
	for_each_rdt_resource((r))					      \
		if ((r)->mon_capable)
50 
/*
 * Identifier of each resource known to resctrl. Also used as the index
 * passed to resctrl_arch_get_resource().
 */
enum resctrl_res_level {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,
	RDT_RESOURCE_SMBA,
	RDT_RESOURCE_PERF_PKG,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};
61 
62 /**
63  * enum resctrl_conf_type - The type of configuration.
64  * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
65  * @CDP_CODE:	Configuration applies to instruction fetches.
66  * @CDP_DATA:	Configuration applies to reads and writes.
67  */
68 enum resctrl_conf_type {
69 	CDP_NONE,
70 	CDP_CODE,
71 	CDP_DATA,
72 };
73 
74 #define CDP_NUM_TYPES	(CDP_DATA + 1)
75 
/**
 * struct pseudo_lock_region - pseudo-lock region information
 * @s:			Resctrl schema for the resource to which this
 *			pseudo-locked region belongs
 * @closid:		The closid that this pseudo-locked region uses
 * @d:			RDT domain to which this pseudo-locked region
 *			belongs
 * @cbm:		bitmask of the pseudo-locked region
 * @lock_thread_wq:	waitqueue used to wait on the pseudo-locking thread
 *			completion
 * @thread_done:	variable used by waitqueue to test if pseudo-locking
 *			thread completed
 * @cpu:		core associated with the cache on which the setup code
 *			will be run
 * @line_size:		size of the cache lines
 * @size:		size of pseudo-locked region in bytes
 * @kmem:		the kernel memory associated with pseudo-locked region
 * @minor:		minor number of character device associated with this
 *			region
 * @debugfs_dir:	pointer to this region's directory in the debugfs
 *			filesystem
 * @pm_reqs:		Power management QoS requests related to this region
 */
struct pseudo_lock_region {
	struct resctrl_schema	*s;
	u32			closid;
	struct rdt_ctrl_domain	*d;
	u32			cbm;
	wait_queue_head_t	lock_thread_wq;
	int			thread_done;
	int			cpu;
	unsigned int		line_size;
	unsigned int		size;
	void			*kmem;
	unsigned int		minor;
	struct dentry		*debugfs_dir;
	struct list_head	pm_reqs;
};
114 
115 /**
116  * struct resctrl_staged_config - parsed configuration to be applied
117  * @new_ctrl:		new ctrl value to be loaded
118  * @have_new_ctrl:	whether the user provided new_ctrl is valid
119  */
120 struct resctrl_staged_config {
121 	u32			new_ctrl;
122 	bool			have_new_ctrl;
123 };
124 
/**
 * enum resctrl_domain_type - The type of state a domain holds.
 * @RESCTRL_CTRL_DOMAIN:	Domain carrying allocation control state.
 * @RESCTRL_MON_DOMAIN:		Domain carrying monitoring state.
 */
enum resctrl_domain_type {
	RESCTRL_CTRL_DOMAIN,
	RESCTRL_MON_DOMAIN,
};
129 
130 /**
131  * struct rdt_domain_hdr - common header for different domain types
132  * @list:		all instances of this resource
133  * @id:			unique id for this instance
134  * @type:		type of this instance
135  * @rid:		resource id for this instance
136  * @cpu_mask:		which CPUs share this resource
137  */
138 struct rdt_domain_hdr {
139 	struct list_head		list;
140 	int				id;
141 	enum resctrl_domain_type	type;
142 	enum resctrl_res_level		rid;
143 	struct cpumask			cpu_mask;
144 };
145 
domain_header_is_valid(struct rdt_domain_hdr * hdr,enum resctrl_domain_type type,enum resctrl_res_level rid)146 static inline bool domain_header_is_valid(struct rdt_domain_hdr *hdr,
147 					  enum resctrl_domain_type type,
148 					  enum resctrl_res_level rid)
149 {
150 	return !WARN_ON_ONCE(hdr->type != type || hdr->rid != rid);
151 }
152 
153 /**
154  * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
155  * @hdr:		common header for different domain types
156  * @plr:		pseudo-locked region (if any) associated with domain
157  * @staged_config:	parsed configuration to be applied
158  * @mbps_val:		When mba_sc is enabled, this holds the array of user
159  *			specified control values for mba_sc in MBps, indexed
160  *			by closid
161  */
162 struct rdt_ctrl_domain {
163 	struct rdt_domain_hdr		hdr;
164 	struct pseudo_lock_region	*plr;
165 	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
166 	u32				*mbps_val;
167 };
168 
/**
 * struct mbm_cntr_cfg - Assignable counter configuration.
 * @evtid:		MBM event to which the counter is assigned. Only valid
 *			if @rdtgrp is not NULL.
 * @rdtgrp:		resctrl group assigned to the counter. NULL if the
 *			counter is free.
 */
struct mbm_cntr_cfg {
	enum resctrl_event_id	evtid;
	struct rdtgroup		*rdtgrp;
};
180 
181 /**
182  * struct rdt_l3_mon_domain - group of CPUs sharing RDT_RESOURCE_L3 monitoring
183  * @hdr:		common header for different domain types
184  * @ci_id:		cache info id for this domain
185  * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
186  * @mbm_states:		Per-event pointer to the MBM event's saved state.
187  *			An MBM event's state is an array of struct mbm_state
188  *			indexed by RMID on x86 or combined CLOSID, RMID on Arm.
189  * @mbm_over:		worker to periodically read MBM h/w counters
190  * @cqm_limbo:		worker to periodically read CQM h/w counters
191  * @mbm_work_cpu:	worker CPU for MBM h/w counters
192  * @cqm_work_cpu:	worker CPU for CQM h/w counters
193  * @cntr_cfg:		array of assignable counters' configuration (indexed
194  *			by counter ID)
195  */
196 struct rdt_l3_mon_domain {
197 	struct rdt_domain_hdr		hdr;
198 	unsigned int			ci_id;
199 	unsigned long			*rmid_busy_llc;
200 	struct mbm_state		*mbm_states[QOS_NUM_L3_MBM_EVENTS];
201 	struct delayed_work		mbm_over;
202 	struct delayed_work		cqm_limbo;
203 	int				mbm_work_cpu;
204 	int				cqm_work_cpu;
205 	struct mbm_cntr_cfg		*cntr_cfg;
206 };
207 
208 /**
209  * struct resctrl_cache - Cache allocation related data
210  * @cbm_len:		Length of the cache bit mask
211  * @min_cbm_bits:	Minimum number of consecutive bits to be set.
212  *			The value 0 means the architecture can support
213  *			zero CBM.
214  * @shareable_bits:	Bitmask of shareable resource with other
215  *			executing entities
216  * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
217  * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
218  *				level has CPU scope.
219  * @io_alloc_capable:	True if portion of the cache can be configured
220  *			for I/O traffic.
221  */
222 struct resctrl_cache {
223 	unsigned int	cbm_len;
224 	unsigned int	min_cbm_bits;
225 	unsigned int	shareable_bits;
226 	bool		arch_has_sparse_bitmasks;
227 	bool		arch_has_per_cpu_cfg;
228 	bool		io_alloc_capable;
229 };
230 
231 /**
232  * enum membw_throttle_mode - System's memory bandwidth throttling mode
233  * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
234  * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core
235  *				always using smallest bandwidth percentage
236  *				assigned to threads, aka "max throttling"
237  * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread
238  */
239 enum membw_throttle_mode {
240 	THREAD_THROTTLE_UNDEFINED = 0,
241 	THREAD_THROTTLE_MAX,
242 	THREAD_THROTTLE_PER_THREAD,
243 };
244 
245 /**
246  * struct resctrl_membw - Memory bandwidth allocation related data
247  * @min_bw:		Minimum memory bandwidth percentage user can request
248  * @max_bw:		Maximum memory bandwidth value, used as the reset value
249  * @bw_gran:		Granularity at which the memory bandwidth is allocated
250  * @delay_linear:	True if memory B/W delay is in linear scale
251  * @arch_needs_linear:	True if we can't configure non-linear resources
252  * @throttle_mode:	Bandwidth throttling mode when threads request
253  *			different memory bandwidths
254  * @mba_sc:		True if MBA software controller(mba_sc) is enabled
255  * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
256  */
257 struct resctrl_membw {
258 	u32				min_bw;
259 	u32				max_bw;
260 	u32				bw_gran;
261 	u32				delay_linear;
262 	bool				arch_needs_linear;
263 	enum membw_throttle_mode	throttle_mode;
264 	bool				mba_sc;
265 	u32				*mb_map;
266 };
267 
268 struct resctrl_schema;
269 
/*
 * The scope of a resource's domains. The cache scopes share the
 * numbering of their cache level (L2 == 2, L3 == 3).
 */
enum resctrl_scope {
	RESCTRL_L2_CACHE = 2,
	RESCTRL_L3_CACHE = 3,
	RESCTRL_L3_NODE,
	RESCTRL_PACKAGE,
};
276 
277 /**
278  * enum resctrl_schema_fmt - The format user-space provides for a schema.
279  * @RESCTRL_SCHEMA_BITMAP:	The schema is a bitmap in hex.
280  * @RESCTRL_SCHEMA_RANGE:	The schema is a decimal number.
281  */
282 enum resctrl_schema_fmt {
283 	RESCTRL_SCHEMA_BITMAP,
284 	RESCTRL_SCHEMA_RANGE,
285 };
286 
287 /**
288  * struct resctrl_mon - Monitoring related data of a resctrl resource.
289  * @num_rmid:		Number of RMIDs available.
290  * @mbm_cfg_mask:	Memory transactions that can be tracked when bandwidth
291  *			monitoring events can be configured.
292  * @num_mbm_cntrs:	Number of assignable counters.
293  * @mbm_cntr_assignable:Is system capable of supporting counter assignment?
294  * @mbm_assign_on_mkdir:True if counters should automatically be assigned to MBM
295  *			events of monitor groups created via mkdir.
296  */
297 struct resctrl_mon {
298 	u32			num_rmid;
299 	unsigned int		mbm_cfg_mask;
300 	int			num_mbm_cntrs;
301 	bool			mbm_cntr_assignable;
302 	bool			mbm_assign_on_mkdir;
303 };
304 
305 /**
306  * struct rdt_resource - attributes of a resctrl resource
307  * @rid:		The index of the resource
308  * @alloc_capable:	Is allocation available on this machine
309  * @mon_capable:	Is monitor feature available on this machine
310  * @ctrl_scope:		Scope of this resource for control functions
311  * @mon_scope:		Scope of this resource for monitor functions
312  * @cache:		Cache allocation related data
313  * @membw:		If the component has bandwidth controls, their properties.
314  * @mon:		Monitoring related data.
315  * @ctrl_domains:	RCU list of all control domains for this resource
316  * @mon_domains:	RCU list of all monitor domains for this resource
317  * @name:		Name to use in "schemata" file.
318  * @schema_fmt:		Which format string and parser is used for this schema.
319  * @cdp_capable:	Is the CDP feature available on this resource
320  */
321 struct rdt_resource {
322 	int			rid;
323 	bool			alloc_capable;
324 	bool			mon_capable;
325 	enum resctrl_scope	ctrl_scope;
326 	enum resctrl_scope	mon_scope;
327 	struct resctrl_cache	cache;
328 	struct resctrl_membw	membw;
329 	struct resctrl_mon	mon;
330 	struct list_head	ctrl_domains;
331 	struct list_head	mon_domains;
332 	char			*name;
333 	enum resctrl_schema_fmt	schema_fmt;
334 	bool			cdp_capable;
335 };
336 
337 /*
338  * Get the resource that exists at this level. If the level is not supported
339  * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
340  * will return NULL.
341  */
342 struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);
343 
344 /**
345  * struct resctrl_schema - configuration abilities of a resource presented to
346  *			   user-space
347  * @list:	Member of resctrl_schema_all.
348  * @name:	The name to use in the "schemata" file.
349  * @fmt_str:	Format string to show domain value.
350  * @conf_type:	Whether this schema is specific to code/data.
351  * @res:	The resource structure exported by the architecture to describe
352  *		the hardware that is configured by this schema.
353  * @num_closid:	The number of closid that can be used with this schema. When
354  *		features like CDP are enabled, this will be lower than the
355  *		hardware supports for the resource.
356  */
357 struct resctrl_schema {
358 	struct list_head		list;
359 	char				name[8];
360 	const char			*fmt_str;
361 	enum resctrl_conf_type		conf_type;
362 	struct rdt_resource		*res;
363 	u32				num_closid;
364 };
365 
/**
 * struct resctrl_cpu_defaults - New CLOSID/RMID pair for a CPU's default
 *				 group, passed to
 *				 resctrl_arch_sync_cpu_closid_rmid().
 * @closid:	CLOSID for tasks in the default resctrl control group.
 * @rmid:	RMID for tasks in the default resctrl monitor group.
 */
struct resctrl_cpu_defaults {
	u32 closid;
	u32 rmid;
};
370 
/**
 * struct resctrl_mon_config_info - Event configuration exchanged with the
 *				    resctrl_arch_mon_event_config_*() IPI
 *				    helpers.
 * @r:		The resource the event belongs to.
 * @d:		The L3 monitor domain the configuration applies to.
 * @evtid:	The event being configured.
 * @mon_config:	Configuration value written to, or read from, hardware.
 */
struct resctrl_mon_config_info {
	struct rdt_resource		*r;
	struct rdt_l3_mon_domain	*d;
	u32				evtid;
	u32				mon_config;
};
377 
378 /**
379  * resctrl_arch_sync_cpu_closid_rmid() - Refresh this CPU's CLOSID and RMID.
380  *					 Call via IPI.
381  * @info:	If non-NULL, a pointer to a struct resctrl_cpu_defaults
382  *		specifying the new CLOSID and RMID for tasks in the default
383  *		resctrl ctrl and mon group when running on this CPU.  If NULL,
384  *		this CPU is not re-assigned to a different default group.
385  *
386  * Propagates reassignment of CPUs and/or tasks to different resctrl groups
387  * when requested by the resctrl core code.
388  *
389  * This function records the per-cpu defaults specified by @info (if any),
390  * and then reconfigures the CPU's hardware CLOSID and RMID for subsequent
391  * execution based on @current, in the same way as during a task switch.
392  */
393 void resctrl_arch_sync_cpu_closid_rmid(void *info);
394 
395 /**
396  * resctrl_get_default_ctrl() - Return the default control value for this
397  *                              resource.
398  * @r:		The resource whose default control type is queried.
399  */
resctrl_get_default_ctrl(struct rdt_resource * r)400 static inline u32 resctrl_get_default_ctrl(struct rdt_resource *r)
401 {
402 	switch (r->schema_fmt) {
403 	case RESCTRL_SCHEMA_BITMAP:
404 		return BIT_MASK(r->cache.cbm_len) - 1;
405 	case RESCTRL_SCHEMA_RANGE:
406 		return r->membw.max_bw;
407 	}
408 
409 	return WARN_ON_ONCE(1);
410 }
411 
412 /* The number of closid supported by this resource regardless of CDP */
413 u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
414 u32 resctrl_arch_system_num_rmid_idx(void);
415 int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
416 
417 bool resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
418 			      unsigned int binary_bits, void *arch_priv);
419 
420 bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
421 
422 bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
423 
resctrl_is_mbm_event(enum resctrl_event_id eventid)424 static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
425 {
426 	return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
427 		eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
428 }
429 
430 u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);
431 
432 /* Iterate over all memory bandwidth events */
433 #define for_each_mbm_event_id(eventid)				\
434 	for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID;		\
435 	     eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)
436 
437 /* Iterate over memory bandwidth arrays in domain structures */
438 #define for_each_mbm_idx(idx)					\
439 	for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
440 
441 /**
442  * resctrl_arch_mon_event_config_write() - Write the config for an event.
443  * @config_info: struct resctrl_mon_config_info describing the resource, domain
444  *		 and event.
445  *
446  * Reads resource, domain and eventid from @config_info and writes the
447  * event config_info->mon_config into hardware.
448  *
449  * Called via IPI to reach a CPU that is a member of the specified domain.
450  */
451 void resctrl_arch_mon_event_config_write(void *config_info);
452 
453 /**
454  * resctrl_arch_mon_event_config_read() - Read the config for an event.
455  * @config_info: struct resctrl_mon_config_info describing the resource, domain
456  *		 and event.
457  *
458  * Reads resource, domain and eventid from @config_info and reads the
459  * hardware config value into config_info->mon_config.
460  *
461  * Called via IPI to reach a CPU that is a member of the specified domain.
462  */
463 void resctrl_arch_mon_event_config_read(void *config_info);
464 
465 /* For use by arch code to remap resctrl's smaller CDP CLOSID range */
resctrl_get_config_index(u32 closid,enum resctrl_conf_type type)466 static inline u32 resctrl_get_config_index(u32 closid,
467 					   enum resctrl_conf_type type)
468 {
469 	switch (type) {
470 	default:
471 	case CDP_NONE:
472 		return closid;
473 	case CDP_CODE:
474 		return closid * 2 + 1;
475 	case CDP_DATA:
476 		return closid * 2;
477 	}
478 }
479 
480 bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
481 int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
482 
483 /**
484  * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
485  *					    mode is enabled.
486  * @r:		Pointer to the resource structure.
487  *
488  * Return:
489  * true if the assignment mode is enabled, false otherwise.
490  */
491 bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);
492 
493 /**
494  * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
495  * @r:		Pointer to the resource structure.
496  * @enable:	Set to true to enable, false to disable the assignment mode.
497  *
498  * Return:
499  * 0 on success, < 0 on error.
500  */
501 int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);
502 
503 /*
504  * Update the ctrl_val and apply this config right now.
505  * Must be called on one of the domain's CPUs.
506  */
507 int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
508 			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);
509 
510 u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
511 			    u32 closid, enum resctrl_conf_type type);
512 int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
513 int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
514 void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
515 void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
516 void resctrl_online_cpu(unsigned int cpu);
517 void resctrl_offline_cpu(unsigned int cpu);
518 
519 /*
520  * Architecture hook called at beginning of first file system mount attempt.
521  * No locks are held.
522  */
523 void resctrl_arch_pre_mount(void);
524 
525 /**
526  * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
527  *			      for this resource and domain.
528  * @r:			resource that the counter should be read from.
529  * @hdr:		Header of domain that the counter should be read from.
530  * @closid:		closid that matches the rmid. Depending on the architecture, the
531  *			counter may match traffic of both @closid and @rmid, or @rmid
532  *			only.
533  * @rmid:		rmid of the counter to read.
534  * @eventid:		eventid to read, e.g. L3 occupancy.
535  * @arch_priv:		Architecture private data for this event.
536  *			The @arch_priv provided by the architecture via
537  *			resctrl_enable_mon_event().
538  * @val:		result of the counter read in bytes.
539  * @arch_mon_ctx:	An architecture specific value from
540  *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
541  *			the hardware monitor allocated for this read request.
542  *
543  * Some architectures need to sleep when first programming some of the counters.
544  * (specifically: arm64's MPAM cache occupancy counters can return 'not ready'
545  *  for a short period of time). Call from a non-migrateable process context on
546  * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or
547  * schedule_work_on(). This function can be called with interrupts masked,
548  * e.g. using smp_call_function_any(), but may consistently return an error.
549  *
550  * Return:
551  * 0 on success, or -EIO, -EINVAL etc on error.
552  */
553 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
554 			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
555 			   void *arch_priv, u64 *val, void *arch_mon_ctx);
556 
557 /**
558  * resctrl_arch_rmid_read_context_check()  - warn about invalid contexts
559  *
560  * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
561  * resctrl_arch_rmid_read() is called with preemption disabled.
562  *
563  * The contract with resctrl_arch_rmid_read() is that if interrupts
564  * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
565  * IPI, (and fail if the call needed to sleep), while most of the time
566  * the work is scheduled, allowing the call to sleep.
567  */
static inline void resctrl_arch_rmid_read_context_check(void)
{
	if (irqs_disabled())
		return;

	/* Interrupts are unmasked, so this context is allowed to sleep. */
	might_sleep();
}
573 
574 /**
575  * resctrl_find_domain() - Search for a domain id in a resource domain list.
576  * @h:		The domain list to search.
577  * @id:		The domain id to search for.
578  * @pos:	A pointer to position in the list id should be inserted.
579  *
580  * Search the domain list to find the domain id. If the domain id is
581  * found, return the domain. NULL otherwise.  If the domain id is not
582  * found (and NULL returned) then the first domain with id bigger than
583  * the input id can be returned to the caller via @pos.
584  */
585 struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
586 					   struct list_head **pos);
587 
588 /**
589  * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
590  *			       and eventid.
591  * @r:		The domain's resource.
592  * @d:		The rmid's domain.
593  * @closid:	closid that matches the rmid. Depending on the architecture, the
594  *		counter may match traffic of both @closid and @rmid, or @rmid only.
595  * @rmid:	The rmid whose counter values should be reset.
596  * @eventid:	The eventid whose counter values should be reset.
597  *
598  * This can be called from any CPU.
599  */
600 void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
601 			     u32 closid, u32 rmid,
602 			     enum resctrl_event_id eventid);
603 
604 /**
605  * resctrl_arch_reset_rmid_all() - Reset all private state associated with
606  *				   all rmids and eventids.
607  * @r:		The resctrl resource.
608  * @d:		The domain for which all architectural counter state will
609  *		be cleared.
610  *
611  * This can be called from any CPU.
612  */
613 void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d);
614 
615 /**
616  * resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
617  *				    default.
618  * @r:		The resctrl resource to reset.
619  *
620  * This can be called from any CPU.
621  */
622 void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
623 
624 /**
625  * resctrl_arch_config_cntr() - Configure the counter with its new RMID
626  *				and event details.
627  * @r:			Resource structure.
628  * @d:			The domain in which counter with ID @cntr_id should be configured.
629  * @evtid:		Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
630  *			or QOS_L3_MBM_LOCAL_EVENT_ID).
631  * @rmid:		RMID.
632  * @closid:		CLOSID.
633  * @cntr_id:		Counter ID to configure.
634  * @assign:		True to assign the counter or update an existing assignment,
635  *			false to unassign the counter.
636  *
637  * This can be called from any CPU.
638  */
639 void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
640 			      enum resctrl_event_id evtid, u32 rmid, u32 closid,
641 			      u32 cntr_id, bool assign);
642 
643 /**
644  * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
645  *			      assigned to the RMID, event pair for this resource
646  *			      and domain.
647  * @r:		Resource that the counter should be read from.
648  * @d:		Domain that the counter should be read from.
649  * @closid:	CLOSID that matches the RMID.
650  * @rmid:	The RMID to which @cntr_id is assigned.
651  * @cntr_id:	The counter to read.
652  * @eventid:	The MBM event to which @cntr_id is assigned.
653  * @val:	Result of the counter read in bytes.
654  *
655  * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
656  * Called from a non-migrateable process context via smp_call_on_cpu() unless all
657  * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
658  *
659  * Return:
660  * 0 on success, or -EIO, -EINVAL etc on error.
661  */
662 int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
663 			   u32 closid, u32 rmid, int cntr_id,
664 			   enum resctrl_event_id eventid, u64 *val);
665 
666 /**
667  * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
668  * @r:		The domain's resource.
669  * @d:		The counter ID's domain.
670  * @closid:	CLOSID that matches the RMID.
671  * @rmid:	The RMID to which @cntr_id is assigned.
672  * @cntr_id:	The counter to reset.
673  * @eventid:	The MBM event to which @cntr_id is assigned.
674  *
675  * This can be called from any CPU.
676  */
677 void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
678 			     u32 closid, u32 rmid, int cntr_id,
679 			     enum resctrl_event_id eventid);
680 
681 /**
682  * resctrl_arch_io_alloc_enable() - Enable/disable io_alloc feature.
683  * @r:		The resctrl resource.
684  * @enable:	Enable (true) or disable (false) io_alloc on resource @r.
685  *
686  * This can be called from any CPU.
687  *
688  * Return:
689  * 0 on success, <0 on error.
690  */
691 int resctrl_arch_io_alloc_enable(struct rdt_resource *r, bool enable);
692 
693 /**
694  * resctrl_arch_get_io_alloc_enabled() - Get io_alloc feature state.
695  * @r:		The resctrl resource.
696  *
697  * Return:
698  * true if io_alloc is enabled or false if disabled.
699  */
700 bool resctrl_arch_get_io_alloc_enabled(struct rdt_resource *r);
701 
702 extern unsigned int resctrl_rmid_realloc_threshold;
703 extern unsigned int resctrl_rmid_realloc_limit;
704 
705 int resctrl_init(void);
706 void resctrl_exit(void);
707 
708 #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
709 u64 resctrl_arch_get_prefetch_disable_bits(void);
710 int resctrl_arch_pseudo_lock_fn(void *_plr);
711 int resctrl_arch_measure_cycles_lat_fn(void *_plr);
712 int resctrl_arch_measure_l2_residency(void *_plr);
713 int resctrl_arch_measure_l3_residency(void *_plr);
714 #else
/* Pseudo-locking is not compiled in: provide no-op stubs. */
static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
720 #endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
721 #endif /* _RESCTRL_H */
722