/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_RESCTRL_INTERNAL_H
#define _FS_RESCTRL_INTERNAL_H

#include <linux/resctrl.h>
#include <linux/kernfs.h>
#include <linux/fs_context.h>
#include <linux/tick.h>

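/*
 * Interval between re-checks of freed RMIDs that are still "busy". The value
 * is used as the delay_ms argument of cqm_setup_limbo_handler(), declared
 * below, i.e. it is interpreted in milliseconds.
 */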
#define CQM_LIMBOCHECK_INTERVAL	1000

/**
 * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that
 *			        aren't marked nohz_full
 * @mask:	The mask to pick a CPU from.
 * @exclude_cpu: The CPU to avoid picking.
 *
 * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping
 * CPUs that don't use nohz_full, these are preferred. Pass
 * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs.
 *
 * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available.
 */
static inline unsigned int
cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
{
	unsigned int cpu;

	/* Try to find a CPU that isn't nohz_full to use in preference */
	if (tick_nohz_full_enabled()) {
		cpu = cpumask_any_andnot_but(mask, tick_nohz_full_mask, exclude_cpu);
		if (cpu < nr_cpu_ids)
			return cpu;
	}

	return cpumask_any_but(mask, exclude_cpu);
}
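
/*
 * Illustrative only (a sketch, not something defined in this header): a
 * caller that needs to read a counter on a CPU of a monitoring domain @d
 * might pick that CPU like so, leaving nohz_full CPUs undisturbed whenever
 * a housekeeping CPU is available:
 *
 *	cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, RESCTRL_PICK_ANY_CPU);
 *	if (cpu < nr_cpu_ids)
 *		smp_call_function_single(cpu, read_fn, &arg, 1);
 *
 * Here read_fn and arg are hypothetical, and d->hdr.cpu_mask assumes the
 * rdt_mon_domain layout from <linux/resctrl.h>.
 */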

struct rdt_fs_context {
	struct kernfs_fs_context	kfc;
	bool				enable_cdpl2;
	bool				enable_cdpl3;
	bool				enable_mba_mbps;
	bool				enable_debug;
};

static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct rdt_fs_context, kfc);
}
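
/*
 * A sketch of how this is used (the actual hook lives in the resctrl mount
 * code, not here): the fs_context parameter-parsing callback recovers the
 * resctrl-specific mount context before acting on a mount option, e.g.
 *
 *	static int rdt_parse_param(struct fs_context *fc,
 *				   struct fs_parameter *param)
 *	{
 *		struct rdt_fs_context *ctx = rdt_fc2context(fc);
 *
 *		... set ctx->enable_cdpl3, ctx->enable_mba_mbps, etc. ...
 *	}
 */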

/**
 * struct mon_evt - Entry in the event list of a resource
 * @evtid:		event id
 * @name:		name of the event
 * @configurable:	true if the event is configurable
 * @list:		entry in &rdt_resource->evt_list
 */
struct mon_evt {
	enum resctrl_event_id	evtid;
	char			*name;
	bool			configurable;
	struct list_head	list;
};

/**
 * struct mon_data - Monitoring details for each event file.
 * @list:            Member of the global @mon_data_kn_priv_list list.
 * @rid:             Resource id associated with the event file.
 * @evtid:           Event id associated with the event file.
 * @sum:             Set when event must be summed across multiple
 *                   domains.
 * @domid:           When @sum is zero this is the domain to which
 *                   the event file belongs. When @sum is one this
 *                   is the id of the L3 cache that all domains to be
 *                   summed share.
 *
 * Pointed to by the kernfs kn->priv field of monitoring event files.
 * Readers and writers must hold rdtgroup_mutex.
 */
struct mon_data {
	struct list_head	list;
	enum resctrl_res_level	rid;
	enum resctrl_event_id	evtid;
	int			domid;
	bool			sum;
};
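
/*
 * The @sum/@domid pairing covers configurations where several monitoring
 * domains share one L3 cache (for example when Sub-NUMA Clustering is
 * enabled): a single event file then reports the total for all domains
 * sharing the L3 cache identified by @domid.
 */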

/**
 * struct rmid_read - Data passed across smp_call*() to read event count.
 * @rgrp:  Resource group for which the counter is being read. If it is a parent
 *	   resource group then its event count is summed with the count from all
 *	   its child resource groups.
 * @r:	   Resource describing the properties of the event being read.
 * @d:	   Domain that the counter should be read from. If NULL then sum all
 *	   domains in @r sharing L3 @ci.id.
 * @evtid: Which monitor event to read.
 * @first: Initialize MBM counter when true.
 * @ci:    Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
 * @err:   Error encountered when reading counter.
 * @val:   Returned value of event counter. If @rgrp is a parent resource group,
 *	   @val includes the sum of event counts from its child resource groups.
 *	   If @d is NULL, @val includes the sum of all domains in @r sharing @ci.id
 *	   (summed across child resource groups if @rgrp is a parent resource group).
 * @arch_mon_ctx: Hardware monitor allocated for this read request (MPAM only).
 */
struct rmid_read {
	struct rdtgroup		*rgrp;
	struct rdt_resource	*r;
	struct rdt_mon_domain	*d;
	enum resctrl_event_id	evtid;
	bool			first;
	struct cacheinfo	*ci;
	int			err;
	u64			val;
	void			*arch_mon_ctx;
};
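
/*
 * Rough outline of the read path (the details live in the monitoring code,
 * not in this header): mon_event_read(), declared below, fills in a
 * struct rmid_read and arranges for mon_event_count() to run on a CPU of
 * the target domain; the outcome is reported back through @err and @val.
 */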

extern struct list_head resctrl_schema_all;

extern bool resctrl_mounted;

enum rdt_group_type {
	RDTCTRL_GROUP = 0,
	RDTMON_GROUP,
	RDT_NUM_GROUP,
};

/**
 * enum rdtgrp_mode - Mode of an RDT resource group
 * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
 * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
 * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
 * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
 *                          allowed AND the allocations are Cache Pseudo-Locked
 * @RDT_NUM_MODES: Total number of modes
 *
 * The mode of a resource group enables control over the allowed overlap
 * between allocations associated with different resource groups (classes
 * of service). The user can modify the mode of a resource group by
 * writing to the "mode" resctrl file associated with the resource group.
 *
 * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
 * writing the appropriate text to the "mode" file. A resource group enters
 * "pseudo-locked" mode after the schemata is written while the resource
 * group is in "pseudo-locksetup" mode.
 */
enum rdtgrp_mode {
	RDT_MODE_SHAREABLE = 0,
	RDT_MODE_EXCLUSIVE,
	RDT_MODE_PSEUDO_LOCKSETUP,
	RDT_MODE_PSEUDO_LOCKED,

	/* Must be last */
	RDT_NUM_MODES,
};
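
/*
 * For example (illustrative), assuming resctrl is mounted at
 * /sys/fs/resctrl, user space switches a group to exclusive mode with:
 *
 *	# echo exclusive > /sys/fs/resctrl/<group>/mode
 */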

/**
 * struct mongroup - store mon group's data in resctrl fs.
 * @mon_data_kn:		kernfs node for the mon_data directory
 * @parent:			parent rdtgrp
 * @crdtgrp_list:		child rdtgroup node list
 * @rmid:			rmid for this rdtgroup
 */
struct mongroup {
	struct kernfs_node	*mon_data_kn;
	struct rdtgroup		*parent;
	struct list_head	crdtgrp_list;
	u32			rmid;
};

/**
 * struct rdtgroup - store rdtgroup's data in resctrl file system.
 * @kn:				kernfs node
 * @rdtgroup_list:		linked list for all rdtgroups
 * @closid:			closid for this rdtgroup
 * @cpu_mask:			CPUs assigned to this rdtgroup
 * @flags:			status bits
 * @waitcount:			how many cpus expect to find this
 *				group when they acquire rdtgroup_mutex
 * @type:			indicates type of this rdtgroup - either
 *				monitor only or ctrl_mon group
 * @mon:			mongroup related data
 * @mode:			mode of resource group
 * @mba_mbps_event:		input monitoring event id when mba_sc is enabled
 * @plr:			pseudo-locked region
 */
struct rdtgroup {
	struct kernfs_node		*kn;
	struct list_head		rdtgroup_list;
	u32				closid;
	struct cpumask			cpu_mask;
	int				flags;
	atomic_t			waitcount;
	enum rdt_group_type		type;
	struct mongroup			mon;
	enum rdtgrp_mode		mode;
	enum resctrl_event_id		mba_mbps_event;
	struct pseudo_lock_region	*plr;
};

/* rdtgroup.flags */
#define	RDT_DELETED		1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST	1

/*
 * Define the file type flags for base and info directories.
 */
#define RFTYPE_INFO			BIT(0)

#define RFTYPE_BASE			BIT(1)

#define RFTYPE_CTRL			BIT(4)

#define RFTYPE_MON			BIT(5)

#define RFTYPE_TOP			BIT(6)

#define RFTYPE_RES_CACHE		BIT(8)

#define RFTYPE_RES_MB			BIT(9)

#define RFTYPE_DEBUG			BIT(10)

#define RFTYPE_CTRL_INFO		(RFTYPE_INFO | RFTYPE_CTRL)

#define RFTYPE_MON_INFO			(RFTYPE_INFO | RFTYPE_MON)

#define RFTYPE_TOP_INFO			(RFTYPE_INFO | RFTYPE_TOP)

#define RFTYPE_CTRL_BASE		(RFTYPE_BASE | RFTYPE_CTRL)

#define RFTYPE_MON_BASE			(RFTYPE_BASE | RFTYPE_MON)
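
/*
 * A note on how the flags combine (a summary, not a definition): each file's
 * fflags (see struct rftype below) are matched against the flags of the
 * directory being populated, so e.g. a file tagged
 * RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE is only created in "info" directories
 * belonging to cache resources.
 */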

/* List of all resource groups */
extern struct list_head rdt_all_groups;

extern int max_name_width;

/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	File name
 * @mode:	Access mode
 * @kf_ops:	File operations
 * @flags:	File specific RFTYPE_FLAGS_* flags
 * @fflags:	File specific RFTYPE_* flags
 * @seq_show:	Show content of the file
 * @write:	Write to the file
 */
struct rftype {
	char			*name;
	umode_t			mode;
	const struct kernfs_ops	*kf_ops;
	unsigned long		flags;
	unsigned long		fflags;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};

/**
 * struct mbm_state - status for each MBM counter in each domain
 * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
 * @prev_bw:	The most recent bandwidth in MBps
 */
struct mbm_state {
	u64	prev_bw_bytes;
	u32	prev_bw;
};
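
/*
 * Sketch of how these fields interact (the calculation itself lives in the
 * MBM overflow handler, not here): on each invocation the handler turns the
 * difference between the newly read byte count and @prev_bw_bytes into
 * @prev_bw, then stores the new byte count in @prev_bw_bytes for the next
 * interval.
 */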

extern struct mutex rdtgroup_mutex;

static inline const char *rdt_kn_name(const struct kernfs_node *kn)
{
	return rcu_dereference_check(kn->name, lockdep_is_held(&rdtgroup_mutex));
}

extern struct rdtgroup rdtgroup_default;

extern struct dentry *debugfs_resctrl;

extern enum resctrl_event_id mba_mbps_default_event;

void rdt_last_cmd_clear(void);

void rdt_last_cmd_puts(const char *s);

__printf(1, 2)
void rdt_last_cmd_printf(const char *fmt, ...);

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);

void rdtgroup_kn_unlock(struct kernfs_node *kn);

int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);

int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask);

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off);

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v);

ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off);

int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
				 struct seq_file *s, void *v);

bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
			   unsigned long cbm, int closid, bool exclusive);

unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_ctrl_domain *d,
				  unsigned long cbm);

enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);

int rdtgroup_tasks_assigned(struct rdtgroup *r);

int closids_supported(void);

void closid_free(int closid);

int alloc_rmid(u32 closid);

void free_rmid(u32 closid, u32 rmid);

void resctrl_mon_resource_exit(void);

void mon_event_count(void *info);

int rdtgroup_mondata_show(struct seq_file *m, void *arg);

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
		    cpumask_t *cpumask, int evtid, int first);

int resctrl_mon_resource_init(void);

void mbm_setup_overflow_handler(struct rdt_mon_domain *dom,
				unsigned long delay_ms,
				int exclude_cpu);

void mbm_handle_overflow(struct work_struct *work);

bool is_mba_sc(struct rdt_resource *r);

void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
			     int exclude_cpu);

void cqm_handle_limbo(struct work_struct *work);

bool has_busy_rmid(struct rdt_mon_domain *d);

void __check_limbo(struct rdt_mon_domain *d, bool force_free);

void resctrl_file_fflags_init(const char *config, unsigned long fflags);

void rdt_staged_configs_clear(void);

bool closid_allocated(unsigned int closid);

int resctrl_find_cleanest_closid(void);

#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);

int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);

bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);

bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);

int rdt_pseudo_lock_init(void);

void rdt_pseudo_lock_release(void);

int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);

void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);

#else
static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
	return false;
}

static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
	return false;
}

static inline int rdt_pseudo_lock_init(void) { return 0; }
static inline void rdt_pseudo_lock_release(void) { }
static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { }
#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */

#endif /* _FS_RESCTRL_INTERNAL_H */