/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_RESCTRL_H
#define _ASM_X86_RESCTRL_H

#ifdef CONFIG_X86_CPU_RESCTRL

#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/resctrl_types.h>
#include <linux/sched.h>

/*
 * This value can never be a valid CLOSID, and is used when mapping a
 * (closid, rmid) pair to an index and back. On x86 only the RMID is
 * needed. The index is a software defined value.
 */
#define X86_RESCTRL_EMPTY_CLOSID         ((u32)~0)

/**
 * struct resctrl_pqr_state - State cache for the PQR MSR
 * @cur_rmid:		The cached Resource Monitoring ID
 * @cur_closid:		The cached Class Of Service ID
 * @default_rmid:	The user assigned Resource Monitoring ID
 * @default_closid:	The user assigned Class Of Service ID
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them. This also
 * stores the user configured per cpu CLOSID and RMID.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct resctrl_pqr_state {
	u32			cur_rmid;
	u32			cur_closid;
	u32			default_rmid;
	u32			default_closid;
};

DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);

extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;
extern unsigned int rdt_mon_features;

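/*
 * rdt_enable_key is the umbrella key checked on every context switch. It is
 * reference counted (static_branch_inc/dec below), so it stays enabled while
 * either allocation or monitoring is in use.
 */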
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

static inline bool resctrl_arch_alloc_capable(void)
{
	return rdt_alloc_capable;
}

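/*
 * The resctrl_arch_{enable,disable}_{alloc,mon}() helpers below flip
 * *_cpuslocked static branches and therefore require the caller to hold
 * cpus_read_lock().
 */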
static inline void resctrl_arch_enable_alloc(void)
{
	static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_alloc(void)
{
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

static inline bool resctrl_arch_mon_capable(void)
{
	return rdt_mon_capable;
}

static inline void resctrl_arch_enable_mon(void)
{
	static_branch_enable_cpuslocked(&rdt_mon_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_mon(void)
{
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

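/*
 * rdt_mon_features is a bitmap of the supported monitor events, indexed by
 * the QOS_*_EVENT_ID values.
 */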
static inline bool resctrl_arch_is_llc_occupancy_enabled(void)
{
	return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID));
}

static inline bool resctrl_arch_is_mbm_total_enabled(void)
{
	return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID));
}

static inline bool resctrl_arch_is_mbm_local_enabled(void)
{
	return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
}

/*
 * __resctrl_sched_in() - Writes the task's CLOSid/RMID to MSR_IA32_PQR_ASSOC
 *
 * The following considerations are made so that this has minimal impact
 * on the scheduler hot path:
 * - This stays a no-op unless running on a CPU that supports resource
 *   control or monitoring, and the resctrl filesystem has been mounted.
 * - Caches the per cpu CLOSid/RMID values and does the MSR write only
 *   when a task with a different CLOSid/RMID is scheduled in.
 * - RMIDs/CLOSids are allocated globally to keep this as simple as
 *   possible.
 *
 * Must be called with preemption disabled.
 */
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = READ_ONCE(state->default_closid);
	u32 rmid = READ_ONCE(state->default_rmid);
	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
		tmp = READ_ONCE(tsk->closid);
		if (tmp)
			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
		tmp = READ_ONCE(tsk->rmid);
		if (tmp)
			rmid = tmp;
	}

	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}
}

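/*
 * The hardware counts in units of x86_cache_occ_scale bytes, so a monitor
 * value can only be a multiple of that scale; round the caller's value
 * down accordingly.
 */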
static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
{
	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;

	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
	val /= scale;
	return val * scale;
}

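/*
 * Set the default CLOSID/RMID for @cpu; tasks without an explicit assignment
 * pick these up in __resctrl_sched_in(). WRITE_ONCE() pairs with the lockless
 * READ_ONCE() on the context switch path.
 */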
static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid,
							    u32 rmid)
{
	WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid);
	WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid);
}

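/*
 * Assign @closid/@rmid to @tsk. The fields are read locklessly at context
 * switch, hence WRITE_ONCE().
 */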
static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
						u32 closid, u32 rmid)
{
	WRITE_ONCE(tsk->closid, closid);
	WRITE_ONCE(tsk->rmid, rmid);
}

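/*
 * On x86 an RMID is a free-standing ID, so the closid argument to
 * resctrl_arch_match_rmid() is ignored; other architectures may need the
 * CLOSID to qualify the RMID.
 */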
static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
{
	return READ_ONCE(tsk->closid) == closid;
}

static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
					   u32 rmid)
{
	return READ_ONCE(tsk->rmid) == rmid;
}

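/*
 * resctrl_sched_in() is the scheduler hook: it is called with preemption
 * disabled from the context switch path, e.g. (sketch):
 *
 *	__switch_to(prev_p, next_p)
 *		...
 *		resctrl_sched_in(next_p);
 *
 * The static key keeps this a NOP while resctrl is not in use.
 */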
static inline void resctrl_sched_in(struct task_struct *tsk)
{
	if (static_branch_likely(&rdt_enable_key))
		__resctrl_sched_in(tsk);
}

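/*
 * Convert between the (closid, rmid) pair tracked by the filesystem code
 * and the index used to address monitor state. On x86 the index is simply
 * the RMID, so the CLOSID decodes to X86_RESCTRL_EMPTY_CLOSID.
 */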
static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
{
	*rmid = idx;
	*closid = X86_RESCTRL_EMPTY_CLOSID;
}

static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
{
	return rmid;
}

/* x86 can always read an rmid, nothing needs allocating */
struct rdt_resource;
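/*
 * might_sleep() keeps callers honest: architectures that do need to allocate
 * a hardware monitor here may block.
 */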
static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid)
{
	might_sleep();
	return NULL;
}

static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
					     void *ctx) { }

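/* Pseudo-locking hooks and CPU feature detection, implemented out of line. */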
u64 resctrl_arch_get_prefetch_disable_bits(void);
int resctrl_arch_pseudo_lock_fn(void *_plr);
int resctrl_arch_measure_cycles_lat_fn(void *_plr);
int resctrl_arch_measure_l2_residency(void *_plr);
int resctrl_arch_measure_l3_residency(void *_plr);
void resctrl_cpu_detect(struct cpuinfo_x86 *c);

#else

static inline void resctrl_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

#endif /* _ASM_X86_RESCTRL_H */