// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 */

#define pr_fmt(fmt)	"perf/amd_iommu: " fmt

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>

#include <asm/msr.h>

#include "../perf_event.h"
#include "iommu.h"

/* iommu pmu conf masks */
#define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
#define GET_DEVID(x)       (((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)       (((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)       (((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)  ((x)->conf1  & 0xFFFFULL)
#define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)
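
/*
 * For reference, the bit layout implied by the GET_* masks above
 * (mirrored by the sysfs format attributes below):
 *
 *   config:  [59:40] pasid | [39:24] domid | [23:8] devid | [7:0] csource
 *   config1: [51:32] pasid_mask | [31:16] domid_mask | [15:0] devid_mask
 */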

#define IOMMU_NAME_SIZE 24

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(domid,      "config:24-39");
PMU_FORMAT_ATTR(pasid,      "config:40-59");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(domid_mask, "config1:16-31");
PMU_FORMAT_ATTR(pasid_mask, "config1:32-51");
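
/*
 * Illustrative use of the format attributes (the device id and mask
 * values here are made-up examples, not defaults): counting total
 * translations (csource=0x05) for one device on the first IOMMU
 * might look like
 *
 *   perf stat -e 'amd_iommu_0/csource=0x05,devid=0x1234,devid_mask=0xffff/' -a sleep 1
 */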

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct device_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,        "csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,       "csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,           "csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,        "csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,         "csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,   "csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,   "csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,   "csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,   "csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,             "csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,             "csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,       "csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,       "csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,       "csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,             "csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,             "csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,           "csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,       "csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,                 "csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,    "csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,     "csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,         "csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,                "csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,                 "csource=0x18"),
	{ /* end: all zeroes */ },
};
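
/*
 * Each named event above is an alias for a csource value exposed via
 * the "events" sysfs group, so
 *
 *   perf stat -e amd_iommu_0/mem_trans_total/ -a sleep 1
 *
 * should be equivalent to passing csource=0x05 by hand (assuming the
 * first IOMMU is enumerated as amd_iommu_0).
 */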

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
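			/*
			 * Each bank owns four consecutive bits of
			 * cntr_assign_mask: shift = bank * 4 + cntr,
			 * which assumes at most four counters per bank.
			 */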
			shift = (bank * 4) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift))
				continue;

			piommu->cntr_assign_mask |= BIT_ULL(shift);
			event->hw.iommu_bank = bank;
			event->hw.iommu_cntr = cntr;
			retval = 0;
			goto out;
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
					u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	if ((bank >= max_banks) || (cntr >= max_cntrs))
		return -EINVAL;

	shift = (bank * 4) + cntr;
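	/*
	 * Must mirror the bank * 4 + cntr bit layout used by
	 * get_next_avail_iommu_bnk_cntr() when allocating.
	 */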

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~BIT_ULL(shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* The core tries every PMU at enumeration; accept only our event type */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores, so per-task
	 * (per-process) counting is not supported. Event sampling
	 * mode is not supported either.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}

static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

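	/*
	 * Each match register below takes its match value in the low bits
	 * and the corresponding mask in bits 63:32. Bit 31 is set whenever
	 * a value or mask is present; this is presumably the bit that arms
	 * the match logic (inferred from this code, not from the IOMMU
	 * specification).
	 */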
	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * To account for power-gating, which prevents writes to
	 * the counter, we need to enable the counter
	 * before setting up the counter register.
	 */
	perf_iommu_enable_event(event);

	if (flags & PERF_EF_RELOAD) {
		u64 count = 0;
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		/*
		 * Since the IOMMU PMU only supports counting mode,
		 * the counter always starts at zero.
		 */
		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &count);
	}

	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	/*
	 * Since the counter always starts at zero, simply
	 * accumulate the count for the event.
	 */
	local64_add(count, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * To account for power-gating, in which reading the counter would
	 * return zero, we need to read the register before disabling.
	 */
	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}

static const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};

static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%u (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %u.\n", idx);
		kfree(perf_iommu);
	}
	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * An IOMMU PMU is specific to one IOMMU and can function
	 * independently of the others, so go through all IOMMUs and
	 * ignore the ones that fail init, unless all of them fail.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}

device_initcall(amd_iommu_pc_init);
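
/*
 * After a successful init, each IOMMU should appear as a PMU under
 * /sys/bus/event_source/devices/amd_iommu_<n>/, exporting the "format",
 * "events", and "cpumask" attribute groups registered above (a sketch
 * of the expected layout, not captured output).
 */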