/* xref: /kvmtool/arm/aarch64/pmu.c (revision d9fdaad02dfdb0ea079245218058d60270264660) */
#include <dirent.h>
#include <sched.h>

#include "linux/cpumask.h"
#include "linux/err.h"

#include "kvm/fdt.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/util.h"

#include "arm-common/gic.h"

#include "asm/pmu.h"

static bool pmu_has_attr(struct kvm_cpu *vcpu, u64 attr)
{
	struct kvm_device_attr pmu_attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= attr,
	};
	int ret = ioctl(vcpu->vcpu_fd, KVM_HAS_DEVICE_ATTR, &pmu_attr);

	return ret == 0;
}

static void set_pmu_attr(struct kvm_cpu *vcpu, void *addr, u64 attr)
{
	struct kvm_device_attr pmu_attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.addr	= (u64)addr,
		.attr	= attr,
	};
	int ret;

	if (pmu_has_attr(vcpu, attr)) {
		ret = ioctl(vcpu->vcpu_fd, KVM_SET_DEVICE_ATTR, &pmu_attr);
		if (ret)
			die_perror("PMU KVM_SET_DEVICE_ATTR");
	} else {
		die_perror("PMU KVM_HAS_DEVICE_ATTR");
	}
}
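
/*
 * Summary of the KVM_ARM_VCPU_PMU_V3_CTRL attribute payloads used in this
 * file, as described in the kernel's Documentation/virt/kvm/devices/vcpu.rst
 * (a summary only, not an exhaustive list of the group's attributes):
 *
 *   KVM_ARM_VCPU_PMU_V3_IRQ     - addr points to an int holding the PPI number
 *   KVM_ARM_VCPU_PMU_V3_SET_PMU - addr points to an int holding the perf PMU
 *                                 id (the sysfs "type" value)
 *   KVM_ARM_VCPU_PMU_V3_INIT    - no payload; completes the vcpu PMU setup
 *                                 (the IRQ must already be set)
 */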

#define SYS_EVENT_SOURCE	"/sys/bus/event_source/devices/"
/*
 * int is 32 bits, so INT_MAX (roughly 2 * 10^9) has at most 10 decimal digits.
 * Make room for a newline and the NUL terminator: 10 + 1 + 1 = 12.
 */
#define PMU_ID_MAXLEN		12

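/*
 * find_pmu_cpumask() walks the perf sysfs hierarchy. As an illustration only
 * (the PMU names and the type value below are hypothetical, not taken from
 * this code), a big.LITTLE system might expose:
 *
 *   /sys/bus/event_source/devices/armv8_cortex_a53/cpus -> "0-3"
 *   /sys/bus/event_source/devices/armv8_cortex_a72/cpus -> "4-5"
 *   /sys/bus/event_source/devices/armv8_cortex_a72/type -> "8"
 *
 * The "cpus" file is a cpulist of the CPUs the PMU is attached to, and "type"
 * is the perf PMU id that is handed to KVM via KVM_ARM_VCPU_PMU_V3_SET_PMU.
 */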
static int find_pmu_cpumask(struct kvm *kvm, cpumask_t *cpumask)
{
	cpumask_t pmu_cpumask, tmp;
	char buf[PMU_ID_MAXLEN];
	struct dirent *dirent;
	char *cpulist, *path;
	int pmu_id = -ENXIO;
	unsigned long val;
	ssize_t fd_sz;
	int fd, ret;
	DIR *dir;

	memset(buf, 0, sizeof(buf));

	cpulist = calloc(1, PAGE_SIZE);
	if (!cpulist)
		die_perror("calloc");

	path = calloc(1, PAGE_SIZE);
	if (!path)
		die_perror("calloc");

	dir = opendir(SYS_EVENT_SOURCE);
	if (!dir) {
		pmu_id = -errno;
		goto out_free;
	}

	/* Make the compiler happy by copying the NUL terminating byte. */
	strncpy(path, SYS_EVENT_SOURCE, strlen(SYS_EVENT_SOURCE) + 1);

	while ((dirent = readdir(dir))) {
		if (dirent->d_type != DT_LNK)
			continue;

		strcat(path, dirent->d_name);
		strcat(path, "/cpus");
		fd = open(path, O_RDONLY);
		if (fd < 0)
			goto next_dir;

		fd_sz = read_file(fd, cpulist, PAGE_SIZE);
		if (fd_sz < 0) {
			pmu_id = -errno;
			goto out_free;
		}
		close(fd);

		ret = cpulist_parse(cpulist, &pmu_cpumask);
		if (ret) {
			pmu_id = ret;
			goto out_free;
		}

		if (!cpumask_and(&tmp, cpumask, &pmu_cpumask))
			goto next_dir;

		/*
		 * One CPU cannot be associated with more than one PMU, hence
		 * the set of CPUs which share PMU A and the set of CPUs which
		 * share PMU B are disjoint. If the target CPUs and the current
		 * PMU have at least one CPU in common, but the target CPUs are
		 * not a subset of the current PMU, then a PMU which is
		 * associated with all the target CPUs does not exist. Stop
		 * searching for a PMU when this happens.
		 */
		if (!cpumask_subset(cpumask, &pmu_cpumask))
			goto out_free;

		strcpy(&path[strlen(path) - 4], "type");
		fd = open(path, O_RDONLY);
		if (fd < 0)
			goto next_dir;

		fd_sz = read_file(fd, buf, PMU_ID_MAXLEN - 1);
		if (fd_sz < 0) {
			pmu_id = -errno;
			goto out_free;
		}
		close(fd);

		val = strtoul(buf, NULL, 10);
		if (val > INT_MAX) {
			pmu_id = -EOVERFLOW;
			goto out_free;
		}
		pmu_id = (int)val;
		pr_debug("Using PMU %s (id %d)", dirent->d_name, pmu_id);
		break;

next_dir:
		/* Reset path. */
		memset(&path[strlen(SYS_EVENT_SOURCE)], '\0',
		       strlen(path) - strlen(SYS_EVENT_SOURCE));
	}

out_free:
	free(path);
	free(cpulist);
	return pmu_id;
}

/*
 * In the case of homogeneous systems, there is only one hardware PMU, and all
 * VCPUs will use the same PMU, regardless of the physical CPUs on which the
 * VCPU threads will be executing.
 *
 * For heterogeneous systems, there are 2 ways for the user to ensure that the
 * VM runs on CPUs that have the same PMU:
 *
 * 1. By pinning the entire VM to the desired CPUs, in which case kvmtool will
 * choose the PMU associated with the CPU on which the main thread is executing
 * (the thread that calls find_pmu()).
 *
 * 2. By setting the affinity mask for the VCPUs with the --vcpu-affinity
 * command line argument. All CPUs in the affinity mask must have the same PMU,
 * otherwise kvmtool will not be able to set a PMU.
 */
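/*
 * For example (the CPU numbers below are hypothetical, assuming CPUs 4-7
 * share a PMU; the exact --vcpu-affinity list syntax is described in the
 * kvmtool documentation):
 *
 *   taskset -c 4-7 lkvm run ...              # option 1: pin the whole VM
 *   lkvm run --vcpu-affinity 4,5,6,7 ...     # option 2: restrict the VCPUs
 */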
static int find_pmu(struct kvm *kvm)
{
	cpumask_t *cpumask;
	int i, this_cpu;

	cpumask = calloc(1, cpumask_size());
	if (!cpumask)
		die_perror("calloc");

	if (!kvm->arch.vcpu_affinity_cpuset) {
		this_cpu = sched_getcpu();
		if (this_cpu < 0)
			return -errno;
		cpumask_set_cpu(this_cpu, cpumask);
	} else {
		for (i = 0; i < CPU_SETSIZE; i++) {
			if (CPU_ISSET(i, kvm->arch.vcpu_affinity_cpuset))
				cpumask_set_cpu(i, cpumask);
		}
	}

	return find_pmu_cpumask(kvm, cpumask);
}

void pmu__generate_fdt_nodes(void *fdt, struct kvm *kvm)
{
	const char compatible[] = "arm,armv8-pmuv3";
	int irq = KVM_ARM_PMUv3_PPI;
	struct kvm_cpu *vcpu;
	int pmu_id = -ENXIO;
	int i;

	u32 cpu_mask = gic__get_fdt_irq_cpumask(kvm);
	u32 irq_prop[] = {
		cpu_to_fdt32(GIC_FDT_IRQ_TYPE_PPI),
		cpu_to_fdt32(irq - 16),
		cpu_to_fdt32(cpu_mask | IRQ_TYPE_LEVEL_HIGH),
	};

	if (!kvm->cfg.arch.has_pmuv3)
		return;

	if (pmu_has_attr(kvm->cpus[0], KVM_ARM_VCPU_PMU_V3_SET_PMU)) {
		pmu_id = find_pmu(kvm);
		if (pmu_id < 0) {
			pr_debug("Failed to find a PMU (errno: %d), "
				 "PMU events might not work", -pmu_id);
		}
	}

	for (i = 0; i < kvm->nrcpus; i++) {
		vcpu = kvm->cpus[i];
		set_pmu_attr(vcpu, &irq, KVM_ARM_VCPU_PMU_V3_IRQ);
		/*
		 * PMU IDs 0-5 are reserved; a positive value means a PMU was
		 * found.
		 */
		if (pmu_id > 0)
			set_pmu_attr(vcpu, &pmu_id, KVM_ARM_VCPU_PMU_V3_SET_PMU);
		set_pmu_attr(vcpu, NULL, KVM_ARM_VCPU_PMU_V3_INIT);
	}

	_FDT(fdt_begin_node(fdt, "pmu"));
	_FDT(fdt_property(fdt, "compatible", compatible, sizeof(compatible)));
	_FDT(fdt_property(fdt, "interrupts", irq_prop, sizeof(irq_prop)));
	_FDT(fdt_end_node(fdt));
}
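
/*
 * The node generated above corresponds to a DTS fragment along these lines
 * (the interrupt cell values are illustrative only: PPI type, PPI number,
 * then the level-trigger flag possibly OR-ed with a GIC CPU mask):
 *
 *   pmu {
 *           compatible = "arm,armv8-pmuv3";
 *           interrupts = <1 7 0xf04>;
 *   };
 */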
237