xref: /src/sys/sys/pmc.h (revision cb9e24221672a7f77c858518c292c1eac09b3740)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2003-2008, Joseph Koshy
5  * Copyright (c) 2007 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by A. Joseph Koshy under
9  * sponsorship from the FreeBSD Foundation and Google, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #ifndef _SYS_PMC_H_
34 #define	_SYS_PMC_H_
35 
36 #include <dev/hwpmc/pmc_events.h>
37 #include <sys/proc.h>
38 #include <sys/counter.h>
39 #include <machine/pmc_mdep.h>
40 #include <machine/profile.h>
41 #ifdef _KERNEL
42 #include <sys/epoch.h>
43 #include <ck_queue.h>
44 #endif
45 
46 #define	PMC_MODULE_NAME		"hwpmc"
47 #define	PMC_NAME_MAX		64 /* HW counter name size */
48 #define	PMC_CLASS_MAX		8  /* max #classes of PMCs per-system */
49 
50 /*
51  * Kernel<->userland API version number [MMmmpppp]
52  *
53  * Major numbers are to be incremented when an incompatible change to
54  * the ABI occurs that older clients will not be able to handle.
55  *
56  * Minor numbers are incremented when a backwards compatible change
57  * occurs that allows older correct programs to run unchanged.  For
58  * example, when support for a new PMC type is added.
59  *
60  * The patch version is incremented for every bug fix.
61  */
62 #define	PMC_VERSION_MAJOR	0x0A
63 #define	PMC_VERSION_MINOR	0x01
64 #define	PMC_VERSION_PATCH	0x0000
65 
66 #define	PMC_VERSION		(PMC_VERSION_MAJOR << 24 |		\
67 	PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)
68 
69 #define PMC_CPUID_LEN 64
70 /* cpu model name for pmu lookup */
71 extern char pmc_cpuid[PMC_CPUID_LEN];
72 
73 /*
74  * Kinds of CPUs known.
75  *
76  * We keep track of CPU variants that need to be distinguished in
77  * some way for PMC operations.  CPU names are grouped by manufacturer
78  * and numbered sparsely in order to minimize changes to the ABI involved
79  * when new CPUs are added.
80  *
81  * Please keep the pmc(3) manual page in sync with this list.
82  */
83 #define	__PMC_CPUS()								\
84     __PMC_CPU(AMD_K8,			0x01,	"AMD K8")			\
85     __PMC_CPU(INTEL_CORE,		0x87,	"Intel Core Solo/Duo")		\
86     __PMC_CPU(INTEL_CORE2,		0x88,	"Intel Core2")			\
87     __PMC_CPU(INTEL_CORE2EXTREME,	0x89,	"Intel Core2 Extreme")		\
88     __PMC_CPU(INTEL_ATOM,		0x8A,	"Intel Atom")			\
89     __PMC_CPU(INTEL_COREI7,		0x8B,	"Intel Core i7")		\
90     __PMC_CPU(INTEL_WESTMERE,		0x8C,	"Intel Westmere")		\
91     __PMC_CPU(INTEL_SANDYBRIDGE,	0x8D,	"Intel Sandy Bridge")		\
92     __PMC_CPU(INTEL_IVYBRIDGE,		0x8E,	"Intel Ivy Bridge")		\
93     __PMC_CPU(INTEL_SANDYBRIDGE_XEON,	0x8F,	"Intel Sandy Bridge Xeon")	\
94     __PMC_CPU(INTEL_IVYBRIDGE_XEON,	0x90,	"Intel Ivy Bridge Xeon")	\
95     __PMC_CPU(INTEL_HASWELL,		0x91,	"Intel Haswell")		\
96     __PMC_CPU(INTEL_ATOM_SILVERMONT,	0x92,	"Intel Atom Silvermont")	\
97     __PMC_CPU(INTEL_NEHALEM_EX,		0x93,	"Intel Nehalem Xeon 7500")	\
98     __PMC_CPU(INTEL_WESTMERE_EX,	0x94,	"Intel Westmere Xeon E7")	\
99     __PMC_CPU(INTEL_HASWELL_XEON,	0x95,	"Intel Haswell Xeon E5 v3")	\
100     __PMC_CPU(INTEL_BROADWELL,		0x96,	"Intel Broadwell")		\
101     __PMC_CPU(INTEL_BROADWELL_XEON,	0x97,	"Intel Broadwell Xeon")		\
102     __PMC_CPU(INTEL_SKYLAKE,		0x98,	"Intel Skylake")		\
103     __PMC_CPU(INTEL_SKYLAKE_XEON,	0x99,	"Intel Skylake Xeon")		\
104     __PMC_CPU(INTEL_ATOM_GOLDMONT,	0x9A,	"Intel Atom Goldmont")		\
105     __PMC_CPU(INTEL_ICELAKE,		0x9B,	"Intel Icelake")		\
106     __PMC_CPU(INTEL_ICELAKE_XEON,	0x9C,	"Intel Icelake Xeon")		\
107     __PMC_CPU(INTEL_ALDERLAKE,		0x9D,	"Intel Alderlake")		\
108     __PMC_CPU(INTEL_ATOM_GOLDMONT_P,	0x9E,	"Intel Atom Goldmont Plus")	\
109     __PMC_CPU(INTEL_ATOM_TREMONT,	0x9F,	"Intel Atom Tremont")		\
110     __PMC_CPU(INTEL_EMERALD_RAPIDS,	0xA0,	"Intel Emerald Rapids")		\
111     __PMC_CPU(INTEL_ALDERLAKEN,		0xA1,	"Intel AlderlakeN")		\
112     __PMC_CPU(INTEL_GRANITE_RAPIDS,	0xA2,	"Intel Granite Rapids")		\
113     __PMC_CPU(INTEL_METEOR_LAKE,	0xA3,	"Intel Meteorlake")		\
114     __PMC_CPU(PPC_7450,			0x300,	"PowerPC MPC7450")		\
115     __PMC_CPU(PPC_E500,			0x340,	"PowerPC e500 Core")		\
116     __PMC_CPU(PPC_970,			0x380,	"IBM PowerPC 970")		\
117     __PMC_CPU(PPC_POWER8,		0x390,	"IBM POWER8")			\
118     __PMC_CPU(GENERIC,			0x400,	"Generic")			\
119     __PMC_CPU(ARMV7_CORTEX_A5,		0x500,	"ARMv7 Cortex A5")		\
120     __PMC_CPU(ARMV7_CORTEX_A7,		0x501,	"ARMv7 Cortex A7")		\
121     __PMC_CPU(ARMV7_CORTEX_A8,		0x502,	"ARMv7 Cortex A8")		\
122     __PMC_CPU(ARMV7_CORTEX_A9,		0x503,	"ARMv7 Cortex A9")		\
123     __PMC_CPU(ARMV7_CORTEX_A15,		0x504,	"ARMv7 Cortex A15")		\
124     __PMC_CPU(ARMV7_CORTEX_A17,		0x505,	"ARMv7 Cortex A17")		\
125     __PMC_CPU(ARMV8_CORTEX_A53,		0x600,	"ARMv8 Cortex A53")		\
126     __PMC_CPU(ARMV8_CORTEX_A57,		0x601,	"ARMv8 Cortex A57")		\
127     __PMC_CPU(ARMV8_CORTEX_A76,		0x602,	"ARMv8 Cortex A76")
128 
/*
 * Expand the __PMC_CPUS() table above into 'enum pmc_cputype';
 * each CPU kind takes the sparse value assigned in the table.
 */
enum pmc_cputype {
#undef	__PMC_CPU
#define	__PMC_CPU(S,V,D)	PMC_CPU_##S = V,
	__PMC_CPUS()
};

/* Range of valid CPU kinds (values are sparse within this range). */
#define	PMC_CPU_FIRST	PMC_CPU_AMD_K8
#define	PMC_CPU_LAST	PMC_CPU_ARMV8_CORTEX_A76
137 
138 /*
139  * Classes of PMCs
140  */
141 #define	__PMC_CLASSES()								\
142     __PMC_CLASS(TSC,		0x00,	"CPU Timestamp counter")		\
143     __PMC_CLASS(K8,		0x02,	"AMD K8 performance counters")		\
144     __PMC_CLASS(IBS,		0x03,	"AMD IBS performance counters")		\
145     __PMC_CLASS(IAF,		0x06,	"Intel Core2/Atom, fixed function")	\
146     __PMC_CLASS(IAP,		0x07,	"Intel Core...Atom, programmable")	\
147     __PMC_CLASS(UCF,		0x08,	"Intel Uncore fixed function")		\
148     __PMC_CLASS(UCP,		0x09,	"Intel Uncore programmable")		\
149     __PMC_CLASS(PPC7450,	0x0D,	"Motorola MPC7450 class")		\
150     __PMC_CLASS(PPC970,		0x0E,	"IBM PowerPC 970 class")		\
151     __PMC_CLASS(SOFT,		0x0F,	"Software events")			\
152     __PMC_CLASS(ARMV7,		0x10,	"ARMv7")				\
153     __PMC_CLASS(ARMV8,		0x11,	"ARMv8")				\
154     __PMC_CLASS(E500,		0x13,	"Freescale e500 class")			\
155     __PMC_CLASS(POWER8,		0x15,	"IBM POWER8 class")			\
156     __PMC_CLASS(DMC620_PMU_CD2,	0x16,	"ARM DMC620 Memory Controller PMU CLKDIV2") \
157     __PMC_CLASS(DMC620_PMU_C,	0x17,	"ARM DMC620 Memory Controller PMU CLK")	\
158     __PMC_CLASS(CMN600_PMU,	0x18,	"Arm CoreLink CMN600 Coherent Mesh Network PMU")
159 
/*
 * Expand the __PMC_CLASSES() table above into 'enum pmc_class';
 * each class takes the fixed ABI value assigned in the table.
 */
enum pmc_class {
#undef  __PMC_CLASS
#define	__PMC_CLASS(S,V,D)	PMC_CLASS_##S = V,
	__PMC_CLASSES()
};

/* Range of valid PMC classes (values are sparse within this range). */
#define	PMC_CLASS_FIRST	PMC_CLASS_TSC
#define	PMC_CLASS_LAST	PMC_CLASS_CMN600_PMU
168 
169 /*
170  * A PMC can be in the following states:
171  *
172  * Hardware states:
173  *   DISABLED   -- administratively prohibited from being used.
174  *   FREE       -- HW available for use
175  * Software states:
176  *   ALLOCATED  -- allocated
177  *   STOPPED    -- allocated, but not counting events
178  *   RUNNING    -- allocated, and in operation; 'pm_runcount'
179  *                 holds the number of CPUs using this PMC at
180  *                 a given instant
181  *   DELETED    -- being destroyed
182  */
183 
184 #define	__PMC_HWSTATES()			\
185 	__PMC_STATE(DISABLED)			\
186 	__PMC_STATE(FREE)
187 
188 #define	__PMC_SWSTATES()			\
189 	__PMC_STATE(ALLOCATED)			\
190 	__PMC_STATE(STOPPED)			\
191 	__PMC_STATE(RUNNING)			\
192 	__PMC_STATE(DELETED)
193 
194 #define	__PMC_STATES()				\
195 	__PMC_HWSTATES()			\
196 	__PMC_SWSTATES()
197 
/*
 * Expand the state lists above into 'enum pmc_state'.  PMC_STATE_MAX
 * is a count sentinel and is deliberately outside the
 * PMC_STATE_FIRST..PMC_STATE_LAST range of valid states.
 */
enum pmc_state {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	PMC_STATE_##S,
	__PMC_STATES()
	__PMC_STATE(MAX)
};

#define	PMC_STATE_FIRST	PMC_STATE_DISABLED
#define	PMC_STATE_LAST	PMC_STATE_DELETED
207 
208 /*
 * An allocated PMC may be used as a 'global' counter or as a
 * 'thread-private' one.  Each such mode of use can be in either
211  * statistical sampling mode or in counting mode.  Thus a PMC in use
212  *
213  * SS i.e., SYSTEM STATISTICAL  -- system-wide statistical profiling
214  * SC i.e., SYSTEM COUNTER      -- system-wide counting mode
215  * TS i.e., THREAD STATISTICAL  -- thread virtual, statistical profiling
216  * TC i.e., THREAD COUNTER      -- thread virtual, counting mode
217  *
218  * Statistical profiling modes rely on the PMC periodically delivering
 * an interrupt to the CPU (when the configured number of events have
220  * been measured), so the PMC must have the ability to generate
221  * interrupts.
222  *
223  * In counting modes, the PMC counts its configured events, with the
224  * value of the PMC being read whenever needed by its owner process.
225  *
226  * The thread specific modes "virtualize" the PMCs -- the PMCs appear
227  * to be thread private and count events only when the profiled thread
228  * actually executes on the CPU.
229  *
230  * The system-wide "global" modes keep the PMCs running all the time
231  * and are used to measure the behaviour of the whole system.
232  */
233 
/* The four PMC usage modes described above: SS, SC, TS, TC. */
#define	__PMC_MODES()				\
	__PMC_MODE(SS,	0)			\
	__PMC_MODE(SC,	1)			\
	__PMC_MODE(TS,	2)			\
	__PMC_MODE(TC,	3)

enum pmc_mode {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	PMC_MODE_##M = N,
	__PMC_MODES()
};

#define	PMC_MODE_FIRST	PMC_MODE_SS
#define	PMC_MODE_LAST	PMC_MODE_TC

/*
 * Mode predicates: each mode is exactly one of counting/sampling and
 * one of system/virtual, so each macro matches two of the four modes.
 */
#define	PMC_IS_COUNTING_MODE(mode)				\
	((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
#define	PMC_IS_SYSTEM_MODE(mode)				\
	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
#define	PMC_IS_SAMPLING_MODE(mode)				\
	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
#define	PMC_IS_VIRTUAL_MODE(mode)				\
	((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)
257 
258 /*
259  * PMC row disposition
260  */
261 
/*
 * PMC row dispositions.  Note: unlike the sibling __PMC_CPUS(),
 * __PMC_CLASSES() and __PMC_MODES() list macros, this one used to
 * declare an unused parameter while being invoked with an empty
 * argument list; the parameter has been dropped.
 */
#define	__PMC_DISPOSITIONS()					\
	__PMC_DISP(STANDALONE)	/* global/disabled counters */	\
	__PMC_DISP(FREE)	/* free/available */		\
	__PMC_DISP(THREAD)	/* thread-virtual PMCs */	\
	__PMC_DISP(UNKNOWN)	/* sentinel */

enum pmc_disp {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	PMC_DISP_##D ,
	__PMC_DISPOSITIONS()
};

/* Valid dispositions; the UNKNOWN sentinel is excluded. */
#define	PMC_DISP_FIRST	PMC_DISP_STANDALONE
#define	PMC_DISP_LAST	PMC_DISP_THREAD
276 
277 /*
278  * Counter capabilities
279  *
280  * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
281  */
282 
283 #define	__PMC_CAPS()							\
284 	__PMC_CAP(INTERRUPT,	0, "generate interrupts")		\
285 	__PMC_CAP(USER,		1, "count user-mode events")		\
286 	__PMC_CAP(SYSTEM,	2, "count system-mode events")		\
287 	__PMC_CAP(EDGE,		3, "do edge detection of events")	\
288 	__PMC_CAP(THRESHOLD,	4, "ignore events below a threshold")	\
289 	__PMC_CAP(READ,		5, "read PMC counter")			\
290 	__PMC_CAP(WRITE,	6, "reprogram PMC counter")		\
291 	__PMC_CAP(INVERT,	7, "invert comparison sense")		\
292 	__PMC_CAP(QUALIFIER,	8, "further qualify monitored events")	\
293 	__PMC_CAP(PRECISE,	9, "perform precise sampling")		\
294 	__PMC_CAP(TAGGING,	10, "tag upstream events")		\
295 	__PMC_CAP(CASCADE,	11, "cascade counters")			\
296 	__PMC_CAP(SYSWIDE,	12, "system wide counter")		\
297 	__PMC_CAP(DOMWIDE,	13, "NUMA domain wide counter")
298 
/*
 * Expand the __PMC_CAPS() table above into 'enum pmc_caps'; each
 * capability becomes a distinct bit so values can be OR-ed into a
 * capability mask (e.g. 'pm_caps').
 */
enum pmc_caps
{
#undef	__PMC_CAP
#define	__PMC_CAP(NAME, VALUE, DESCR)	PMC_CAP_##NAME = (1 << VALUE) ,
	__PMC_CAPS()
};

#define	PMC_CAP_FIRST		PMC_CAP_INTERRUPT
#define	PMC_CAP_LAST		PMC_CAP_DOMWIDE
308 
309 /*
310  * PMC Event Numbers
311  *
312  * These are generated from the definitions in "dev/hwpmc/pmc_events.h".
313  */
314 
/*
 * Expand the event table from <dev/hwpmc/pmc_events.h>: each
 * __PMC_EV_BLOCK(C,V) opens class C's event number space so that the
 * following enumerators start at V, and each __PMC_EV(C,N) allocates
 * the next event id in that space.
 */
enum pmc_event {
#undef	__PMC_EV
#undef	__PMC_EV_BLOCK
#define	__PMC_EV_BLOCK(C,V)	PMC_EV_ ## C ## __BLOCK_START = (V) - 1 ,
#define	__PMC_EV(C,N)		PMC_EV_ ## C ## _ ## N ,
	__PMC_EVENTS()
};
322 
323 /*
324  * PMC SYSCALL INTERFACE
325  */
326 
327 /*
328  * "PMC_OPS" -- these are the commands recognized by the kernel
329  * module, and are used when performing a system call from userland.
330  */
331 #define	__PMC_OPS()							\
332 	__PMC_OP(CONFIGURELOG, "Set log file")				\
333 	__PMC_OP(FLUSHLOG, "Flush log file")				\
334 	__PMC_OP(GETCPUINFO, "Get system CPU information")		\
335 	__PMC_OP(GETDRIVERSTATS, "Get driver statistics")		\
336 	__PMC_OP(GETMODULEVERSION, "Get module version")		\
337 	__PMC_OP(GETPMCINFO, "Get per-cpu PMC information")		\
338 	__PMC_OP(PMCADMIN, "Set PMC state")				\
339 	__PMC_OP(PMCALLOCATE, "Allocate and configure a PMC")		\
340 	__PMC_OP(PMCATTACH, "Attach a PMC to a process")		\
341 	__PMC_OP(PMCDETACH, "Detach a PMC from a process")		\
342 	__PMC_OP(PMCGETMSR, "Get a PMC's hardware address")		\
343 	__PMC_OP(PMCRELEASE, "Release a PMC")				\
344 	__PMC_OP(PMCRW, "Read/Set a PMC")				\
345 	__PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate")	\
346 	__PMC_OP(PMCSTART, "Start a PMC")				\
347 	__PMC_OP(PMCSTOP, "Stop a PMC")					\
348 	__PMC_OP(WRITELOG, "Write a cookie to the log file")		\
349 	__PMC_OP(CLOSELOG, "Close log file")				\
350 	__PMC_OP(GETDYNEVENTINFO, "Get dynamic events list")		\
351 	__PMC_OP(GETCAPS, "Get capabilities")
352 
/* Expand the __PMC_OPS() table above into the syscall opcode enum. */
enum pmc_ops {
#undef	__PMC_OP
#define	__PMC_OP(N, D)	PMC_OP_##N,
	__PMC_OPS()
};
358 
359 /*
360  * Flags used in operations on PMCs.
361  */
362 
363 #define	PMC_F_UNUSED1		0x00000001 /* unused */
364 #define	PMC_F_DESCENDANTS	0x00000002 /*OP ALLOCATE track descendants */
365 #define	PMC_F_LOG_PROCCSW	0x00000004 /*OP ALLOCATE track ctx switches */
366 #define	PMC_F_LOG_PROCEXIT	0x00000008 /*OP ALLOCATE log proc exits */
367 #define	PMC_F_NEWVALUE		0x00000010 /*OP RW write new value */
368 #define	PMC_F_OLDVALUE		0x00000020 /*OP RW get old value */
369 
370 /* V2 API */
371 #define	PMC_F_CALLCHAIN		0x00000080 /*OP ALLOCATE capture callchains */
372 #define	PMC_F_USERCALLCHAIN	0x00000100 /*OP ALLOCATE use userspace stack */
373 
374 /* V10 API */
375 #define	PMC_F_EV_PMU		0x00000200 /*
376 					    * OP ALLOCATE: pm_ev has special
377 					    * userspace meaning; counter
378 					    * configuration is communicated
379 					    * through class-dependent fields
380 					    */
381 
382 /* internal flags */
383 #define	PMC_F_ATTACHED_TO_OWNER	0x00010000 /*attached to owner*/
384 #define	PMC_F_NEEDS_LOGFILE	0x00020000 /*needs log file */
385 #define	PMC_F_ATTACH_DONE	0x00040000 /*attached at least once */
386 
387 #define	PMC_CALLCHAIN_DEPTH_MAX	512
388 
389 #define	PMC_CC_F_USERSPACE	0x01	   /*userspace callchain*/
390 #define	PMC_CC_F_MULTIPART	0x02	   /*multipart data*/
391 
392 /*
393  * Cookies used to denote allocated PMCs, and the values of PMCs.
394  */
395 
396 typedef uint32_t	pmc_id_t;
397 typedef uint64_t	pmc_value_t;
398 
399 #define	PMC_ID_INVALID		(~ (pmc_id_t) 0)
400 
/*
 * PMC IDs have the following format:
 *
 * +-----+------+-------+-----------+
 * | CPU | MODE | CLASS | ROW INDEX |
 * +-----+------+-------+-----------+
 *
 * where CPU is 12 bits, MODE 4, CLASS 8, and ROW INDEX 8 bits.
 * Field 'CPU' is set to the requested CPU for system-wide PMCs or
 * PMC_CPU_ANY for process-mode PMCs.  Field 'PMC MODE' is the
 * allocated PMC mode.  Field 'PMC CLASS' is the class of the PMC.
 * Field 'ROW INDEX' is the row index for the PMC.
 *
 * The 'ROW INDEX' ranges over 0..NHWPMCS where NHWPMCS is the total
 * number of hardware PMCs on this cpu.
 */
417 
/* Extract the ROW INDEX, CLASS, MODE and CPU fields of a PMC id. */
#define	PMC_ID_TO_ROWINDEX(ID)	((ID) & 0xFF)
#define	PMC_ID_TO_CLASS(ID)	(((ID) & 0xFF00) >> 8)
#define	PMC_ID_TO_MODE(ID)	(((ID) & 0xF0000) >> 16)
#define	PMC_ID_TO_CPU(ID)	(((ID) & 0xFFF00000) >> 20)
/* Compose a PMC id from its constituent fields. */
#define	PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX)			\
	((((CPU) & 0xFFF) << 20) | (((MODE) & 0xF) << 16) |	\
	(((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF))
425 
426 /*
427  * Data structures for system calls supported by the pmc driver.
428  */
429 
430 /*
431  * OP PMCALLOCATE
432  *
433  * Allocate a PMC on the named CPU.
434  */
435 
436 #define	PMC_CPU_ANY	~0
437 
438 struct pmc_op_pmcallocate {
439 	uint32_t	pm_caps;	/* PMC_CAP_* */
440 	uint32_t	pm_cpu;		/* CPU number or PMC_CPU_ANY */
441 	enum pmc_class	pm_class;	/* class of PMC desired */
442 	enum pmc_event	pm_ev;		/* [enum pmc_event] desired */
443 	uint32_t	pm_flags;	/* additional modifiers PMC_F_* */
444 	enum pmc_mode	pm_mode;	/* desired mode */
445 	pmc_id_t	pm_pmcid;	/* [return] process pmc id */
446 	pmc_value_t	pm_count;	/* initial/sample count */
447 
448 	union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
449 };
450 
/*
 * OP PMCADMIN
 *
 * Set the administrative state (i.e., whether enabled or disabled) of
 * a PMC 'pm_pmc' on CPU 'pm_cpu'.  Note that 'pm_pmc' specifies an
 * absolute PMC number and need not have been first allocated by the
 * calling process.
 */

struct pmc_op_pmcadmin {
	int		pm_cpu;		/* CPU# */
	uint32_t	pm_flags;	/* flags */
	int		pm_pmc;         /* PMC# */
	enum pmc_state  pm_state;	/* desired state */
};

/*
 * OP PMCATTACH / OP PMCDETACH
 *
 * Attach/detach a PMC and a process.
 */

struct pmc_op_pmcattach {
	pmc_id_t	pm_pmc;		/* PMC to attach to */
	pid_t		pm_pid;		/* target process */
};

/*
 * OP PMCSETCOUNT
 *
 * Set the sampling rate (i.e., the reload count) for statistical counters.
 * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
 */

struct pmc_op_pmcsetcount {
	pmc_value_t	pm_count;	/* initial/sample count */
	pmc_id_t	pm_pmcid;	/* PMC id to set */
};

/*
 * OP PMCRW
 *
 * Read or write the value of a PMC named by 'pm_pmcid'.  'pm_pmcid'
 * needs to have been previously allocated using PMCALLOCATE.
 */

struct pmc_op_pmcrw {
	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE */
	pmc_id_t	pm_pmcid;	/* pmc id */
	pmc_value_t	pm_value;	/* new&returned value */
};
502 
/*
 * OP GETPMCINFO
 *
 * Retrieve PMC state for a named CPU.  The caller is expected to
 * allocate 'npmc' * 'struct pmc_info' bytes of space for the return
 * values.
 */

/* Per-PMC state returned for each hardware row on the CPU. */
struct pmc_info {
	char		pm_name[PMC_NAME_MAX]; /* pmc name */
	enum pmc_class	pm_class;	/* enum pmc_class */
	int		pm_enabled;	/* whether enabled */
	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
	pid_t		pm_ownerpid;	/* owner, or -1 */
	enum pmc_mode	pm_mode;	/* current mode [enum pmc_mode] */
	enum pmc_event	pm_event;	/* current event */
	uint32_t	pm_flags;	/* current flags */
	pmc_value_t	pm_reloadcount;	/* sampling counters only */
};

struct pmc_op_getpmcinfo {
	int32_t		pm_cpu;		/* 0 <= cpu < mp_maxid */
	struct pmc_info	pm_pmcs[];	/* space for 'npmc' structures */
};
527 
/*
 * OP GETCPUINFO
 *
 * Retrieve system CPU information.
 */

/* Description of one class of PMCs present on a CPU. */
struct pmc_classinfo {
	enum pmc_class	pm_class;	/* class id */
	uint32_t	pm_caps;	/* counter capabilities */
	uint32_t	pm_width;	/* width of the PMC */
	uint32_t	pm_num;		/* number of PMCs in class */
};

struct pmc_op_getcpuinfo {
	enum pmc_cputype pm_cputype; /* what kind of CPU */
	uint32_t	pm_ncpu;    /* max CPU number */
	uint32_t	pm_npmc;    /* #PMCs per CPU */
	uint32_t	pm_nclass;  /* #classes of PMCs */
	struct pmc_classinfo  pm_classes[PMC_CLASS_MAX];
};
548 
/*
 * OP CONFIGURELOG
 *
 * Configure a log file for writing system-wide statistics to.
 */

struct pmc_op_configurelog {
	int		pm_flags;
	int		pm_logfd;   /* logfile fd (or -1) */
};
559 
560 /*
561  * OP GETDRIVERSTATS
562  *
563  * Retrieve pmc(4) driver-wide statistics.
564  */
#ifdef _KERNEL
/* In-kernel driver statistics, kept as per-CPU counter(9) counters. */
struct pmc_driverstats {
	counter_u64_t	pm_intr_ignored;	/* #interrupts ignored */
	counter_u64_t	pm_intr_processed;	/* #interrupts processed */
	counter_u64_t	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
	counter_u64_t	pm_syscalls;		/* #syscalls */
	counter_u64_t	pm_syscall_errors;	/* #syscalls with errors */
	counter_u64_t	pm_buffer_requests;	/* #buffer requests */
	counter_u64_t	pm_buffer_requests_failed; /* #failed buffer requests */
	counter_u64_t	pm_log_sweeps;		/* #sample buffer processing
						   passes */
	counter_u64_t	pm_merges;		/* merged k+u */
	counter_u64_t	pm_overwrites;		/* UR overwrites */
};
#endif

/* Userland snapshot of the driver statistics above. */
struct pmc_op_getdriverstats {
	unsigned int	pm_intr_ignored;	/* #interrupts ignored */
	unsigned int	pm_intr_processed;	/* #interrupts processed */
	unsigned int	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
	unsigned int	pm_syscalls;		/* #syscalls */
	unsigned int	pm_syscall_errors;	/* #syscalls with errors */
	unsigned int	pm_buffer_requests;	/* #buffer requests */
	unsigned int	pm_buffer_requests_failed; /* #failed buffer requests */
	unsigned int	pm_log_sweeps;		/* #sample buffer processing
						   passes */
};
592 
/*
 * OP RELEASE / OP START / OP STOP
 *
 * Simple operations on a PMC id.
 */

struct pmc_op_simple {
	pmc_id_t	pm_pmcid;	/* PMC to operate on */
};

/*
 * OP WRITELOG
 *
 * Flush the current log buffer and write 4 bytes of user data to it.
 */

struct pmc_op_writelog {
	uint32_t	pm_userdata;	/* user cookie written to the log */
};

/*
 * OP GETMSR
 *
 * Retrieve the machine specific address associated with the allocated
 * PMC.  This number can be used subsequently with a read-performance-counter
 * instruction.
 */

struct pmc_op_getmsr {
	uint32_t	pm_msr;		/* machine specific address */
	pmc_id_t	pm_pmcid;	/* allocated pmc id */
};
625 
/*
 * OP GETDYNEVENTINFO
 *
 * Retrieve a PMC dynamic class events list.
 */

/* Name/code pair describing one dynamically enumerated event. */
struct pmc_dyn_event_descr {
	char		pm_ev_name[PMC_NAME_MAX];
	enum pmc_event	pm_ev_code;
};

struct pmc_op_getdyneventinfo {
	enum pmc_class			pm_class;	/* class queried */
	unsigned int			pm_nevent;	/* #valid entries */
	struct pmc_dyn_event_descr	pm_events[PMC_EV_DYN_COUNT];
};
642 
/*
 * OP GETCAPS
 *
 * Retrieve the PMC capabilities flags for this type of counter.
 */
648 
struct pmc_op_caps {
	pmc_id_t	pm_pmcid;	/* allocated pmc id */
	uint32_t	pm_caps;	/* [return] PMC_CAP_* capability mask */
};
653 
654 #ifdef _KERNEL
655 
656 #include <sys/malloc.h>
657 #include <sys/sysctl.h>
658 #include <sys/_cpuset.h>
659 
660 #include <machine/frame.h>
661 
662 #define	PMC_HASH_SIZE				1024
663 #define	PMC_MTXPOOL_SIZE			2048
664 #define	PMC_LOG_BUFFER_SIZE			256
665 #define	PMC_LOG_BUFFER_SIZE_MAX			(16 * 1024)
666 #define	PMC_NLOGBUFFERS_PCPU			32
667 #define	PMC_NLOGBUFFERS_PCPU_MEM_MAX		(32 * 1024)
668 #define	PMC_NSAMPLES				256
669 #define	PMC_CALLCHAIN_DEPTH			128
670 #define	PMC_THREADLIST_MAX			128
671 
672 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."
673 
674 /*
675  * Locking keys
676  *
677  * (b) - pmc_bufferlist_mtx (spin lock)
678  * (k) - pmc_kthread_mtx (sleep lock)
679  * (o) - po->po_mtx (spin lock)
680  * (g) - global_epoch_preempt (epoch)
681  * (p) - pmc_sx (sx)
682  */
683 
/*
 * PMC commands
 *
 * Argument block passed from userland for every pmc system call.
 */

struct pmc_syscall_args {
	register_t	pmop_code;	/* one of PMC_OP_* */
	void		*pmop_data;	/* syscall parameter */
};
692 
/*
 * Interface to processor specific stuff.
 */
696 
/*
 * struct pmc_descr
 *
 * Machine independent (i.e., the common parts) of a human readable
 * PMC description.
 */

struct pmc_descr {
	char		pd_name[PMC_NAME_MAX]; /* name */
	uint32_t	pd_caps;	/* capabilities (PMC_CAP_*) */
	enum pmc_class	pd_class;	/* class of the PMC */
	uint32_t	pd_width;	/* width in bits */
};
710 
/*
 * struct pmc_target
 *
 * This structure records all the target processes associated with a
 * PMC.  One entry exists per attached target, linked from the owning
 * PMC's 'pm_targets' list.
 */

struct pmc_target {
	LIST_ENTRY(pmc_target)	pt_next;	/* linkage in pm_targets */
	struct pmc_process	*pt_process; /* target descriptor */
};
722 
723 /*
724  * struct pmc
725  *
726  * Describes each allocated PMC.
727  *
728  * Each PMC has precisely one owner, namely the process that allocated
729  * the PMC.
730  *
731  * A PMC may be attached to multiple target processes.  The
732  * 'pm_targets' field links all the target processes being monitored
733  * by this PMC.
734  *
735  * The 'pm_savedvalue' field is protected by a mutex.
736  *
737  * On a multi-cpu machine, multiple target threads associated with a
738  * process-virtual PMC could be concurrently executing on different
739  * CPUs.  The 'pm_runcount' field is atomically incremented every time
740  * the PMC gets scheduled on a CPU and atomically decremented when it
741  * get descheduled.  Deletion of a PMC is only permitted when this
742  * field is '0'.
743  *
744  */
/*
 * Per-CPU PMC state; aligned to a cache line to avoid false sharing
 * between CPUs updating their own entries.
 */
struct pmc_pcpu_state {
	uint32_t pps_overflowcnt;	/* count overflow interrupts */
	uint8_t pps_stalled;	/* NOTE(review): appears to flag a stalled
				   sampling PMC — confirm against hwpmc_mod.c */
	uint8_t pps_cpustate;	/* NOTE(review): per-CPU run/stop state —
				   confirm against hwpmc_mod.c */
} __aligned(CACHE_LINE_SIZE);
struct pmc {
	LIST_HEAD(,pmc_target)	pm_targets;	/* list of target processes */
	LIST_ENTRY(pmc)		pm_next;	/* owner's list */

	/*
	 * System-wide PMCs are allocated on a CPU and are not moved
	 * around.  For system-wide PMCs we record the CPU the PMC was
	 * allocated on in the 'CPU' field of the pmc ID.
	 *
	 * Virtual PMCs run on whichever CPU is currently executing
	 * their targets' threads.  For these PMCs we need to save
	 * their current PMC counter values when they are taken off
	 * CPU.
	 */

	union {
		pmc_value_t	pm_savedvalue;	/* Virtual PMCS */
	} pm_gv;

	/*
	 * For sampling mode PMCs, we keep track of the PMC's "reload
	 * count", which is the counter value to be loaded in when
	 * arming the PMC for the next counting session.  For counting
	 * modes on PMCs that are read-only (e.g., the x86 TSC), we
	 * keep track of the initial value at the start of
	 * counting-mode operation.
	 */

	union {
		pmc_value_t	pm_reloadcount;	/* sampling PMC modes */
		pmc_value_t	pm_initial;	/* counting PMC modes */
	} pm_sc;

	/* Per-CPU state (see struct pmc_pcpu_state above). */
	struct pmc_pcpu_state *pm_pcpu_state;
	volatile cpuset_t pm_cpustate;	/* CPUs where PMC should be active */
	uint32_t	pm_caps;	/* PMC capabilities */
	enum pmc_event	pm_event;	/* event being measured */
	uint32_t	pm_flags;	/* additional flags PMC_F_... */
	struct pmc_owner *pm_owner;	/* owner thread state */
	counter_u64_t		pm_runcount;	/* #cpus currently on */
	enum pmc_state	pm_state;	/* current PMC state */

	/*
	 * The PMC ID field encodes the row-index for the PMC, its
	 * mode, class and the CPU# associated with the PMC.
	 */

	pmc_id_t	pm_id;		/* allocated PMC id */
	enum pmc_class pm_class;	/* class, duplicated from pm_id */

	/* md extensions */
	union pmc_md_pmc	pm_md;
};
803 
804 /*
805  * Accessor macros for 'struct pmc'
806  */
807 
808 #define	PMC_TO_MODE(P)		PMC_ID_TO_MODE((P)->pm_id)
809 #define	PMC_TO_CLASS(P)		PMC_ID_TO_CLASS((P)->pm_id)
810 #define	PMC_TO_ROWINDEX(P)	PMC_ID_TO_ROWINDEX((P)->pm_id)
811 #define	PMC_TO_CPU(P)		PMC_ID_TO_CPU((P)->pm_id)
812 
/*
 * struct pmc_threadpmcstate
 *
 * Record per-PMC, per-thread state.
 */
struct pmc_threadpmcstate {
	pmc_value_t	pt_pmcval;	/* per-thread reload count */
};

/*
 * struct pmc_thread
 *
 * Record a 'target' thread being profiled.  The trailing flexible
 * array holds one pmc_threadpmcstate slot per hardware row.
 */
struct pmc_thread {
	LIST_ENTRY(pmc_thread) pt_next;		/* linked list */
	struct thread	*pt_td;			/* target thread */
	struct pmc_threadpmcstate pt_pmcs[];	/* per-PMC state */
};
832 
833 /*
834  * struct pmc_process
835  *
836  * Record a 'target' process being profiled.
837  *
838  * The target process being profiled could be different from the owner
839  * process which allocated the PMCs.  Each target process descriptor
840  * is associated with NHWPMC 'struct pmc *' pointers.  Each PMC at a
841  * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
842  * array.  The size of this structure is thus PMC architecture
843  * dependent.
844  *
845  */
846 
/* Per-row binding of a target process to an attached PMC. */
struct pmc_targetstate {
	struct pmc	*pp_pmc;   /* target PMC */
	pmc_value_t	pp_pmcval; /* per-process value */
};

/* Target process descriptor; see the comment above for sizing. */
struct pmc_process {
	LIST_ENTRY(pmc_process) pp_next;	/* hash chain */
	LIST_HEAD(,pmc_thread) pp_tds;		/* list of threads */
	struct mtx	*pp_tdslock;		/* lock on pp_tds thread list */
	int		pp_refcnt;		/* reference count */
	uint32_t	pp_flags;		/* flags PMC_PP_* */
	struct proc	*pp_proc;		/* target process */
	struct pmc_targetstate pp_pmcs[];       /* NHWPMCs */
};
861 
862 #define	PMC_PP_ENABLE_MSR_ACCESS	0x00000001
863 
864 /*
865  * struct pmc_owner
866  *
867  * We associate a PMC with an 'owner' process.
868  *
869  * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
870  * lifetime, where NCPUS is the numbers of CPUS in the system and
871  * NHWPMC is the number of hardware PMCs per CPU.  These are
872  * maintained in the list headed by the 'po_pmcs' to save on space.
873  *
874  */
875 
/* Owner process descriptor; locking keys are listed above. */
struct pmc_owner  {
	LIST_ENTRY(pmc_owner)	po_next;	/* hash chain */
	CK_LIST_ENTRY(pmc_owner)	po_ssnext;	/* (g/p) list of SS PMC owners */
	LIST_HEAD(, pmc)	po_pmcs;	/* owned PMC list */
	TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
	struct mtx		po_mtx;		/* spin lock for (o) */
	struct proc		*po_owner;	/* owner proc */
	uint32_t		po_flags;	/* (k) flags PMC_PO_* */
	struct proc		*po_kthread;	/* (k) helper kthread */
	struct file		*po_file;	/* file reference */
	int			po_error;	/* recorded error */
	short			po_sscount;	/* # SS PMCs owned */
	short			po_logprocmaps;	/* global mappings done */
	struct pmclog_buffer	*po_curbuf[MAXCPU];	/* current log buffer */
};
891 
892 #define	PMC_PO_OWNS_LOGFILE		0x00000001 /* has a log file */
893 #define	PMC_PO_SHUTDOWN			0x00000010 /* in the process of shutdown */
894 #define	PMC_PO_INITIAL_MAPPINGS_DONE	0x00000020
895 
896 /*
897  * struct pmc_hw -- describe the state of the PMC hardware
898  *
899  * When in use, a HW PMC is associated with one allocated 'struct pmc'
900  * pointed to by field 'phw_pmc'.  When inactive, this field is NULL.
901  *
902  * On an SMP box, one or more HW PMC's in process virtual mode with
903  * the same 'phw_pmc' could be executing on different CPUs.  In order
904  * to handle this case correctly, we need to ensure that only
905  * incremental counts get added to the saved value in the associated
906  * 'struct pmc'.  The 'phw_save' field is used to keep the saved PMC
907  * value at the time the hardware is started during this context
908  * switch (i.e., the difference between the new (hardware) count and
909  * the saved count is atomically added to the count field in 'struct
910  * pmc' at context switch time).
911  *
912  */
913 
914 struct pmc_hw {
915 	uint32_t	phw_state;	/* see PHW_* macros below */
916 	struct pmc	*phw_pmc;	/* current thread PMC */
917 };
918 
/*
 * 'phw_state' encoding: bits 0..7 hold the PMC row index, bits 8..15
 * the CPU number, and bits 16..31 per-PMC flags.
 */
#define	PMC_PHW_RI_MASK		0x000000FF
#define	PMC_PHW_CPU_SHIFT	8
#define	PMC_PHW_CPU_MASK	0x0000FF00
#define	PMC_PHW_FLAGS_SHIFT	16
#define	PMC_PHW_FLAGS_MASK	0xFFFF0000

#define	PMC_PHW_INDEX_TO_STATE(ri)	((ri) & PMC_PHW_RI_MASK)
#define	PMC_PHW_STATE_TO_INDEX(state)	((state) & PMC_PHW_RI_MASK)
#define	PMC_PHW_CPU_TO_STATE(cpu)	(((cpu) << PMC_PHW_CPU_SHIFT) & \
	PMC_PHW_CPU_MASK)
#define	PMC_PHW_STATE_TO_CPU(state)	(((state) & PMC_PHW_CPU_MASK) >> \
	PMC_PHW_CPU_SHIFT)
#define	PMC_PHW_FLAGS_TO_STATE(flags)	(((flags) << PMC_PHW_FLAGS_SHIFT) & \
	PMC_PHW_FLAGS_MASK)
#define	PMC_PHW_STATE_TO_FLAGS(state)	(((state) & PMC_PHW_FLAGS_MASK) >> \
	PMC_PHW_FLAGS_SHIFT)
#define	PMC_PHW_FLAG_IS_ENABLED		(PMC_PHW_FLAGS_TO_STATE(0x01))
#define	PMC_PHW_FLAG_IS_SHAREABLE	(PMC_PHW_FLAGS_TO_STATE(0x02))
937 
938 /*
939  * struct pmc_sample
940  *
941  * Space for N (tunable) PC samples and associated control data.
942  */
943 
944 struct pmc_sample {
945 	uint16_t		ps_nsamples;	/* callchain depth */
946 	uint16_t		ps_nsamples_actual;
947 	uint16_t		ps_cpu;		/* cpu number */
948 	uint16_t		ps_flags;	/* other flags */
949 	lwpid_t			ps_tid;		/* thread id */
950 	pid_t			ps_pid;		/* process PID or -1 */
951 	int		ps_ticks; /* ticks at sample time */
952 	/* pad */
953 	struct thread		*ps_td;		/* which thread */
954 	struct pmc		*ps_pmc;	/* interrupting PMC */
955 	uintptr_t		*ps_pc;		/* (const) callchain start */
956 	uint64_t		ps_tsc;		/* tsc value */
957 };
958 
/* Distinguished 'ps_nsamples' state values. */
#define 	PMC_SAMPLE_FREE		((uint16_t) 0)	/* slot unused */
#define 	PMC_USER_CALLCHAIN_PENDING	((uint16_t) 0xFFFF) /* user callchain not yet captured */
961 
/*
 * struct pmc_samplebuffer
 *
 * A ring of sample slots.  'ps_prodidx' and 'ps_considx' are
 * free-running indices; they are reduced modulo the ring size via
 * 'pmc_sample_mask' at access time (see the PMC_*_SAMPLE() macros).
 */
struct pmc_samplebuffer {
	volatile uint64_t		ps_prodidx; /* producer index */
	volatile uint64_t		ps_considx; /* consumer index */
	uintptr_t		*ps_callchains;	/* all saved call chains */
	struct pmc_sample	ps_samples[];	/* array of sample entries */
};

/* Slot currently at the consumer index. */
#define PMC_CONS_SAMPLE(psb)					\
	(&(psb)->ps_samples[(psb)->ps_considx & pmc_sample_mask])

/* Slot at an arbitrary free-running index 'off'. */
#define PMC_CONS_SAMPLE_OFF(psb, off)							\
	(&(psb)->ps_samples[(off) & pmc_sample_mask])

/* Slot currently at the producer index. */
#define PMC_PROD_SAMPLE(psb)					\
	(&(psb)->ps_samples[(psb)->ps_prodidx & pmc_sample_mask])
977 
978 
979 /*
980  * struct pmc_multipart
981  *
982  * Multipart payload
983  */
984 struct pmc_multipart {
985 	char			pl_type;
986 	char			pl_length;
987 	uint64_t		pl_mpdata[10];
988 };
989 
990 /*
991  * struct pmc_cpustate
992  *
993  * A CPU is modelled as a collection of HW PMCs with space for additional
994  * flags.
995  */
996 
997 struct pmc_cpu {
998 	uint32_t	pc_state;	/* physical cpu number + flags */
999 	struct pmc_samplebuffer *pc_sb[3]; /* space for samples */
1000 	struct pmc_hw	*pc_hwpmcs[];	/* 'npmc' pointers */
1001 };
1002 
/* 'pc_state' encoding: bits 0..7 hold the CPU number, bits 8..31 flags. */
#define	PMC_PCPU_CPU_MASK		0x000000FF
#define	PMC_PCPU_FLAGS_MASK		0xFFFFFF00
#define	PMC_PCPU_FLAGS_SHIFT		8
#define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
#define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
#define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_FLAG_HTT		(PMC_PCPU_FLAGS_TO_STATE(0x1))
1011 
1012 /*
1013  * struct pmc_binding
1014  *
1015  * CPU binding information.
1016  */
1017 
1018 struct pmc_binding {
1019 	int	pb_bound;	/* is bound? */
1020 	int	pb_cpu;		/* if so, to which CPU */
1021 	u_char	pb_priority;	/* Thread active priority. */
1022 };
1023 
struct pmc_mdep;

/*
 * struct pmc_classdep
 *
 * PMC class-dependent operations.  By convention the '_cpu' argument
 * names the CPU the operation applies to and '_ri' is the row index
 * of the PMC being operated on.
 */
struct pmc_classdep {
	uint32_t	pcd_caps;	/* class capabilities */
	enum pmc_class	pcd_class;	/* class id */
	int		pcd_num;	/* number of PMCs */
	int		pcd_ri;		/* row index of the first PMC in class */
	int		pcd_width;	/* width of the PMC */

	/* configuring/reading/writing the hardware PMCs */
	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
	int (*pcd_read_pmc)(int _cpu, int _ri, struct pmc *_pm,
	    pmc_value_t *_value);
	int (*pcd_write_pmc)(int _cpu, int _ri, struct pmc *_pm,
	    pmc_value_t _value);

	/* pmc allocation/release */
	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
		const struct pmc_op_pmcallocate *_a);
	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* starting and stopping PMCs */
	int (*pcd_start_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_stop_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* description */
	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
		struct pmc **_ppmc);
	int (*pcd_get_caps)(int _ri, uint32_t *_caps);

	/* class-dependent initialization & finalization */
	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

	/* machine-specific interface */
	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
};
1067 
1068 /*
1069  * struct pmc_mdep
1070  *
1071  * Machine dependent bits needed per CPU type.
1072  */
1073 
1074 struct pmc_mdep  {
1075 	uint32_t	pmd_cputype;    /* from enum pmc_cputype */
1076 	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
1077 	uint32_t	pmd_nclass;	/* number of PMC classes present */
1078 
1079 	/*
1080 	 * Machine dependent methods.
1081 	 */
1082 
1083 	/* thread context switch in/out */
1084 	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
1085 	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);
1086 
1087 	/* handle a PMC interrupt */
1088 	int (*pmd_intr)(struct trapframe *_tf);
1089 
1090 	/*
1091 	 * PMC class dependent information.
1092 	 */
1093 	struct pmc_classdep pmd_classdep[];
1094 };
1095 
1096 /*
1097  * Per-CPU state.  This is an array of 'mp_ncpu' pointers
1098  * to struct pmc_cpu descriptors.
1099  */
1100 
1101 extern struct pmc_cpu **pmc_pcpu;
1102 
1103 /* driver statistics */
1104 extern struct pmc_driverstats pmc_stats;
1105 
#if	defined(HWPMC_DEBUG)

/*
 * Without KTR configured, the PMCDBG* macros would compile to no-ops;
 * fail the build explicitly instead of debugging silently doing nothing.
 */
#if !defined(KTR) || !defined(KTR_COMPILE) || ((KTR_COMPILE & KTR_SUBSYS) == 0)
#error "HWPMC_DEBUG requires KTR and KTR_COMPILE=KTR_SUBSYS -- see ktr(4)"
#endif

#include <sys/ktr.h>

#define	__pmcdbg_used		/* unused variable annotation */

/*
 * Debug flags, major flag groups.
 *
 * Each field is a bitmask of enabled minor flags (bit n corresponds to
 * the PMC_DEBUG_MIN_* value n) for that major group; see the PMCDBG*
 * macros below.
 *
 * Please keep the DEBUGGING section of the hwpmc(4) man page in sync.
 */
struct pmc_debugflags {
	int	pdb_CPU;
	int	pdb_CSW;
	int	pdb_LOG;
	int	pdb_MDP;
	int	pdb_MOD;
	int	pdb_OWN;
	int	pdb_PMC;
	int	pdb_PRC;
	int	pdb_SAM;
};

extern struct pmc_debugflags pmc_debugflags;

#define	KTR_PMC			KTR_SUBSYS

#define	PMC_DEBUG_STRSIZE		128
/* One zero per 'struct pmc_debugflags' field. */
#define	PMC_DEBUG_DEFAULT_FLAGS		{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
1140 
/*
 * PMCDBGn(M, N, L, F, ...): emit a KTR trace record with n arguments
 * when minor flag N of major group M is enabled in pmc_debugflags.
 * 'L' is an arbitrary label stringified into the record and 'F' is
 * the printf-style format for the remaining arguments.
 */
#define	PMCDBG0(M, N, L, F) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR0(KTR_PMC, #M ":" #N ":" #L  ": " F);		\
} while (0)
#define	PMCDBG1(M, N, L, F, p1) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR1(KTR_PMC, #M ":" #N ":" #L  ": " F, p1);		\
} while (0)
#define	PMCDBG2(M, N, L, F, p1, p2) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR2(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2);	\
} while (0)
#define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR3(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3);	\
} while (0)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR4(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4);\
} while (0)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR5(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
		    p5);						\
} while (0)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR6(KTR_PMC, #M ":" #N ":" #L  ": " F, p1, p2, p3, p4,	\
		    p5, p6);						\
} while (0)
1171 
/* Major numbers (one per 'struct pmc_debugflags' field) */
#define	PMC_DEBUG_MAJ_CPU		0 /* cpu switches */
#define	PMC_DEBUG_MAJ_CSW		1 /* context switches */
#define	PMC_DEBUG_MAJ_LOG		2 /* logging */
#define	PMC_DEBUG_MAJ_MDP		3 /* machine dependent */
#define	PMC_DEBUG_MAJ_MOD		4 /* misc module infrastructure */
#define	PMC_DEBUG_MAJ_OWN		5 /* owner */
#define	PMC_DEBUG_MAJ_PMC		6 /* pmc management */
#define	PMC_DEBUG_MAJ_PRC		7 /* processes */
#define	PMC_DEBUG_MAJ_SAM		8 /* sampling */

/*
 * Minor numbers.  Values below 8 are shared across all major groups;
 * 8 and above are specific to one group.
 */

/* Common (8 bits) */
#define	PMC_DEBUG_MIN_ALL		0 /* allocation */
#define	PMC_DEBUG_MIN_REL		1 /* release */
#define	PMC_DEBUG_MIN_OPS		2 /* ops: start, stop, ... */
#define	PMC_DEBUG_MIN_INI		3 /* init */
#define	PMC_DEBUG_MIN_FND		4 /* find */

/* MODULE */
#define	PMC_DEBUG_MIN_PMH	       14 /* pmc_hook */
#define	PMC_DEBUG_MIN_PMS	       15 /* pmc_syscall */

/* OWN */
#define	PMC_DEBUG_MIN_ORM		8 /* owner remove */
#define	PMC_DEBUG_MIN_OMR		9 /* owner maybe remove */

/* PROCESSES */
#define	PMC_DEBUG_MIN_TLK		8 /* link target */
#define	PMC_DEBUG_MIN_TUL		9 /* unlink target */
#define	PMC_DEBUG_MIN_EXT	       10 /* process exit */
#define	PMC_DEBUG_MIN_EXC	       11 /* process exec */
#define	PMC_DEBUG_MIN_FRK	       12 /* process fork */
#define	PMC_DEBUG_MIN_ATT	       13 /* attach/detach */
#define	PMC_DEBUG_MIN_SIG	       14 /* signalling */

/* CONTEXT SWITCHES */
#define	PMC_DEBUG_MIN_SWI		8 /* switch in */
#define	PMC_DEBUG_MIN_SWO		9 /* switch out */

/* PMC */
#define	PMC_DEBUG_MIN_REG		8 /* pmc register */
#define	PMC_DEBUG_MIN_ALR		9 /* allocate row */

/* MACHINE DEPENDENT LAYER */
#define	PMC_DEBUG_MIN_REA		8 /* read */
#define	PMC_DEBUG_MIN_WRI		9 /* write */
#define	PMC_DEBUG_MIN_CFG	       10 /* config */
#define	PMC_DEBUG_MIN_STA	       11 /* start */
#define	PMC_DEBUG_MIN_STO	       12 /* stop */
#define	PMC_DEBUG_MIN_INT	       13 /* interrupts */

/* CPU */
#define	PMC_DEBUG_MIN_BND		8 /* bind */
#define	PMC_DEBUG_MIN_SEL		9 /* select */

/* LOG */
#define	PMC_DEBUG_MIN_GTB		8 /* get buf */
#define	PMC_DEBUG_MIN_SIO		9 /* schedule i/o */
#define	PMC_DEBUG_MIN_FLS	       10 /* flush */
#define	PMC_DEBUG_MIN_SAM	       11 /* sample */
#define	PMC_DEBUG_MIN_CLO	       12 /* close */
1235 
#else
/* !HWPMC_DEBUG: all PMCDBG* invocations compile away to nothing. */
#define	__pmcdbg_used			__unused
#define	PMCDBG0(M, N, L, F)		/* nothing */
#define	PMCDBG1(M, N, L, F, p1)
#define	PMCDBG2(M, N, L, F, p1, p2)
#define	PMCDBG3(M, N, L, F, p1, p2, p3)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
#endif
1246 
/* declare a dedicated memory pool */
MALLOC_DECLARE(M_PMC);

/*
 * Functions
 */

struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
int	pmc_getrowdisp(int _ri);
/* Record a sampling interrupt; the '_mp' variant carries a multipart payload. */
int	pmc_process_interrupt_mp(int _ring, struct pmc *_pm,
    struct trapframe *_tf, struct pmc_multipart *mp);
int	pmc_process_interrupt(int _ring, struct pmc *_pm,
    struct trapframe *_tf);
/*
 * Capture up to '_maxsamples' callchain entries into '_cc'.
 * NOTE(review): presumably returns the number of entries captured --
 * verify against the MD implementations.
 */
int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
/* Save/select/restore the current thread's CPU binding. */
void	pmc_restore_cpu_binding(struct pmc_binding *pb);
void	pmc_save_cpu_binding(struct pmc_binding *pb);
void	pmc_select_cpu(int cpu);
/* Allocate/free a 'struct pmc_mdep' with room for 'nclasses' class descriptors. */
struct pmc_mdep *pmc_mdep_alloc(int nclasses);
void pmc_mdep_free(struct pmc_mdep *md);
uint64_t pmc_rdtsc(void);	/* read the CPU timestamp counter */
1271 #endif /* _KERNEL */
1272 #endif /* _SYS_PMC_H_ */
1273