1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2003-2008, Joseph Koshy 5 * Copyright (c) 2007 The FreeBSD Foundation 6 * All rights reserved. 7 * 8 * Portions of this software were developed by A. Joseph Koshy under 9 * sponsorship from the FreeBSD Foundation and Google, Inc. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 */ 32 33 #ifndef _SYS_PMC_H_ 34 #define _SYS_PMC_H_ 35 36 #include <dev/hwpmc/pmc_events.h> 37 #include <sys/proc.h> 38 #include <sys/counter.h> 39 #include <machine/pmc_mdep.h> 40 #include <machine/profile.h> 41 #ifdef _KERNEL 42 #include <sys/epoch.h> 43 #include <ck_queue.h> 44 #endif 45 46 #define PMC_MODULE_NAME "hwpmc" 47 #define PMC_NAME_MAX 64 /* HW counter name size */ 48 #define PMC_CLASS_MAX 8 /* max #classes of PMCs per-system */ 49 50 /* 51 * Kernel<->userland API version number [MMmmpppp] 52 * 53 * Major numbers are to be incremented when an incompatible change to 54 * the ABI occurs that older clients will not be able to handle. 55 * 56 * Minor numbers are incremented when a backwards compatible change 57 * occurs that allows older correct programs to run unchanged. For 58 * example, when support for a new PMC type is added. 59 * 60 * The patch version is incremented for every bug fix. 61 */ 62 #define PMC_VERSION_MAJOR 0x0A 63 #define PMC_VERSION_MINOR 0x01 64 #define PMC_VERSION_PATCH 0x0000 65 66 #define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \ 67 PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH) 68 69 #define PMC_CPUID_LEN 64 70 /* cpu model name for pmu lookup */ 71 extern char pmc_cpuid[PMC_CPUID_LEN]; 72 73 /* 74 * Kinds of CPUs known. 75 * 76 * We keep track of CPU variants that need to be distinguished in 77 * some way for PMC operations. CPU names are grouped by manufacturer 78 * and numbered sparsely in order to minimize changes to the ABI involved 79 * when new CPUs are added. 80 * 81 * Please keep the pmc(3) manual page in sync with this list. 
82 */ 83 #define __PMC_CPUS() \ 84 __PMC_CPU(AMD_K8, 0x01, "AMD K8") \ 85 __PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo") \ 86 __PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2") \ 87 __PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme") \ 88 __PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom") \ 89 __PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \ 90 __PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \ 91 __PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge") \ 92 __PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge") \ 93 __PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \ 94 __PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon") \ 95 __PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell") \ 96 __PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont") \ 97 __PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500") \ 98 __PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7") \ 99 __PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3") \ 100 __PMC_CPU(INTEL_BROADWELL, 0x96, "Intel Broadwell") \ 101 __PMC_CPU(INTEL_BROADWELL_XEON, 0x97, "Intel Broadwell Xeon") \ 102 __PMC_CPU(INTEL_SKYLAKE, 0x98, "Intel Skylake") \ 103 __PMC_CPU(INTEL_SKYLAKE_XEON, 0x99, "Intel Skylake Xeon") \ 104 __PMC_CPU(INTEL_ATOM_GOLDMONT, 0x9A, "Intel Atom Goldmont") \ 105 __PMC_CPU(INTEL_ICELAKE, 0x9B, "Intel Icelake") \ 106 __PMC_CPU(INTEL_ICELAKE_XEON, 0x9C, "Intel Icelake Xeon") \ 107 __PMC_CPU(INTEL_ALDERLAKE, 0x9D, "Intel Alderlake") \ 108 __PMC_CPU(INTEL_ATOM_GOLDMONT_P, 0x9E, "Intel Atom Goldmont Plus") \ 109 __PMC_CPU(INTEL_ATOM_TREMONT, 0x9F, "Intel Atom Tremont") \ 110 __PMC_CPU(INTEL_EMERALD_RAPIDS, 0xA0, "Intel Emerald Rapids") \ 111 __PMC_CPU(INTEL_ALDERLAKEN, 0xA1, "Intel AlderlakeN") \ 112 __PMC_CPU(INTEL_GRANITE_RAPIDS, 0xA2, "Intel Granite Rapids") \ 113 __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \ 114 __PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \ 115 __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \ 116 __PMC_CPU(PPC_POWER8, 0x390, "IBM POWER8") 
\ 117 __PMC_CPU(GENERIC, 0x400, "Generic") \ 118 __PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5") \ 119 __PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7") \ 120 __PMC_CPU(ARMV7_CORTEX_A8, 0x502, "ARMv7 Cortex A8") \ 121 __PMC_CPU(ARMV7_CORTEX_A9, 0x503, "ARMv7 Cortex A9") \ 122 __PMC_CPU(ARMV7_CORTEX_A15, 0x504, "ARMv7 Cortex A15") \ 123 __PMC_CPU(ARMV7_CORTEX_A17, 0x505, "ARMv7 Cortex A17") \ 124 __PMC_CPU(ARMV8_CORTEX_A53, 0x600, "ARMv8 Cortex A53") \ 125 __PMC_CPU(ARMV8_CORTEX_A57, 0x601, "ARMv8 Cortex A57") \ 126 __PMC_CPU(ARMV8_CORTEX_A76, 0x602, "ARMv8 Cortex A76") 127 128 enum pmc_cputype { 129 #undef __PMC_CPU 130 #define __PMC_CPU(S,V,D) PMC_CPU_##S = V, 131 __PMC_CPUS() 132 }; 133 134 #define PMC_CPU_FIRST PMC_CPU_AMD_K8 135 #define PMC_CPU_LAST PMC_CPU_ARMV8_CORTEX_A76 136 137 /* 138 * Classes of PMCs 139 */ 140 #define __PMC_CLASSES() \ 141 __PMC_CLASS(TSC, 0x00, "CPU Timestamp counter") \ 142 __PMC_CLASS(K8, 0x02, "AMD K8 performance counters") \ 143 __PMC_CLASS(IBS, 0x03, "AMD IBS performance counters") \ 144 __PMC_CLASS(IAF, 0x06, "Intel Core2/Atom, fixed function") \ 145 __PMC_CLASS(IAP, 0x07, "Intel Core...Atom, programmable") \ 146 __PMC_CLASS(UCF, 0x08, "Intel Uncore fixed function") \ 147 __PMC_CLASS(UCP, 0x09, "Intel Uncore programmable") \ 148 __PMC_CLASS(PPC7450, 0x0D, "Motorola MPC7450 class") \ 149 __PMC_CLASS(PPC970, 0x0E, "IBM PowerPC 970 class") \ 150 __PMC_CLASS(SOFT, 0x0F, "Software events") \ 151 __PMC_CLASS(ARMV7, 0x10, "ARMv7") \ 152 __PMC_CLASS(ARMV8, 0x11, "ARMv8") \ 153 __PMC_CLASS(E500, 0x13, "Freescale e500 class") \ 154 __PMC_CLASS(POWER8, 0x15, "IBM POWER8 class") \ 155 __PMC_CLASS(DMC620_PMU_CD2, 0x16, "ARM DMC620 Memory Controller PMU CLKDIV2") \ 156 __PMC_CLASS(DMC620_PMU_C, 0x17, "ARM DMC620 Memory Controller PMU CLK") \ 157 __PMC_CLASS(CMN600_PMU, 0x18, "Arm CoreLink CMN600 Coherent Mesh Network PMU") 158 159 enum pmc_class { 160 #undef __PMC_CLASS 161 #define __PMC_CLASS(S,V,D) PMC_CLASS_##S = V, 162 
__PMC_CLASSES()
};

#define	PMC_CLASS_FIRST	PMC_CLASS_TSC
#define	PMC_CLASS_LAST	PMC_CLASS_CMN600_PMU

/*
 * A PMC can be in the following states:
 *
 * Hardware states:
 *   DISABLED   -- administratively prohibited from being used.
 *   FREE       -- HW available for use
 * Software states:
 *   ALLOCATED  -- allocated
 *   STOPPED    -- allocated, but not counting events
 *   RUNNING    -- allocated, and in operation; 'pm_runcount'
 *                 holds the number of CPUs using this PMC at
 *                 a given instant
 *   DELETED    -- being destroyed
 */

#define	__PMC_HWSTATES()						\
	__PMC_STATE(DISABLED)						\
	__PMC_STATE(FREE)

#define	__PMC_SWSTATES()						\
	__PMC_STATE(ALLOCATED)						\
	__PMC_STATE(STOPPED)						\
	__PMC_STATE(RUNNING)						\
	__PMC_STATE(DELETED)

#define	__PMC_STATES()							\
	__PMC_HWSTATES()						\
	__PMC_SWSTATES()

enum pmc_state {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	PMC_STATE_##S,
	__PMC_STATES()
	__PMC_STATE(MAX)
};

#define	PMC_STATE_FIRST	PMC_STATE_DISABLED
#define	PMC_STATE_LAST	PMC_STATE_DELETED

/*
 * An allocated PMC may be used as a 'global' counter or as a
 * 'thread-private' one.  Each such mode of use can be in either
 * statistical sampling mode or in counting mode.  Thus a PMC in use
 * can be in one of the following four modes:
 *
 * SS i.e., SYSTEM STATISTICAL  -- system-wide statistical profiling
 * SC i.e., SYSTEM COUNTER      -- system-wide counting mode
 * TS i.e., THREAD STATISTICAL  -- thread virtual, statistical profiling
 * TC i.e., THREAD COUNTER      -- thread virtual, counting mode
 *
 * Statistical profiling modes rely on the PMC periodically delivering
 * an interrupt to the CPU (when the configured number of events have
 * been measured), so the PMC must have the ability to generate
 * interrupts.
 *
 * In counting modes, the PMC counts its configured events, with the
 * value of the PMC being read whenever needed by its owner process.
224 * 225 * The thread specific modes "virtualize" the PMCs -- the PMCs appear 226 * to be thread private and count events only when the profiled thread 227 * actually executes on the CPU. 228 * 229 * The system-wide "global" modes keep the PMCs running all the time 230 * and are used to measure the behaviour of the whole system. 231 */ 232 233 #define __PMC_MODES() \ 234 __PMC_MODE(SS, 0) \ 235 __PMC_MODE(SC, 1) \ 236 __PMC_MODE(TS, 2) \ 237 __PMC_MODE(TC, 3) 238 239 enum pmc_mode { 240 #undef __PMC_MODE 241 #define __PMC_MODE(M,N) PMC_MODE_##M = N, 242 __PMC_MODES() 243 }; 244 245 #define PMC_MODE_FIRST PMC_MODE_SS 246 #define PMC_MODE_LAST PMC_MODE_TC 247 248 #define PMC_IS_COUNTING_MODE(mode) \ 249 ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC) 250 #define PMC_IS_SYSTEM_MODE(mode) \ 251 ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC) 252 #define PMC_IS_SAMPLING_MODE(mode) \ 253 ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS) 254 #define PMC_IS_VIRTUAL_MODE(mode) \ 255 ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC) 256 257 /* 258 * PMC row disposition 259 */ 260 261 #define __PMC_DISPOSITIONS(N) \ 262 __PMC_DISP(STANDALONE) /* global/disabled counters */ \ 263 __PMC_DISP(FREE) /* free/available */ \ 264 __PMC_DISP(THREAD) /* thread-virtual PMCs */ \ 265 __PMC_DISP(UNKNOWN) /* sentinel */ 266 267 enum pmc_disp { 268 #undef __PMC_DISP 269 #define __PMC_DISP(D) PMC_DISP_##D , 270 __PMC_DISPOSITIONS() 271 }; 272 273 #define PMC_DISP_FIRST PMC_DISP_STANDALONE 274 #define PMC_DISP_LAST PMC_DISP_THREAD 275 276 /* 277 * Counter capabilities 278 * 279 * __PMC_CAPS(NAME, VALUE, DESCRIPTION) 280 */ 281 282 #define __PMC_CAPS() \ 283 __PMC_CAP(INTERRUPT, 0, "generate interrupts") \ 284 __PMC_CAP(USER, 1, "count user-mode events") \ 285 __PMC_CAP(SYSTEM, 2, "count system-mode events") \ 286 __PMC_CAP(EDGE, 3, "do edge detection of events") \ 287 __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \ 288 __PMC_CAP(READ, 5, "read PMC counter") \ 289 
__PMC_CAP(WRITE, 6, "reprogram PMC counter") \ 290 __PMC_CAP(INVERT, 7, "invert comparison sense") \ 291 __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \ 292 __PMC_CAP(PRECISE, 9, "perform precise sampling") \ 293 __PMC_CAP(TAGGING, 10, "tag upstream events") \ 294 __PMC_CAP(CASCADE, 11, "cascade counters") \ 295 __PMC_CAP(SYSWIDE, 12, "system wide counter") \ 296 __PMC_CAP(DOMWIDE, 13, "NUMA domain wide counter") 297 298 enum pmc_caps 299 { 300 #undef __PMC_CAP 301 #define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) , 302 __PMC_CAPS() 303 }; 304 305 #define PMC_CAP_FIRST PMC_CAP_INTERRUPT 306 #define PMC_CAP_LAST PMC_CAP_DOMWIDE 307 308 /* 309 * PMC Event Numbers 310 * 311 * These are generated from the definitions in "dev/hwpmc/pmc_events.h". 312 */ 313 314 enum pmc_event { 315 #undef __PMC_EV 316 #undef __PMC_EV_BLOCK 317 #define __PMC_EV_BLOCK(C,V) PMC_EV_ ## C ## __BLOCK_START = (V) - 1 , 318 #define __PMC_EV(C,N) PMC_EV_ ## C ## _ ## N , 319 __PMC_EVENTS() 320 }; 321 322 /* 323 * PMC SYSCALL INTERFACE 324 */ 325 326 /* 327 * "PMC_OPS" -- these are the commands recognized by the kernel 328 * module, and are used when performing a system call from userland. 
329 */ 330 #define __PMC_OPS() \ 331 __PMC_OP(CONFIGURELOG, "Set log file") \ 332 __PMC_OP(FLUSHLOG, "Flush log file") \ 333 __PMC_OP(GETCPUINFO, "Get system CPU information") \ 334 __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \ 335 __PMC_OP(GETMODULEVERSION, "Get module version") \ 336 __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \ 337 __PMC_OP(PMCADMIN, "Set PMC state") \ 338 __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \ 339 __PMC_OP(PMCATTACH, "Attach a PMC to a process") \ 340 __PMC_OP(PMCDETACH, "Detach a PMC from a process") \ 341 __PMC_OP(PMCGETMSR, "Get a PMC's hardware address") \ 342 __PMC_OP(PMCRELEASE, "Release a PMC") \ 343 __PMC_OP(PMCRW, "Read/Set a PMC") \ 344 __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \ 345 __PMC_OP(PMCSTART, "Start a PMC") \ 346 __PMC_OP(PMCSTOP, "Stop a PMC") \ 347 __PMC_OP(WRITELOG, "Write a cookie to the log file") \ 348 __PMC_OP(CLOSELOG, "Close log file") \ 349 __PMC_OP(GETDYNEVENTINFO, "Get dynamic events list") \ 350 __PMC_OP(GETCAPS, "Get capabilities") 351 352 enum pmc_ops { 353 #undef __PMC_OP 354 #define __PMC_OP(N, D) PMC_OP_##N, 355 __PMC_OPS() 356 }; 357 358 /* 359 * Flags used in operations on PMCs. 
360 */ 361 362 #define PMC_F_UNUSED1 0x00000001 /* unused */ 363 #define PMC_F_DESCENDANTS 0x00000002 /*OP ALLOCATE track descendants */ 364 #define PMC_F_LOG_PROCCSW 0x00000004 /*OP ALLOCATE track ctx switches */ 365 #define PMC_F_LOG_PROCEXIT 0x00000008 /*OP ALLOCATE log proc exits */ 366 #define PMC_F_NEWVALUE 0x00000010 /*OP RW write new value */ 367 #define PMC_F_OLDVALUE 0x00000020 /*OP RW get old value */ 368 369 /* V2 API */ 370 #define PMC_F_CALLCHAIN 0x00000080 /*OP ALLOCATE capture callchains */ 371 #define PMC_F_USERCALLCHAIN 0x00000100 /*OP ALLOCATE use userspace stack */ 372 373 /* V10 API */ 374 #define PMC_F_EV_PMU 0x00000200 /* 375 * OP ALLOCATE: pm_ev has special 376 * userspace meaning; counter 377 * configuration is communicated 378 * through class-dependent fields 379 */ 380 381 /* internal flags */ 382 #define PMC_F_ATTACHED_TO_OWNER 0x00010000 /*attached to owner*/ 383 #define PMC_F_NEEDS_LOGFILE 0x00020000 /*needs log file */ 384 #define PMC_F_ATTACH_DONE 0x00040000 /*attached at least once */ 385 386 #define PMC_CALLCHAIN_DEPTH_MAX 512 387 388 #define PMC_CC_F_USERSPACE 0x01 /*userspace callchain*/ 389 #define PMC_CC_F_MULTIPART 0x02 /*multipart data*/ 390 391 /* 392 * Cookies used to denote allocated PMCs, and the values of PMCs. 393 */ 394 395 typedef uint32_t pmc_id_t; 396 typedef uint64_t pmc_value_t; 397 398 #define PMC_ID_INVALID (~ (pmc_id_t) 0) 399 400 /* 401 * PMC IDs have the following format: 402 * 403 * +-----------------------+-------+-----------+ 404 * | CPU | PMC MODE | CLASS | ROW INDEX | 405 * +-----------------------+-------+-----------+ 406 * 407 * where CPU is 12 bits, MODE 4, CLASS 8, and ROW INDEX 8 Field 'CPU' 408 * is set to the requested CPU for system-wide PMCs or PMC_CPU_ANY for 409 * process-mode PMCs. Field 'PMC MODE' is the allocated PMC mode. 410 * Field 'PMC CLASS' is the class of the PMC. Field 'ROW INDEX' is the 411 * row index for the PMC. 
 *
 * The 'ROW INDEX' ranges over 0..NHWPMCS where NHWPMCS is the total
 * number of hardware PMCs on this cpu.
 */

#define	PMC_ID_TO_ROWINDEX(ID)	((ID) & 0xFF)
#define	PMC_ID_TO_CLASS(ID)	(((ID) & 0xFF00) >> 8)
#define	PMC_ID_TO_MODE(ID)	(((ID) & 0xF0000) >> 16)
#define	PMC_ID_TO_CPU(ID)	(((ID) & 0xFFF00000) >> 20)
#define	PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX)			\
	((((CPU) & 0xFFF) << 20) | (((MODE) & 0xF) << 16) |	\
	(((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF))

/*
 * Data structures for system calls supported by the pmc driver.
 */

/*
 * OP PMCALLOCATE
 *
 * Allocate a PMC on the named CPU.
 */

#define	PMC_CPU_ANY	~0

struct pmc_op_pmcallocate {
	uint32_t	pm_caps;	/* PMC_CAP_* */
	uint32_t	pm_cpu;		/* CPU number or PMC_CPU_ANY */
	enum pmc_class	pm_class;	/* class of PMC desired */
	enum pmc_event	pm_ev;		/* [enum pmc_event] desired */
	uint32_t	pm_flags;	/* additional modifiers PMC_F_* */
	enum pmc_mode	pm_mode;	/* desired mode */
	pmc_id_t	pm_pmcid;	/* [return] process pmc id */
	pmc_value_t	pm_count;	/* initial/sample count */

	union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
};

/*
 * OP PMCADMIN
 *
 * Set the administrative state (i.e., whether enabled or disabled) of
 * a PMC 'pm_pmc' on CPU 'pm_cpu'.  Note that 'pm_pmc' specifies an
 * absolute PMC number and need not have been first allocated by the
 * calling process.
 */

struct pmc_op_pmcadmin {
	int		pm_cpu;		/* CPU# */
	uint32_t	pm_flags;	/* flags */
	int		pm_pmc;		/* PMC# */
	enum pmc_state	pm_state;	/* desired state */
};

/*
 * OP PMCATTACH / OP PMCDETACH
 *
 * Attach/detach a PMC and a process.
 */

struct pmc_op_pmcattach {
	pmc_id_t	pm_pmc;		/* PMC to attach to */
	pid_t		pm_pid;		/* target process */
};

/*
 * OP PMCSETCOUNT
 *
 * Set the sampling rate (i.e., the reload count) for statistical counters.
 * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
 */

struct pmc_op_pmcsetcount {
	pmc_value_t	pm_count;	/* initial/sample count */
	pmc_id_t	pm_pmcid;	/* PMC id to set */
};

/*
 * OP PMCRW
 *
 * Read the value of a PMC named by 'pm_pmcid'.  'pm_pmcid' needs
 * to have been previously allocated using PMCALLOCATE.
 */

struct pmc_op_pmcrw {
	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE */
	pmc_id_t	pm_pmcid;	/* pmc id */
	pmc_value_t	pm_value;	/* new & returned value */
};

/*
 * OP GETPMCINFO
 *
 * Retrieve PMC state for a named CPU.  The caller is expected to
 * allocate 'npmc' * 'struct pmc_info' bytes of space for the return
 * values.
 */

struct pmc_info {
	char		pm_name[PMC_NAME_MAX];	/* pmc name */
	enum pmc_class	pm_class;	/* enum pmc_class */
	int		pm_enabled;	/* whether enabled */
	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
	pid_t		pm_ownerpid;	/* owner, or -1 */
	enum pmc_mode	pm_mode;	/* current mode [enum pmc_mode] */
	enum pmc_event	pm_event;	/* current event */
	uint32_t	pm_flags;	/* current flags */
	pmc_value_t	pm_reloadcount;	/* sampling counters only */
};

struct pmc_op_getpmcinfo {
	int32_t		pm_cpu;		/* 0 <= cpu < mp_maxid */
	struct pmc_info	pm_pmcs[];	/* space for 'npmc' structures */
};

/*
 * OP GETCPUINFO
 *
 * Retrieve system CPU information.
531 */ 532 533 struct pmc_classinfo { 534 enum pmc_class pm_class; /* class id */ 535 uint32_t pm_caps; /* counter capabilities */ 536 uint32_t pm_width; /* width of the PMC */ 537 uint32_t pm_num; /* number of PMCs in class */ 538 }; 539 540 struct pmc_op_getcpuinfo { 541 enum pmc_cputype pm_cputype; /* what kind of CPU */ 542 uint32_t pm_ncpu; /* max CPU number */ 543 uint32_t pm_npmc; /* #PMCs per CPU */ 544 uint32_t pm_nclass; /* #classes of PMCs */ 545 struct pmc_classinfo pm_classes[PMC_CLASS_MAX]; 546 }; 547 548 /* 549 * OP CONFIGURELOG 550 * 551 * Configure a log file for writing system-wide statistics to. 552 */ 553 554 struct pmc_op_configurelog { 555 int pm_flags; 556 int pm_logfd; /* logfile fd (or -1) */ 557 }; 558 559 /* 560 * OP GETDRIVERSTATS 561 * 562 * Retrieve pmc(4) driver-wide statistics. 563 */ 564 #ifdef _KERNEL 565 struct pmc_driverstats { 566 counter_u64_t pm_intr_ignored; /* #interrupts ignored */ 567 counter_u64_t pm_intr_processed; /* #interrupts processed */ 568 counter_u64_t pm_intr_bufferfull; /* #interrupts with ENOSPC */ 569 counter_u64_t pm_syscalls; /* #syscalls */ 570 counter_u64_t pm_syscall_errors; /* #syscalls with errors */ 571 counter_u64_t pm_buffer_requests; /* #buffer requests */ 572 counter_u64_t pm_buffer_requests_failed; /* #failed buffer requests */ 573 counter_u64_t pm_log_sweeps; /* #sample buffer processing 574 passes */ 575 counter_u64_t pm_merges; /* merged k+u */ 576 counter_u64_t pm_overwrites; /* UR overwrites */ 577 }; 578 #endif 579 580 struct pmc_op_getdriverstats { 581 unsigned int pm_intr_ignored; /* #interrupts ignored */ 582 unsigned int pm_intr_processed; /* #interrupts processed */ 583 unsigned int pm_intr_bufferfull; /* #interrupts with ENOSPC */ 584 unsigned int pm_syscalls; /* #syscalls */ 585 unsigned int pm_syscall_errors; /* #syscalls with errors */ 586 unsigned int pm_buffer_requests; /* #buffer requests */ 587 unsigned int pm_buffer_requests_failed; /* #failed buffer requests */ 588 unsigned 
int	pm_log_sweeps;	/* #sample buffer processing passes */
};

/*
 * OP RELEASE / OP START / OP STOP
 *
 * Simple operations on a PMC id.
 */

struct pmc_op_simple {
	pmc_id_t	pm_pmcid;
};

/*
 * OP WRITELOG
 *
 * Flush the current log buffer and write 4 bytes of user data to it.
 */

struct pmc_op_writelog {
	uint32_t	pm_userdata;
};

/*
 * OP GETMSR
 *
 * Retrieve the machine specific address associated with the allocated
 * PMC.  This number can be used subsequently with a read-performance-counter
 * instruction.
 */

struct pmc_op_getmsr {
	uint32_t	pm_msr;		/* machine specific address */
	pmc_id_t	pm_pmcid;	/* allocated pmc id */
};

/*
 * OP GETDYNEVENTINFO
 *
 * Retrieve a PMC dynamic class events list.
 */

struct pmc_dyn_event_descr {
	char		pm_ev_name[PMC_NAME_MAX];
	enum pmc_event	pm_ev_code;
};

struct pmc_op_getdyneventinfo {
	enum pmc_class	pm_class;
	unsigned int	pm_nevent;
	struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT];
};

/*
 * OP GETCAPS
 *
 * Retrieve the PMC capabilities flags for this type of counter.
 */

struct pmc_op_caps {
	pmc_id_t	pm_pmcid;	/* allocated pmc id */
	uint32_t	pm_caps;	/* capabilities */
};

#ifdef _KERNEL

#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/_cpuset.h>

#include <machine/frame.h>

#define	PMC_HASH_SIZE		1024
#define	PMC_MTXPOOL_SIZE	2048
#define	PMC_LOG_BUFFER_SIZE	256
#define	PMC_NLOGBUFFERS_PCPU	32
#define	PMC_NSAMPLES		256
#define	PMC_CALLCHAIN_DEPTH	128
#define	PMC_THREADLIST_MAX	128

#define	PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."

/*
 * Locking keys
 *
 * (b) - pmc_bufferlist_mtx (spin lock)
 * (k) - pmc_kthread_mtx (sleep lock)
 * (o) - po->po_mtx (spin lock)
 * (g) - global_epoch_preempt (epoch)
 * (p) - pmc_sx (sx)
 */

/*
 * PMC commands
 */

struct pmc_syscall_args {
	register_t	pmop_code;	/* one of PMC_OP_* */
	void		*pmop_data;	/* syscall parameter */
};

/*
 * Interface to processor specific stuff
 */

/*
 * struct pmc_descr
 *
 * Machine independent (i.e., the common parts) of a human readable
 * PMC description.
 */

struct pmc_descr {
	char		pd_name[PMC_NAME_MAX]; /* name */
	uint32_t	pd_caps;	/* capabilities */
	enum pmc_class	pd_class;	/* class of the PMC */
	uint32_t	pd_width;	/* width in bits */
};

/*
 * struct pmc_target
 *
 * This structure records all the target processes associated with a
 * PMC.
 */

struct pmc_target {
	LIST_ENTRY(pmc_target)	pt_next;
	struct pmc_process	*pt_process; /* target descriptor */
};

/*
 * struct pmc
 *
 * Describes each allocated PMC.
 *
 * Each PMC has precisely one owner, namely the process that allocated
 * the PMC.
 *
 * A PMC may be attached to multiple target processes.  The
 * 'pm_targets' field links all the target processes being monitored
 * by this PMC.
 *
 * The 'pm_savedvalue' field is protected by a mutex.
 *
 * On a multi-cpu machine, multiple target threads associated with a
 * process-virtual PMC could be concurrently executing on different
 * CPUs.  The 'pm_runcount' field is atomically incremented every time
 * the PMC gets scheduled on a CPU and atomically decremented when it
 * gets descheduled.  Deletion of a PMC is only permitted when this
 * field is '0'.
740 * 741 */ 742 struct pmc_pcpu_state { 743 uint32_t pps_overflowcnt; /* count overflow interrupts */ 744 uint8_t pps_stalled; 745 uint8_t pps_cpustate; 746 } __aligned(CACHE_LINE_SIZE); 747 struct pmc { 748 LIST_HEAD(,pmc_target) pm_targets; /* list of target processes */ 749 LIST_ENTRY(pmc) pm_next; /* owner's list */ 750 751 /* 752 * System-wide PMCs are allocated on a CPU and are not moved 753 * around. For system-wide PMCs we record the CPU the PMC was 754 * allocated on in the 'CPU' field of the pmc ID. 755 * 756 * Virtual PMCs run on whichever CPU is currently executing 757 * their targets' threads. For these PMCs we need to save 758 * their current PMC counter values when they are taken off 759 * CPU. 760 */ 761 762 union { 763 pmc_value_t pm_savedvalue; /* Virtual PMCS */ 764 } pm_gv; 765 766 /* 767 * For sampling mode PMCs, we keep track of the PMC's "reload 768 * count", which is the counter value to be loaded in when 769 * arming the PMC for the next counting session. For counting 770 * modes on PMCs that are read-only (e.g., the x86 TSC), we 771 * keep track of the initial value at the start of 772 * counting-mode operation. 773 */ 774 775 union { 776 pmc_value_t pm_reloadcount; /* sampling PMC modes */ 777 pmc_value_t pm_initial; /* counting PMC modes */ 778 } pm_sc; 779 780 struct pmc_pcpu_state *pm_pcpu_state; 781 volatile cpuset_t pm_cpustate; /* CPUs where PMC should be active */ 782 uint32_t pm_caps; /* PMC capabilities */ 783 enum pmc_event pm_event; /* event being measured */ 784 uint32_t pm_flags; /* additional flags PMC_F_... */ 785 struct pmc_owner *pm_owner; /* owner thread state */ 786 counter_u64_t pm_runcount; /* #cpus currently on */ 787 enum pmc_state pm_state; /* current PMC state */ 788 789 /* 790 * The PMC ID field encodes the row-index for the PMC, its 791 * mode, class and the CPU# associated with the PMC. 
792 */ 793 794 pmc_id_t pm_id; /* allocated PMC id */ 795 enum pmc_class pm_class; 796 797 /* md extensions */ 798 union pmc_md_pmc pm_md; 799 }; 800 801 /* 802 * Accessor macros for 'struct pmc' 803 */ 804 805 #define PMC_TO_MODE(P) PMC_ID_TO_MODE((P)->pm_id) 806 #define PMC_TO_CLASS(P) PMC_ID_TO_CLASS((P)->pm_id) 807 #define PMC_TO_ROWINDEX(P) PMC_ID_TO_ROWINDEX((P)->pm_id) 808 #define PMC_TO_CPU(P) PMC_ID_TO_CPU((P)->pm_id) 809 810 /* 811 * struct pmc_threadpmcstate 812 * 813 * Record per-PMC, per-thread state. 814 */ 815 struct pmc_threadpmcstate { 816 pmc_value_t pt_pmcval; /* per-thread reload count */ 817 }; 818 819 /* 820 * struct pmc_thread 821 * 822 * Record a 'target' thread being profiled. 823 */ 824 struct pmc_thread { 825 LIST_ENTRY(pmc_thread) pt_next; /* linked list */ 826 struct thread *pt_td; /* target thread */ 827 struct pmc_threadpmcstate pt_pmcs[]; /* per-PMC state */ 828 }; 829 830 /* 831 * struct pmc_process 832 * 833 * Record a 'target' process being profiled. 834 * 835 * The target process being profiled could be different from the owner 836 * process which allocated the PMCs. Each target process descriptor 837 * is associated with NHWPMC 'struct pmc *' pointers. Each PMC at a 838 * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]' 839 * array. The size of this structure is thus PMC architecture 840 * dependent. 
841 * 842 */ 843 844 struct pmc_targetstate { 845 struct pmc *pp_pmc; /* target PMC */ 846 pmc_value_t pp_pmcval; /* per-process value */ 847 }; 848 849 struct pmc_process { 850 LIST_ENTRY(pmc_process) pp_next; /* hash chain */ 851 LIST_HEAD(,pmc_thread) pp_tds; /* list of threads */ 852 struct mtx *pp_tdslock; /* lock on pp_tds thread list */ 853 int pp_refcnt; /* reference count */ 854 uint32_t pp_flags; /* flags PMC_PP_* */ 855 struct proc *pp_proc; /* target process */ 856 struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */ 857 }; 858 859 #define PMC_PP_ENABLE_MSR_ACCESS 0x00000001 860 861 /* 862 * struct pmc_owner 863 * 864 * We associate a PMC with an 'owner' process. 865 * 866 * A process can be associated with 0..NCPUS*NHWPMC PMCs during its 867 * lifetime, where NCPUS is the numbers of CPUS in the system and 868 * NHWPMC is the number of hardware PMCs per CPU. These are 869 * maintained in the list headed by the 'po_pmcs' to save on space. 870 * 871 */ 872 873 struct pmc_owner { 874 LIST_ENTRY(pmc_owner) po_next; /* hash chain */ 875 CK_LIST_ENTRY(pmc_owner) po_ssnext; /* (g/p) list of SS PMC owners */ 876 LIST_HEAD(, pmc) po_pmcs; /* owned PMC list */ 877 TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */ 878 struct mtx po_mtx; /* spin lock for (o) */ 879 struct proc *po_owner; /* owner proc */ 880 uint32_t po_flags; /* (k) flags PMC_PO_* */ 881 struct proc *po_kthread; /* (k) helper kthread */ 882 struct file *po_file; /* file reference */ 883 int po_error; /* recorded error */ 884 short po_sscount; /* # SS PMCs owned */ 885 short po_logprocmaps; /* global mappings done */ 886 struct pmclog_buffer *po_curbuf[MAXCPU]; /* current log buffer */ 887 }; 888 889 #define PMC_PO_OWNS_LOGFILE 0x00000001 /* has a log file */ 890 #define PMC_PO_SHUTDOWN 0x00000010 /* in the process of shutdown */ 891 #define PMC_PO_INITIAL_MAPPINGS_DONE 0x00000020 892 893 /* 894 * struct pmc_hw -- describe the state of the PMC hardware 895 * 896 * When in use, a HW PMC 
is associated with one allocated 'struct pmc' 897 * pointed to by field 'phw_pmc'. When inactive, this field is NULL. 898 * 899 * On an SMP box, one or more HW PMC's in process virtual mode with 900 * the same 'phw_pmc' could be executing on different CPUs. In order 901 * to handle this case correctly, we need to ensure that only 902 * incremental counts get added to the saved value in the associated 903 * 'struct pmc'. The 'phw_save' field is used to keep the saved PMC 904 * value at the time the hardware is started during this context 905 * switch (i.e., the difference between the new (hardware) count and 906 * the saved count is atomically added to the count field in 'struct 907 * pmc' at context switch time). 908 * 909 */ 910 911 struct pmc_hw { 912 uint32_t phw_state; /* see PHW_* macros below */ 913 struct pmc *phw_pmc; /* current thread PMC */ 914 }; 915 916 #define PMC_PHW_RI_MASK 0x000000FF 917 #define PMC_PHW_CPU_SHIFT 8 918 #define PMC_PHW_CPU_MASK 0x0000FF00 919 #define PMC_PHW_FLAGS_SHIFT 16 920 #define PMC_PHW_FLAGS_MASK 0xFFFF0000 921 922 #define PMC_PHW_INDEX_TO_STATE(ri) ((ri) & PMC_PHW_RI_MASK) 923 #define PMC_PHW_STATE_TO_INDEX(state) ((state) & PMC_PHW_RI_MASK) 924 #define PMC_PHW_CPU_TO_STATE(cpu) (((cpu) << PMC_PHW_CPU_SHIFT) & \ 925 PMC_PHW_CPU_MASK) 926 #define PMC_PHW_STATE_TO_CPU(state) (((state) & PMC_PHW_CPU_MASK) >> \ 927 PMC_PHW_CPU_SHIFT) 928 #define PMC_PHW_FLAGS_TO_STATE(flags) (((flags) << PMC_PHW_FLAGS_SHIFT) & \ 929 PMC_PHW_FLAGS_MASK) 930 #define PMC_PHW_STATE_TO_FLAGS(state) (((state) & PMC_PHW_FLAGS_MASK) >> \ 931 PMC_PHW_FLAGS_SHIFT) 932 #define PMC_PHW_FLAG_IS_ENABLED (PMC_PHW_FLAGS_TO_STATE(0x01)) 933 #define PMC_PHW_FLAG_IS_SHAREABLE (PMC_PHW_FLAGS_TO_STATE(0x02)) 934 935 /* 936 * struct pmc_sample 937 * 938 * Space for N (tunable) PC samples and associated control data. 
939 */ 940 941 struct pmc_sample { 942 uint16_t ps_nsamples; /* callchain depth */ 943 uint16_t ps_nsamples_actual; 944 uint16_t ps_cpu; /* cpu number */ 945 uint16_t ps_flags; /* other flags */ 946 lwpid_t ps_tid; /* thread id */ 947 pid_t ps_pid; /* process PID or -1 */ 948 int ps_ticks; /* ticks at sample time */ 949 /* pad */ 950 struct thread *ps_td; /* which thread */ 951 struct pmc *ps_pmc; /* interrupting PMC */ 952 uintptr_t *ps_pc; /* (const) callchain start */ 953 uint64_t ps_tsc; /* tsc value */ 954 }; 955 956 #define PMC_SAMPLE_FREE ((uint16_t) 0) 957 #define PMC_USER_CALLCHAIN_PENDING ((uint16_t) 0xFFFF) 958 959 struct pmc_samplebuffer { 960 volatile uint64_t ps_prodidx; /* producer index */ 961 volatile uint64_t ps_considx; /* consumer index */ 962 uintptr_t *ps_callchains; /* all saved call chains */ 963 struct pmc_sample ps_samples[]; /* array of sample entries */ 964 }; 965 966 #define PMC_CONS_SAMPLE(psb) \ 967 (&(psb)->ps_samples[(psb)->ps_considx & pmc_sample_mask]) 968 969 #define PMC_CONS_SAMPLE_OFF(psb, off) \ 970 (&(psb)->ps_samples[(off) & pmc_sample_mask]) 971 972 #define PMC_PROD_SAMPLE(psb) \ 973 (&(psb)->ps_samples[(psb)->ps_prodidx & pmc_sample_mask]) 974 975 976 /* 977 * struct pmc_multipart 978 * 979 * Multipart payload 980 */ 981 struct pmc_multipart { 982 char pl_type; 983 char pl_length; 984 uint64_t pl_mpdata[10]; 985 }; 986 987 /* 988 * struct pmc_cpustate 989 * 990 * A CPU is modelled as a collection of HW PMCs with space for additional 991 * flags. 
 */

struct pmc_cpu {
	uint32_t	pc_state;	/* physical cpu number + flags */
	struct pmc_samplebuffer *pc_sb[3];	/* space for samples */
	struct pmc_hw	*pc_hwpmcs[];	/* 'npmc' pointers */
};

/*
 * 'pc_state' packs the physical CPU number (low 8 bits) and flags
 * (upper 24 bits).
 */
#define	PMC_PCPU_CPU_MASK	0x000000FF
#define	PMC_PCPU_FLAGS_MASK	0xFFFFFF00
#define	PMC_PCPU_FLAGS_SHIFT	8
#define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
#define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
#define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_FLAG_HTT	(PMC_PCPU_FLAGS_TO_STATE(0x1))

/*
 * struct pmc_binding
 *
 * CPU binding information.
 */

struct pmc_binding {
	int	pb_bound;	/* is bound? */
	int	pb_cpu;		/* if so, to which CPU */
	u_char	pb_priority;	/* Thread active priority. */
};

struct pmc_mdep;

/*
 * struct pmc_classdep
 *
 * PMC class-dependent operations.
 */
struct pmc_classdep {
	uint32_t	pcd_caps;	/* class capabilities */
	enum pmc_class	pcd_class;	/* class id */
	int		pcd_num;	/* number of PMCs */
	int		pcd_ri;		/* row index of the first PMC in class */
	int		pcd_width;	/* width of the PMC */

	/* configuring/reading/writing the hardware PMCs */
	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
	int (*pcd_read_pmc)(int _cpu, int _ri, struct pmc *_pm,
	    pmc_value_t *_value);
	int (*pcd_write_pmc)(int _cpu, int _ri, struct pmc *_pm,
	    pmc_value_t _value);

	/* pmc allocation/release */
	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
	    const struct pmc_op_pmcallocate *_a);
	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* starting and stopping PMCs */
	int (*pcd_start_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_stop_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* description */
	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
	    struct pmc **_ppmc);
	int (*pcd_get_caps)(int _ri, uint32_t *_caps);

	/* class-dependent initialization & finalization */
	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

	/* machine-specific interface */
	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
};

/*
 * struct pmc_mdep
 *
 * Machine dependent bits needed per CPU type.
 */

struct pmc_mdep {
	uint32_t	pmd_cputype;	/* from enum pmc_cputype */
	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
	uint32_t	pmd_nclass;	/* number of PMC classes present */

	/*
	 * Machine dependent methods.
	 */

	/* thread context switch in/out */
	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);

	/* handle a PMC interrupt */
	int (*pmd_intr)(struct trapframe *_tf);

	/*
	 * PMC class dependent information; presumably 'pmd_nclass'
	 * entries -- see pmc_mdep_alloc(nclasses) below.
	 */
	struct pmc_classdep pmd_classdep[];
};

/*
 * Per-CPU state. This is an array of 'mp_ncpu' pointers
 * to struct pmc_cpu descriptors.
 */

extern struct pmc_cpu **pmc_pcpu;

/* driver statistics */
extern struct pmc_driverstats pmc_stats;

#if defined(HWPMC_DEBUG)

/* HWPMC_DEBUG without KTR will compile but is a no-op. */
#if !defined(KTR) || !defined(KTR_COMPILE) || ((KTR_COMPILE & KTR_SUBSYS) == 0)
#error "HWPMC_DEBUG requires KTR and KTR_COMPILE=KTR_SUBSYS -- see ktr(4)"
#endif

#include <sys/ktr.h>

#define	__pmcdbg_used		/* unused variable annotation */

/*
 * Debug flags, major flag groups.
 *
 * Please keep the DEBUGGING section of the hwpmc(4) man page in sync.
 */
struct pmc_debugflags {
	int	pdb_CPU;
	int	pdb_CSW;
	int	pdb_LOG;
	int	pdb_MDP;
	int	pdb_MOD;
	int	pdb_OWN;
	int	pdb_PMC;
	int	pdb_PRC;
	int	pdb_SAM;
};

extern struct pmc_debugflags pmc_debugflags;

#define	KTR_PMC			KTR_SUBSYS

#define	PMC_DEBUG_STRSIZE		128
/* All nine major-group flag words start cleared. */
#define	PMC_DEBUG_DEFAULT_FLAGS		{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/*
 * PMCDBGn(M, N, L, F, p1..pn): emit a KTR trace record carrying 'n'
 * arguments when minor flag 'N' of major group 'M' is set in
 * 'pmc_debugflags'.  'M', 'N' and 'L' are stringized into the CTRn
 * format prefix; 'F' is the format proper.
 */
#define	PMCDBG0(M, N, L, F) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR0(KTR_PMC, #M ":" #N ":" #L ": " F);			\
} while (0)
#define	PMCDBG1(M, N, L, F, p1) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1);		\
} while (0)
#define	PMCDBG2(M, N, L, F, p1, p2) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2);		\
} while (0)
#define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3);	\
} while (0)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);\
} while (0)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,	\
		    p5);						\
} while (0)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,	\
		    p5, p6);						\
} while (0)

/* Major numbers */
#define	PMC_DEBUG_MAJ_CPU		0 /* cpu switches */
#define	PMC_DEBUG_MAJ_CSW		1 /* context switches */
#define	PMC_DEBUG_MAJ_LOG		2 /* logging */
#define	PMC_DEBUG_MAJ_MDP		3 /* machine dependent */
#define	PMC_DEBUG_MAJ_MOD		4 /* misc module infrastructure */
#define	PMC_DEBUG_MAJ_OWN		5 /* owner */
#define	PMC_DEBUG_MAJ_PMC		6 /* pmc management */
#define	PMC_DEBUG_MAJ_PRC		7 /* processes */
#define	PMC_DEBUG_MAJ_SAM		8 /* sampling */

/* Minor numbers */

/* Common (8 bits) */
#define	PMC_DEBUG_MIN_ALL		0 /* allocation */
#define	PMC_DEBUG_MIN_REL		1 /* release */
#define	PMC_DEBUG_MIN_OPS		2 /* ops: start, stop, ... */
#define	PMC_DEBUG_MIN_INI		3 /* init */
#define	PMC_DEBUG_MIN_FND		4 /* find */

/* MODULE */
#define	PMC_DEBUG_MIN_PMH		14 /* pmc_hook */
#define	PMC_DEBUG_MIN_PMS		15 /* pmc_syscall */

/* OWN */
#define	PMC_DEBUG_MIN_ORM		8 /* owner remove */
#define	PMC_DEBUG_MIN_OMR		9 /* owner maybe remove */

/* PROCESSES */
#define	PMC_DEBUG_MIN_TLK		8 /* link target */
#define	PMC_DEBUG_MIN_TUL		9 /* unlink target */
#define	PMC_DEBUG_MIN_EXT		10 /* process exit */
#define	PMC_DEBUG_MIN_EXC		11 /* process exec */
#define	PMC_DEBUG_MIN_FRK		12 /* process fork */
#define	PMC_DEBUG_MIN_ATT		13 /* attach/detach */
#define	PMC_DEBUG_MIN_SIG		14 /* signalling */

/* CONTEXT SWITCHES */
#define	PMC_DEBUG_MIN_SWI		8 /* switch in */
#define	PMC_DEBUG_MIN_SWO		9 /* switch out */

/* PMC */
#define	PMC_DEBUG_MIN_REG		8 /* pmc register */
#define	PMC_DEBUG_MIN_ALR		9 /* allocate row */

/* MACHINE DEPENDENT LAYER */
#define	PMC_DEBUG_MIN_REA		8 /* read */
#define	PMC_DEBUG_MIN_WRI		9 /* write */
#define	PMC_DEBUG_MIN_CFG		10 /* config */
#define	PMC_DEBUG_MIN_STA		11 /* start */
#define	PMC_DEBUG_MIN_STO		12 /* stop */
#define	PMC_DEBUG_MIN_INT		13 /* interrupts */

/* CPU */
#define	PMC_DEBUG_MIN_BND		8 /* bind */
#define	PMC_DEBUG_MIN_SEL		9 /* select */

/* LOG */
#define	PMC_DEBUG_MIN_GTB		8 /* get buf */
#define	PMC_DEBUG_MIN_SIO		9 /* schedule i/o */
#define	PMC_DEBUG_MIN_FLS		10 /* flush */
#define	PMC_DEBUG_MIN_SAM		11 /* sample */
#define	PMC_DEBUG_MIN_CLO		12 /* close */

#else
/* Debugging disabled: all PMCDBGn() invocations compile to nothing. */
#define	__pmcdbg_used		__unused
#define	PMCDBG0(M, N, L, F)		/* nothing */
#define	PMCDBG1(M, N, L, F, p1)
#define	PMCDBG2(M, N, L, F, p1, p2)
#define	PMCDBG3(M, N, L, F, p1, p2, p3)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
#endif

/* declare a dedicated memory pool */
MALLOC_DECLARE(M_PMC);

/*
 * Functions
 */

struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
int	pmc_getrowdisp(int _ri);
int	pmc_process_interrupt_mp(int _ring, struct pmc *_pm,
    struct trapframe *_tf, struct pmc_multipart *mp);
int	pmc_process_interrupt(int _ring, struct pmc *_pm,
    struct trapframe *_tf);
int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
void	pmc_restore_cpu_binding(struct pmc_binding *pb);
void	pmc_save_cpu_binding(struct pmc_binding *pb);
void	pmc_select_cpu(int cpu);
struct pmc_mdep *pmc_mdep_alloc(int nclasses);
void	pmc_mdep_free(struct pmc_mdep *md);
uint64_t pmc_rdtsc(void);
#endif /* _KERNEL */
#endif /* _SYS_PMC_H_ */