/linux-3.3/tools/perf/util/ |
D | cpumap.c |
      cpu_map__default_new():
        9     struct cpu_map *cpus;
        16    cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
        17    if (cpus != NULL) {
        20            cpus->map[i] = i;
        22        cpus->nr = nr_cpus;
        25    return cpus;
      cpu_map__trim_new():
        31    struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
        33    if (cpus != NULL) {
        34        cpus->nr = nr_cpus;
        35        memcpy(cpus->map, tmp_cpus, payload_size);
      [all …]
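Both constructors above rely on the same C idiom: the struct ends in a flexible array member, so the header and the per-CPU slots come from a single malloc() and a single free() releases everything. A minimal userspace sketch of that pattern, assuming a hypothetical my_cpu_map stand-in rather than perf's real struct cpu_map:

    /* Sketch of the header-plus-flexible-array allocation pattern. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct my_cpu_map {
            int nr;
            int map[];              /* flexible array member */
    };

    static struct my_cpu_map *my_cpu_map__default_new(void)
    {
            struct my_cpu_map *cpus;
            long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
            int i;

            if (nr_cpus < 0)
                    return NULL;

            /* One allocation covers the header and nr_cpus map slots. */
            cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
            if (cpus != NULL) {
                    for (i = 0; i < nr_cpus; i++)
                            cpus->map[i] = i;   /* identity map: slot i -> CPU i */
                    cpus->nr = nr_cpus;
            }
            return cpus;
    }

    int main(void)
    {
            struct my_cpu_map *cpus = my_cpu_map__default_new();

            if (!cpus)
                    return 1;
            printf("%d online CPUs, first is CPU %d\n", cpus->nr, cpus->map[0]);
            free(cpus);
            return 0;
    }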
|
D | evlist.c |
      perf_evlist__init():
        28    void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
        36        perf_evlist__set_maps(evlist, cpus, threads);
      perf_evlist__new():
        40    struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
        46        perf_evlist__init(evlist, cpus, threads);
      perf_evlist__config_attrs():
        56    if (evlist->cpus->map[0] < 0)
      perf_evlist__disable():
        260   for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
      perf_evlist__enable():
        273   for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
      perf_evlist__alloc_pollfd():
        283   int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
      perf_evlist__alloc_mmap():
        443   evlist->nr_mmaps = evlist->cpus->nr;
        444   if (evlist->cpus->map[0] == -1)
      [all …]
|
D | evlist.h |
        35    struct cpu_map *cpus;        (member of struct perf_evlist)
        46    struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
        48    void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
      perf_evlist__set_maps():
        102   struct cpu_map *cpus,
        105   evlist->cpus = cpus;
|
/linux-3.3/Documentation/cgroups/ |
D | cpusets.txt |
        28    2.2 Adding/removing cpus
        40    Cpusets provide a mechanism for assigning a set of CPUs and Memory
        54    include CPUs in its CPU affinity mask, and using the mbind(2) and
        57    CPUs or Memory Nodes not in that cpuset.  The scheduler will not
        64    cpusets and which CPUs and Memory Nodes are assigned to each cpuset,
        72    The management of large computer systems, with many processors (CPUs),
        110   Cpusets provide a Linux kernel mechanism to constrain which CPUs and
        114   CPUs a task may be scheduled (sched_setaffinity) and on which Memory
        119    - Cpusets are sets of allowed CPUs and Memory Nodes, known to the
        123    - Calls to sched_setaffinity are filtered to just those CPUs
      [all …]
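The last hit is the contract that matters to userspace: sched_setaffinity(2) requests are filtered against the calling task's cpuset. A minimal sketch of such a request, assuming Linux/glibc (the choice of CPUs 0-1 is arbitrary):

    /* Ask to run on CPUs 0-1; the kernel intersects this with our cpuset. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);
            CPU_SET(0, &mask);
            CPU_SET(1, &mask);

            /* pid 0 means "the calling task" */
            if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
                    perror("sched_setaffinity");
                    return 1;
            }
            /* From here on, the scheduler only places us on CPU 0 or 1,
             * restricted further to the CPUs allowed by our cpuset. */
            return 0;
    }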
|
/linux-3.3/include/linux/ |
D | stop_machine.h |
        13    * function to be executed on a single or multiple cpus preempting all
        14    * other processes and monopolizing those cpus until it finishes.
        18    * cpus are online.
        103   * stop_machine: freeze the machine on all CPUs and run this function
        106   * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
        115   int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
        118   * __stop_machine: freeze the machine on all CPUs and run this function
        121   * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
        123   * Description: This is a special version of the above, which assumes cpus
        126   int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
      [all …]
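A hedged sketch of how a caller might use the stop_machine() prototype declared at line 115; the classic use is updating code or data that must never be observed mid-update. struct patch_data and apply_patch() are hypothetical, not from this tree:

    #include <linux/stop_machine.h>

    struct patch_data {
            void *addr;
            unsigned long insn;
    };

    /* Runs with every other online CPU preempted and spinning, so the
     * word can be swapped without anyone executing/reading it concurrently. */
    static int apply_patch(void *data)
    {
            struct patch_data *pd = data;

            *(unsigned long *)pd->addr = pd->insn;
            return 0;
    }

    static int patch_text(struct patch_data *pd)
    {
            /* NULL cpumask: let any online CPU run apply_patch(). */
            return stop_machine(apply_patch, pd, NULL);
    }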
|
/linux-3.3/tools/power/cpupower/utils/ |
D | cpufreq-info.c |
      count_cpus():
        31    printf(_("Couldn't count the number of CPUs (%s: %s), assuming 1\n"), "/proc/stat", strerror(errno…
      debug_output_one():
        225   struct cpufreq_affected_cpus *cpus;
        248   cpus = cpufreq_get_related_cpus(cpu);
        249   if (cpus) {
        250       printf(_("  CPUs which run at the same hardware frequency: "));
        251       while (cpus->next) {
        252           printf("%d ", cpus->cpu);
        253           cpus = cpus->next;
        255       printf("%d\n", cpus->cpu);
        256       cpufreq_put_related_cpus(cpus);
      [all …]
|
D | cpufreq-set.c |
      cmd_freq_set():
        287   /* Default is: set all CPUs */
        291   /* Also set frequency settings for related CPUs if -r is passed */
        295   struct cpufreq_affected_cpus *cpus;
        301   cpus = cpufreq_get_related_cpus(cpu);
        302   if (!cpus)
        304   while (cpus->next) {
        305       bitmask_setbit(cpus_chosen, cpus->cpu);
        306       cpus = cpus->next;
        308   cpufreq_put_related_cpus(cpus);
        313   /* loop over CPUs */
|
/linux-3.3/Documentation/ |
D | cpu-hotplug.txt |
        30    Such advances require CPUs available to a kernel to be removed either for
        45    maxcpus=n    Restrict boot time cpus to n. Say if you have 4 cpus, using
        47                 other cpus later online, read FAQ's for more info.
        49    additional_cpus=n (*)  Use this to limit hotpluggable cpus. This option sets
        62    determine the number of potentially hot-pluggable cpus. The implementation
        63    should only rely on this to count the # of cpus, but *MUST* not rely
        65    BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
        66    use this parameter "additional_cpus=x" to represent those cpus in the
        69    possible_cpus=n  [s390,x86_64] use this to set hotpluggable cpus.
        79    cpu_possible_map: Bitmap of possible CPUs that can ever be available in the
      [all …]
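Putting these parameters together on the 4-CPU example from line 45: booting with maxcpus=2 brings up CPUs 0-1 only, and a remaining CPU can later be brought online through the sysfs hotplug interface this document describes, e.g. echo 1 > /sys/devices/system/cpu/cpu2/online (assuming the kernel was built with CPU hotplug support).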
|
D | cputopology.txt |
        72    offline: CPUs that are not online because they have been
        74             of CPUs allowed by the kernel configuration (kernel_max
        75             above). [~cpu_online_mask + cpus >= NR_CPUS]
        77    online: CPUs that are online and being scheduled [cpu_online_mask]
        79    possible: CPUs that have been allocated resources and can be
        82    present: CPUs that have been identified as being present in the
        88    In this example, there are 64 CPUs in the system but cpus 32-63 exceed
        90    being 32. Note also that CPUs 2 and 4-31 are not online but could be
        100   started with possible_cpus=144. There are 4 CPUs in the system and cpu2
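The four mask files described above are exported under /sys/devices/system/cpu/. A small userspace sketch that prints them, using only the paths this document names (error handling kept minimal):

    #include <stdio.h>

    static void show_mask(const char *name)
    {
            char path[128], buf[256];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/devices/system/cpu/%s", name);
            f = fopen(path, "r");
            if (!f)
                    return;
            if (fgets(buf, sizeof(buf), f))
                    printf("%-9s %s", name, buf);   /* e.g. "online    0-3" */
            fclose(f);
    }

    int main(void)
    {
            show_mask("online");
            show_mask("offline");
            show_mask("possible");
            show_mask("present");
            return 0;
    }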
|
/linux-3.3/Documentation/power/ |
D | suspend-and-cpuhotplug.txt |
        22    |tasks | | cpus | |    | | cpus | |tasks|      (column headers from the ASCII flow diagram)
        54    online CPUs
        70    Note down these cpus in | P                    (diagram excerpt)
        93    | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
        149   the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
        167   update on the CPUs, as discussed below:
        174   a. When all the CPUs are identical:
        177      to apply the same microcode revision to each of the CPUs.
        182      all CPUs, in order to handle case 'b' described below.
        185   b. When some of the CPUs are different than the rest:
      [all …]
|
/linux-3.3/arch/x86/kernel/ |
D | tsc_sync.c |
        6     * We check whether all boot CPUs have their TSC's synchronized,
        9     * The warp-check is point-to-point between two CPUs, the CPU
        13    * Only two CPUs may participate - they can enter in any order.
        15    * protects against more than 2 CPUs entering this code. )
        25    * Entry/exit counters that make sure that both CPUs
        43    * TSC-warp measurement loop running on both CPUs:
      check_tsc_sync_source():
        107   int cpus = 2;
        131   while (atomic_read(&start_count) != cpus-1)
        140   while (atomic_read(&stop_count) != cpus-1)
        146   pr_warning("Measured %Ld cycles TSC warp between CPUs, "
      [all …]
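The start_count/stop_count pattern quoted from check_tsc_sync_source() is a two-party rendezvous: each side increments a counter, then spins until the peer has arrived, so both enter and leave the measured region together. A userspace analogue of just that counter pattern, assuming C11 atomics and pthreads (this illustrates the rendezvous, not the TSC-warp measurement itself):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define CPUS 2

    static atomic_int start_count;
    static atomic_int stop_count;

    static void *measured_region(void *arg)
    {
            int me = (int)(long)arg;

            /* Check in, then wait until the peer has checked in too. */
            atomic_fetch_add(&start_count, 1);
            while (atomic_load(&start_count) != CPUS)
                    ;       /* spin: both sides start together */

            printf("thread %d: running the measurement loop\n", me);

            /* Check out, then wait for the peer before returning. */
            atomic_fetch_add(&stop_count, 1);
            while (atomic_load(&stop_count) != CPUS)
                    ;       /* spin: both sides finish together */
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, measured_region, (void *)0L);
            pthread_create(&b, NULL, measured_region, (void *)1L);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            return 0;
    }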
|
/linux-3.3/kernel/ |
D | stop_machine.c |
        26    * be shared by works on different cpus.
        35    /* the actual stopper, one per every possible cpu, enabled on online cpus */
        94    * partially or fully on different cpus. @fn should either be ready
        179   * stop_cpus - stop multiple cpus
        180   * @cpumask: cpus to stop
        184   * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
        189   * This function doesn't guarantee the cpus in @cpumask stay online
        190   * till @fn completes. If some cpus go down in the middle, execution
        191   * on the cpu may happen partially or fully on different cpus. @fn
        193   * the cpus stay online until this function completes.
      [all …]
|
/linux-3.3/Documentation/scheduler/ |
D | sched-domains.txt |
        6     Each scheduling domain spans a number of CPUs (stored in the ->span field).
        9     i. The top domain for each CPU will generally span all CPUs in the system
        11       CPUs will never be given tasks to run unless the CPUs allowed mask is
        13       CPUs".
        21    CPUs as they contain read only data after they have been set up.
        25    load of each of its member CPUs, and only when the load of a group becomes
        43    If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
        54    In SMP, the parent of the base domain will span all physical CPUs in the
        76    CPUs using cpu_attach_domain.
|
/linux-3.3/tools/perf/scripts/perl/ |
D | workqueue-stats.pl |
        24    my @cpus;
        32    $cpus[$common_cpu]{$thread_pid}{destroyed}++;
        33    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
        42    $cpus[$common_cpu]{$thread_pid}{created}++;
        43    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
        52    $cpus[$common_cpu]{$thread_pid}{executed}++;
        53    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
        62    $cpus[$common_cpu]{$thread_pid}{inserted}++;
        63    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
        72    foreach my $pidhash (@cpus) {
      [all …]
|
/linux-3.3/arch/mips/cavium-octeon/ |
D | smp.c |
        81    * Detect available CPUs, populate cpu_possible_map
      octeon_smp_setup():
        99    int cpus;
        106   /* The present CPUs are initially just the boot cpu (CPU 0). */
        115   /* The present CPUs get the lowest CPU numbers. */
        116   cpus = 1;
        119       set_cpu_possible(cpus, true);
        120       set_cpu_present(cpus, true);
        121       __cpu_number_map[id] = cpus;
        122       __cpu_logical_map[cpus] = id;
        123       cpus++;
      [all …]
|
/linux-3.3/Documentation/networking/ |
D | scaling.txt |
        25    queues to distribute processing among CPUs. The NIC distributes packets by
        28    queue, which in turn can be processed by separate CPUs. This mechanism is
        55    one for each memory domain, where a memory domain is a set of CPUs that
        75    to spread receive interrupts between CPUs. To manually adjust the IRQ
        83    interrupt processing forms a bottleneck. Spreading load between CPUs
        85    is to allocate as many queues as there are CPUs in the system (or the
        128   Each receive hardware queue has an associated list of CPUs to which
        133   the end of the bottom half routine, IPIs are sent to any CPUs for which
        142   explicitly configured. The list of CPUs to which RPS may forward traffic
        147   This file implements a bitmap of CPUs. RPS is disabled when it is zero
      [all …]
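Line 147 refers to the per-queue rps_cpus bitmap, which scaling.txt locates at /sys/class/net/<dev>/queues/rx-<n>/rps_cpus. A hedged sketch that enables RPS for CPUs 0-1 on one queue ("eth0" and queue rx-0 are assumptions; requires root):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            /* Hex bitmap 0x3: bit 0 = CPU 0, bit 1 = CPU 1.
             * Writing 0 disables RPS for this queue. */
            fputs("3\n", f);
            fclose(f);
            return 0;
    }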
|
/linux-3.3/arch/powerpc/kernel/ |
D | crash.c |
        34    * The primary CPU waits a while for all secondary CPUs to enter. This is to
        35    * avoid sending an IPI if the secondary CPUs are entering
      crash_ipi_callback():
        89    * This barrier is needed to make sure that all CPUs are stopped.
      crash_kexec_prepare_cpus():
        113   printk(KERN_EMERG "Sending IPI to other CPUs\n");
        120   * FIXME: Until we will have the way to stop other CPUs reliably,
        121   * the crash CPU will send an IPI and wait for other CPUs to
        147   * A system reset will cause all CPUs to take an 0x100 exception.
        149   * CPUs reexecute the crash_kexec_secondary path.
        160   * A system reset will force all CPUs to execute the
        179   * This function will be called by secondary cpus.
      [all …]
|
/linux-3.3/tools/power/cpupower/utils/helpers/ |
D | topology.c |
        11    in the end, but double check for -1 for offlined cpus at other places */
        69    * Returns amount of cpus, negative on error, cpu_top must be
      get_cpu_topology():
        76    int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF);
        78    cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus);
        82    for (cpu = 0; cpu < cpus; cpu++) {
        95    qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
        101   for (cpu = 0; cpu < cpus && cpu_top->core_info[cpu].pkg == 0; cpu++) {
        105   return cpus;
|
/linux-3.3/arch/mn10300/mm/ |
D | cache-smp-flush.c |
        18    * Flush the data cache on all CPUs.
        34    * Flush a range of addresses in the data cache on all CPUs covering
        54    * Flush a range of addresses in the data cache on all CPUs, between start and
        72    * Flush a range of addresses in the data cache on all CPUs, between start and
        88    * Flush and invalidate the data cache on all CPUs.
        105   * Flush and invalidate a range of addresses in the data cache on all CPUs
        126   * Flush and invalidate a range of addresses in the data cache on all CPUs,
        145   * Flush and invalidate a range of addresses in the data cache on all CPUs,
|
D | cache-smp-inv.c |
        18    * Invalidate the instruction cache on all CPUs.
        34    * Invalidate a range of addresses in the instruction cache on all CPUs
        54    * Invalidate a range of addresses in the instruction cache on all CPUs,
        72    * Invalidate a range of addresses in the instruction cache on all CPUs,
        88    * Invalidate the data cache on all CPUs.
        104   * Invalidate a range of addresses in the data cache on all CPUs covering the
        124   * Invalidate a range of addresses in the data cache on all CPUs, between start
        142   * Invalidate a range of addresses in the data cache on all CPUs, between start
|
/linux-3.3/tools/perf/Documentation/ |
D | perf-stat.txt |
        46    --all-cpus::
        47            system-wide collection from all CPUs
        63    Count only on the list of CPUs provided. Multiple CPUs can be provided as a
        64    comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
        66    to activate system-wide monitoring. Default is to count on all CPUs.
        70    Do not aggregate counts across all monitored CPUs in system-wide mode (-a).
        90    container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
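Combining the options documented above, a plausible invocation is perf stat -a -C 0,1 -- sleep 5: system-wide counting (-a) restricted to CPUs 0 and 1 for the duration of the workload; the sleep command is just an arbitrary way to bound the measurement window.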
|
D | perf-record.txt |
        47    --all-cpus::
        48            System-wide collection from all CPUs.
        129   Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a
        130   comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
        132   the thread executes on the designated CPUs. Default is to monitor all CPUs.
        144   container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
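The sampling analogue: perf record -a -C 0-2 -- sleep 10 records system-wide but keeps only samples taken on CPUs 0-2 (the workload and duration here are arbitrary choices for illustration).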
|
/linux-3.3/arch/mn10300/kernel/ |
D | smp.c |
        84    static cpumask_t cpu_callin_map;   /* Bitmask of callin CPUs */
        85    static cpumask_t cpu_callout_map;  /* Bitmask of callout CPUs */
        94    static int cpucount;               /* The count of boot CPUs */
        298   * send_IPI_mask - Send IPIs to all CPUs in list
        299   * @cpumask: The list of CPUs to target.
        302   * Send the specified IPI to all the CPUs in the list, not waiting for them to
        334   * send_IPI_allbutself - Send IPIs to all the other CPUs.
        337   * Send the specified IPI to all CPUs in the system barring the current one,
        371   * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
        374   * @wait: If true, wait (atomically) until function is run on all CPUs.
      [all …]
|
/linux-3.3/tools/perf/python/ |
D | twatch.py |
        19    cpus = perf.cpu_map()
        25    evsel.open(cpus = cpus, threads = threads)
        26    evlist = perf.evlist(cpus, threads)
        31    for cpu in cpus:
|
/linux-3.3/lib/ |
D | cpu_rmap.c |
        17    * These functions maintain a mapping from CPUs to some ordered set of
        20    * cover all CPUs in the system. For those CPUs not directly covered
      alloc_cpu_rmap():
        50    /* Initially assign CPUs to objects on a rota, since we have
        53     * CPUs that are not present/online, since we definitely want
        54     * any newly-hotplugged CPUs to have some object assigned.
      cpu_rmap_update():
        139   /* Invalidate distance for all CPUs for which this used to be
        140    * the nearest object. Mark those CPUs for update.
        151   /* Set distance to 0 for all CPUs in the new affinity mask.
        152    * Mark all CPUs within their NUMA nodes for update.
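A hedged kernel-side sketch of how a consumer might use the cpu_rmap functions named above (alloc_cpu_rmap() and cpu_rmap_update()): map each CPU to the "nearest" of two receive queues, then look the mapping up on the hot path. The two-queue setup, struct rx_queue, and the cpumask_of(i) affinities are hypothetical, and the signatures follow include/linux/cpu_rmap.h in this tree as I understand them:

    #include <linux/cpu_rmap.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    struct rx_queue { int id; };

    static struct rx_queue queues[2] = { { .id = 0 }, { .id = 1 } };
    static struct cpu_rmap *rmap;

    static int rx_queue_rmap_init(void)
    {
            int i, index;

            rmap = alloc_cpu_rmap(2, GFP_KERNEL);   /* room for 2 objects */
            if (!rmap)
                    return -ENOMEM;

            for (i = 0; i < 2; i++) {
                    index = cpu_rmap_add(rmap, &queues[i]);
                    if (index < 0)
                            return index;
                    /* Tell the rmap which CPUs queue i is close to;
                     * cpumask_of(i) is just a stand-in affinity here. */
                    cpu_rmap_update(rmap, index, cpumask_of(i));
            }
            return 0;
    }

    /* Hot path: which queue is nearest to this CPU? */
    static struct rx_queue *nearest_queue(unsigned int cpu)
    {
            return cpu_rmap_lookup_obj(rmap, cpu);
    }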
|