/linux/include/linux/
    context_tracking_state.h
        66   struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);   in ct_rcu_watching_cpu()
        73   struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);   in ct_rcu_watching_cpu_acquire()
        85   struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);   in ct_nesting_cpu()
        97   struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);   in ct_nmi_nesting_cpu()
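The context-tracking matches above read a statically defined per-CPU structure for an arbitrary CPU: DEFINE_PER_CPU() creates one instance per CPU, and per_cpu_ptr(&var, cpu) yields the address of that CPU's copy. A minimal sketch of this shape, with invented ct_demo_* names standing in for the real context-tracking types:

    #include <linux/compiler.h>
    #include <linux/percpu.h>

    /* Illustrative stand-in for struct context_tracking; not the real type. */
    struct ct_demo_state {
            int nesting;
    };

    static DEFINE_PER_CPU(struct ct_demo_state, ct_demo_state);

    /* Read another CPU's copy; the caller provides whatever ordering it needs. */
    static int ct_demo_nesting_cpu(int cpu)
    {
            struct ct_demo_state *st = per_cpu_ptr(&ct_demo_state, cpu);

            return READ_ONCE(st->nesting);
    }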
/linux/drivers/infiniband/ulp/rtrs/
    rtrs-clt-stats.c
        27   s = per_cpu_ptr(stats->pcpu_stats, con->cpu);   in rtrs_clt_update_wc_stats()
        47   s = per_cpu_ptr(stats->pcpu_stats, cpu);   in rtrs_clt_stats_migration_from_cnt_to_str()
        66   s = per_cpu_ptr(stats->pcpu_stats, cpu);   in rtrs_clt_stats_migration_to_cnt_to_str()
        90   r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;   in rtrs_clt_stats_rdma_to_str()
        119  s = per_cpu_ptr(stats->pcpu_stats, cpu);   in rtrs_clt_reset_rdma_stats()
        135  s = per_cpu_ptr(stats->pcpu_stats, cpu);   in rtrs_clt_reset_cpu_migr_stats()
    rtrs-srv-stats.c
        21   r = per_cpu_ptr(stats->rdma_stats, cpu);   in rtrs_srv_reset_rdma_stats()
        40   r = per_cpu_ptr(stats->rdma_stats, cpu);   in rtrs_srv_stats_rdma_to_str()
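The rtrs stats files above follow the usual per-CPU statistics shape: counters live in alloc_percpu() memory, the hot path touches only the local CPU's copy, and readers or reset paths walk every possible CPU through per_cpu_ptr(). A hedged sketch of that shape; the demo_stats names are invented, not the rtrs structures:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_stats {
            u64 requests;
    };

    /* One struct demo_stats per possible CPU. */
    struct demo_stats __percpu *demo_stats_alloc(void)
    {
            return alloc_percpu(struct demo_stats);
    }

    /* Hot path: bump the current CPU's counter, no lock required. */
    void demo_stats_account(struct demo_stats __percpu *stats)
    {
            this_cpu_inc(stats->requests);
    }

    /* Slow path (sysfs read, reset, ...): visit every CPU's copy and sum it. */
    u64 demo_stats_sum(struct demo_stats __percpu *stats)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu_ptr(stats, cpu)->requests;

            return sum;
    }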
/linux/kernel/
    relay.c
        203  *per_cpu_ptr(chan->buf, buf->cpu) = NULL;   in relay_destroy_buf()
        333  if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {   in relay_reset()
        340  if ((buf = *per_cpu_ptr(chan->buf, i)))   in relay_reset()
        387  return *per_cpu_ptr(chan->buf, 0);   in relay_open_buf()
        411  *per_cpu_ptr(chan->buf, 0) = buf;   in relay_open_buf()
        445  if (*per_cpu_ptr(chan->buf, cpu))   in relay_prepare_cpu()
        453  *per_cpu_ptr(chan->buf, cpu) = buf;   in relay_prepare_cpu()
        521  *per_cpu_ptr(chan->buf, i) = buf;   in relay_open()
        530  if ((buf = *per_cpu_ptr(chan->buf, i)))   in relay_open()
        633  buf = *per_cpu_ptr(cha   in relay_subbufs_consumed()
        [all...]
        (this pointer-per-CPU-slot pattern is sketched below, after this directory's entries)
    cpu.c
        174  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);   in cpuhp_invoke_callback()
        312  atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);   in cpuhp_wait_for_sync_state()
        363  atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);   in cpuhp_bp_sync_dead()
        405  atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);   in cpuhp_can_boot_ap()
        796  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);   in bringup_wait_for_ap_online()
        829  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);   in cpuhp_bringup_ap()
        861  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);   in bringup_cpu()
        1131 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);   in cpuhp_invoke_ap_callback()
        1180 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);   in cpuhp_kick_ap_work()
        1211 st = per_cpu_ptr(   in cpuhp_init_state()
        [all...]
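The relay.c matches above dereference the per_cpu_ptr() result (note the leading '*'): the per-CPU area holds one pointer slot per CPU, and the buffer each slot points to is allocated separately. A minimal sketch of that double indirection, under invented demo_* names rather than the relay types:

    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct demo_buf {
            void *data;
    };

    struct demo_chan {
            /* Per-CPU slot that holds a 'struct demo_buf *'. */
            struct demo_buf * __percpu *buf;
    };

    int demo_chan_init(struct demo_chan *chan)
    {
            chan->buf = alloc_percpu(struct demo_buf *);
            return chan->buf ? 0 : -ENOMEM;
    }

    /* Install a buffer for @cpu, mirroring '*per_cpu_ptr(chan->buf, cpu) = buf'. */
    int demo_chan_attach(struct demo_chan *chan, int cpu)
    {
            struct demo_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            *per_cpu_ptr(chan->buf, cpu) = buf;
            return 0;
    }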
/linux/kernel/irq/
    matrix.c
        72   struct cpumap *cm = per_cpu_ptr(m->maps, cpu);   in irq_alloc_matrix()
        144  cm = per_cpu_ptr(m->maps, cpu);   in matrix_find_best_cpu()
        165  cm = per_cpu_ptr(m->maps, cpu);   in matrix_find_best_cpu_managed()
        221  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);   in irq_matrix_reserve_managed()
        262  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);   in irq_matrix_remove_managed()
        305  cm = per_cpu_ptr(m->maps, cpu);   in irq_matrix_alloc_managed()
        400  cm = per_cpu_ptr(m->maps, cpu);   in irq_matrix_alloc()
        427  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);   in irq_matrix_free()
        510  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);   in irq_matrix_debug_show()
/linux/kernel/sched/
    cpuacct.c
        99   u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);   in cpuacct_cpuusage_read()
        100  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;   in cpuacct_cpuusage_read()
        139  u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);   in cpuacct_cpuusage_write()
        140  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;   in cpuacct_cpuusage_write()
        272  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;   in cpuacct_stats_show()
        280  cputime.sum_exec_runtime += *per_cpu_ptr(ca->cpuusage, cpu);   in cpuacct_stats_show()
        344  *per_cpu_ptr(ca->cpuusage, cpu) += cputime;   in cpuacct_charge()
        (the per-CPU counter charge pattern is sketched below, after this directory's entries)
    topology.c
        921  sibling = *per_cpu_ptr(sdd->sd, i);   in build_balance_mask()
        982  sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);   in init_overlap_sched_group()
        1042 sibling = *per_cpu_ptr(sdd->sd, i);   in build_overlap_sched_groups()
        1192 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);   in get_group()
        1200 sg = *per_cpu_ptr(sdd->sg, cpu);   in get_group()
        1201 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);   in get_group()
        1577 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);   in claim_allocations()
        1578 *per_cpu_ptr(sdd->sd, cpu) = NULL;   in claim_allocations()
        1580 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))   in claim_allocations()
        1581 *per_cpu_ptr(sd   in claim_allocations()
        [all...]
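The cpuacct.c matches above keep a plain u64 counter per CPU (ca->cpuusage) and charge the copy belonging to whichever CPU a task ran on, not necessarily the current one. A small sketch of that pattern under assumed demo_acct names:

    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_acct {
            u64 __percpu *usage;    /* one u64 per CPU */
    };

    int demo_acct_init(struct demo_acct *acct)
    {
            acct->usage = alloc_percpu(u64);
            return acct->usage ? 0 : -ENOMEM;
    }

    /* Charge @delta to @cpu's copy, in the spirit of the cpuacct_charge() hit above. */
    void demo_acct_charge(struct demo_acct *acct, int cpu, u64 delta)
    {
            *per_cpu_ptr(acct->usage, cpu) += delta;
    }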
/linux/kernel/bpf/
    percpu_freelist.c
        15   struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);   in pcpu_freelist_init()
        58   head = per_cpu_ptr(s->freelist, cpu);   in __pcpu_freelist_push()
        89   head = per_cpu_ptr(s->freelist, cpu);   in pcpu_freelist_populate()
        107  head = per_cpu_ptr(s->freelist, cpu);   in ___pcpu_freelist_pop()
        (the per-CPU init loop is sketched below, after this directory's entries)
    bpf_lru_list.c
        413  l = per_cpu_ptr(lru->percpu_lru, cpu);   in bpf_percpu_lru_pop_free()
        446  loc_l = per_cpu_ptr(clru->local_list, cpu);   in bpf_common_lru_pop_free()
        475  steal_loc_l = per_cpu_ptr(clru->local_list, steal);   in bpf_common_lru_pop_free()
        520  loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);   in bpf_common_lru_push_free()
        547  l = per_cpu_ptr(lru->percpu_lru, node->cpu);   in bpf_percpu_lru_push_free()
        600  l = per_cpu_ptr(lru->percpu_lru, cpu);   in bpf_percpu_lru_populate()
        667  l = per_cpu_ptr(lru->percpu_lru, cpu);   in bpf_lru_init()
        681  loc_l = per_cpu_ptr(clru->local_list, cpu);   in bpf_lru_init()
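percpu_freelist.c above initializes every CPU's list head (a lock plus a first pointer) once at setup time by walking all possible CPUs. A hedged sketch of that init loop with invented demo_* types rather than the real pcpu_freelist structures:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct demo_node {
            struct demo_node *next;
    };

    struct demo_head {
            raw_spinlock_t lock;
            struct demo_node *first;
    };

    struct demo_freelist {
            struct demo_head __percpu *heads;
    };

    int demo_freelist_init(struct demo_freelist *s)
    {
            int cpu;

            s->heads = alloc_percpu(struct demo_head);
            if (!s->heads)
                    return -ENOMEM;

            /* Give every possible CPU's head a valid lock and an empty list. */
            for_each_possible_cpu(cpu) {
                    struct demo_head *head = per_cpu_ptr(s->heads, cpu);

                    raw_spin_lock_init(&head->lock);
                    head->first = NULL;
            }
            return 0;
    }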
/linux/arch/x86/events/amd/
    uncore.c
        166  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);   in amd_uncore_start()
        184  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);   in amd_uncore_stop()
        205  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);   in amd_uncore_add()
        252  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);   in amd_uncore_del()
        280  ctx = *per_cpu_ptr(pmu->ctx, event->cpu);   in amd_uncore_event_init()
        454  union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu);   in amd_uncore_ctx_cid()
        461  union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu);   in amd_uncore_ctx_gid()
        468  union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu);   in amd_uncore_ctx_num_pmcs()
        483  ctx = *per_cpu_ptr(pmu->ctx, cpu);   in amd_uncore_ctx_free()
        495  *per_cpu_ptr(pm   in amd_uncore_ctx_free()
        [all...]
/linux/arch/s390/kernel/
    smp.c
        295  pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);   in pcpu_start_fn()
        402  if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))   in arch_vcpu_is_preempted()
        435  struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);   in smp_emergency_stop()
        444  if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))   in smp_emergency_stop()
        475  pcpu = per_cpu_ptr(&pcpu_devices, cpu);   in smp_send_stop()
        516  pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);   in arch_send_call_function_ipi_mask()
        521  pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);   in arch_send_call_function_single_ipi()
        531  pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);   in arch_smp_send_reschedule()
        549  pcpu = per_cpu_ptr(&pcpu_devices, cpu);   in smp_store_status()
        721  pcpu = per_cpu_ptr(   in smp_add_core()
        [all...]
    wti.c
        104  struct wti_state *st = per_cpu_ptr(&wti_state, cpu);   in wti_pending()
        133  st = per_cpu_ptr(&wti_state, cpu);   in wti_show()
        144  struct wti_state *st = per_cpu_ptr(&wti_state, cpu);   in wti_thread_fn()
        179  st = per_cpu_ptr(&wti_state, cpu);   in wti_init()
/linux/drivers/irqchip/
    irq-riscv-imsic-state.c
        186  tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);   in __imsic_local_sync()
        190  mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);   in __imsic_local_sync()
        195  mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);   in __imsic_local_sync()
        296  lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);   in imsic_vector_mask()
        318  lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);   in imsic_vector_unmask()
        342  lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);   in imsic_vector_force_move_cleanup()
        387  old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);   in imsic_vector_move()
        391  new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);   in imsic_vector_move()
        412  lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);   in imsic_vector_debug_show()
        439  struct imsic_local_priv *lpriv = per_cpu_ptr(imsi   in imsic_vector_from_local_id()
        [all...]
    irq-loongarch-avec.c
        83   struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);   in pending_list_init()
        93   plist = per_cpu_ptr(&pending_list, adata->prev_cpu);   in avecintc_sync()
        123  per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);   in avecintc_set_affinity()
        150  struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);   in avecintc_cpu_offline()
        266  per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);   in avecintc_alloc_vector()
/linux/fs/squashfs/
    decompressor_multi_percpu.c
        39   stream = per_cpu_ptr(percpu, cpu);   in squashfs_decompressor_create()
        53   stream = per_cpu_ptr(percpu, cpu);   in squashfs_decompressor_create()
        70   stream = per_cpu_ptr(percpu, cpu);   in squashfs_decompressor_destroy()
/linux/drivers/clocksource/
    timer-mp-csky.c
        78   struct timer_of *to = per_cpu_ptr(&csky_to, cpu);   in csky_mptimer_starting_cpu()
        145  to = per_cpu_ptr(&csky_to, cpu);   in csky_mptimer_init()
        168  to = per_cpu_ptr(&csky_to, cpu_rollback);   in csky_mptimer_init()
/linux/drivers/powercap/
    idle_inject.c
        108  iit = per_cpu_ptr(&idle_inject_thread, cpu);   in idle_inject_wakeup()
        154  iit = per_cpu_ptr(&idle_inject_thread, cpu);   in idle_inject_fn()
        280  iit = per_cpu_ptr(&idle_inject_thread, cpu);   in idle_inject_stop()
        311  per_cpu_ptr(&idle_inject_thread, cpu);   in idle_inject_should_run()
/linux/tools/testing/shared/linux/
    percpu.h
        10   #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })   (macro definition)
        11   #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
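In this userspace test shim, per_cpu_ptr() discards the cpu argument and hands back the pointer unchanged, so every "CPU" aliases the same single object and per-CPU semantics are simply not exercised. A small standalone illustration of that behaviour (plain userspace C, macros copied from the shim above; it relies on GNU statement expressions, just as the shim does):

    #include <stdio.h>

    /* Same definitions as the shim above. */
    #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
    #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))

    static int counter;

    int main(void)
    {
            per_cpu(counter, 0) = 1;
            per_cpu(counter, 1) += 2;
            /* Prints 3: every cpu index resolves to the one shared 'counter'. */
            printf("%d\n", *per_cpu_ptr(&counter, 5));
            return 0;
    }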
/linux/arch/x86/kernel/
    irq_64.c
        38   char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);   in map_irq_stack()
        64   void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);   in map_irq_stack()
    kgdb.c
        208  bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);   in kgdb_correct_hw_break()
        237  pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);   in hw_break_reserve_slot()
        249  pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);   in hw_break_reserve_slot()
        264  pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);   in hw_break_release_slot()
        304  bp = *per_cpu_ptr(breakinfo[i].pev, cpu);   in kgdb_remove_all_hw_break()
        397  bp = *per_cpu_ptr(breakinfo[i].pev, cpu);   in kgdb_disable_hw_debug()
        666  pevent = per_cpu_ptr(breakinfo[i].pev, cpu);   in kgdb_arch_late()
/linux/drivers/platform/x86/amd/hfi/
    hfi.c
        187  info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu_index);   in amd_hfi_fill_metadata()
        191  info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu_index);   in amd_hfi_fill_metadata()
        235  hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, idx);   in amd_hfi_alloc_class_data()
        281  struct amd_hfi_cpuinfo *hfi_info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);   in amd_hfi_online()
        343  struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);   in update_hfi_ipcc_scores()
        412  struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);   in class_capabilities_show()
/linux/drivers/hwtracing/coresight/
    coresight-trace-id.c
        51   return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));   in _coresight_trace_id_read_cpu_id()
        137  atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);   in coresight_trace_id_release_all()
        171  atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);   in _coresight_trace_id_get_cpu_id()
        194  atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);   in _coresight_trace_id_put_cpu_id()
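coresight-trace-id.c stores an atomic_t per CPU and accesses it through atomic_read()/atomic_set() on the per_cpu_ptr() result. A brief sketch of that per-CPU atomic pattern with assumed demo_id_map names (not the coresight types):

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>

    struct demo_id_map {
            atomic_t __percpu *cpu_id;      /* one ID slot per CPU */
    };

    int demo_id_map_init(struct demo_id_map *map)
    {
            map->cpu_id = alloc_percpu(atomic_t);
            return map->cpu_id ? 0 : -ENOMEM;
    }

    int demo_id_read(struct demo_id_map *map, int cpu)
    {
            return atomic_read(per_cpu_ptr(map->cpu_id, cpu));
    }

    void demo_id_clear(struct demo_id_map *map, int cpu)
    {
            atomic_set(per_cpu_ptr(map->cpu_id, cpu), 0);
    }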
/linux/net/netfilter/
    nf_flow_table_procfs.c
        18   return per_cpu_ptr(net->ft.stat, cpu);   in nf_flow_table_cpu_seq_start()
        33   return per_cpu_ptr(net->ft.stat, cpu);   in nf_flow_table_cpu_seq_next()
/linux/drivers/hv/
    hv.c
        110  hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);   in hv_synic_alloc()
        122  hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);   in hv_synic_alloc()
        214  per_cpu_ptr(hv_context.cpu_context, cpu);   in hv_synic_free()
        267  per_cpu_ptr(hv_context.cpu_context, cpu);   in hv_synic_enable_regs()
        340  per_cpu_ptr(hv_context.cpu_context, cpu);   in hv_synic_disable_regs()