Lines matching "cpuc" (definitions and uses) in arch/x86/events/core.c, the Linux kernel's x86 perf core; the left-hand numbers are kernel source line numbers.
649 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
653 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
656 if (!test_bit(idx, cpuc->active_mask))
683 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
688 if (!cpuc->enabled)
691 cpuc->n_added = 0;
692 cpuc->enabled = 0;
700 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
704 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
706 if (!test_bit(idx, cpuc->active_mask))
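
The hits above: x86_pmu_disable_all() (649-656) and x86_pmu_enable_all() (700-706) walk the hardware counters and skip any slot whose bit is clear in cpuc->active_mask, while x86_pmu_disable() (683-692) only flips the software-enable state. A minimal stand-alone model of the active_mask guard pattern (hypothetical names throughout; the real loops program MSRs rather than printing):

    #include <stdio.h>

    #define NUM_COUNTERS 8

    struct cpuc_model {
        unsigned long active_mask;           /* one bit per live counter */
        const char   *events[NUM_COUNTERS];  /* stand-in for cpuc->events[idx] */
    };

    /* Same loop shape as x86_pmu_disable_all()/x86_pmu_enable_all():
     * only counters marked active are touched. */
    static void for_each_active(struct cpuc_model *cpuc, const char *what)
    {
        for (int idx = 0; idx < NUM_COUNTERS; idx++) {
            if (!(cpuc->active_mask & (1UL << idx)))
                continue;                    /* idle slot, skip it */
            printf("%s counter %d (%s)\n", what, idx, cpuc->events[idx]);
        }
    }

    int main(void)
    {
        struct cpuc_model cpuc = {
            .active_mask = 0x5,              /* counters 0 and 2 in use */
            .events      = { "cycles", NULL, "instructions" },
        };
        for_each_active(&cpuc, "disable");
        return 0;
    }
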
923 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
934 * cpuc->n_events hasn't been updated yet, while for the latter
935 * cpuc->n_txn contains the number of events added in the current
938 n0 = cpuc->n_events;
939 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
940 n0 -= cpuc->n_txn;
942 static_call_cond(x86_pmu_start_scheduling)(cpuc);
945 c = cpuc->event_constraint[i];
959 c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
960 cpuc->event_constraint[i] = c;
973 hwc = &cpuc->event_list[i]->hw;
974 c = cpuc->event_constraint[i];
1012 if (is_ht_workaround_enabled() && !cpuc->is_fake &&
1013 READ_ONCE(cpuc->excl_cntrs->exclusive_present))
1021 gpmax = x86_pmu.num_counters - cpuc->n_pair;
1025 unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
1041 e = cpuc->event_list[i];
1042 static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
1046 e = cpuc->event_list[i];
1051 static_call_cond(x86_pmu_put_event_constraints)(cpuc, e);
1053 cpuc->event_constraint[i] = NULL;
1057 static_call_cond(x86_pmu_stop_scheduling)(cpuc);
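
x86_schedule_events() (923-1057) gathers one constraint per event, a bitmask of legal counters plus a weight, tries a fast path that keeps events on their previously assigned counters, and only then falls back to perf_assign_events(), which re-solves the whole assignment in weight order. Below is a toy greedy version of the weight-ordered idea; this is my simplification, since the kernel's fallback also backtracks and caps the general-purpose counters via gpmax:

    #include <stdio.h>

    struct constraint {
        unsigned long idxmsk;   /* counters this event may use */
        int           weight;   /* popcount of idxmsk */
    };

    /* Place tightly-constrained events first; returns how many
     * events could not be scheduled (0 == complete schedule). */
    static int schedule_greedy(const struct constraint *c, int n, int *assign)
    {
        unsigned long used = 0;
        int unsched = 0;

        for (int w = 1; w <= 64; w++) {
            for (int i = 0; i < n; i++) {
                if (c[i].weight != w)
                    continue;
                unsigned long free = c[i].idxmsk & ~used;
                if (!free) {
                    unsched++;
                    continue;
                }
                assign[i] = __builtin_ctzl(free);  /* lowest legal free counter */
                used |= 1UL << assign[i];
            }
        }
        return unsched;
    }

    int main(void)
    {
        struct constraint c[2] = { { 0x3, 2 }, { 0x1, 1 } };
        int assign[2];

        printf("unscheduled: %d, assign = {%d, %d}\n",
               schedule_greedy(c, 2, assign), assign[0], assign[1]);
        return 0;
    }
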
1062 static int add_nr_metric_event(struct cpu_hw_events *cpuc,
1066 if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
1068 cpuc->n_metric++;
1069 cpuc->n_txn_metric++;
1075 static void del_nr_metric_event(struct cpu_hw_events *cpuc,
1079 cpuc->n_metric--;
1082 static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
1086 if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
1089 if (n >= max_count + cpuc->n_metric)
1092 cpuc->event_list[n] = event;
1094 cpuc->n_pair++;
1095 cpuc->n_txn_pair++;
1105 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1113 n = cpuc->n_events;
1114 if (!cpuc->n_events)
1115 cpuc->pebs_output = 0;
1117 if (!cpuc->is_fake && leader->attr.precise_ip) {
1129 if (cpuc->pebs_output &&
1130 cpuc->pebs_output != is_pebs_pt(leader) + 1)
1133 cpuc->pebs_output = is_pebs_pt(leader) + 1;
1137 if (collect_event(cpuc, leader, max_count, n))
1149 if (collect_event(cpuc, event, max_count, n))
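
collect_event() and collect_events() (1082-1149) append the group leader and then each sibling to cpuc->event_list[], refusing once max_count (plus headroom for metric events) is exceeded. The PEBS-output exclusivity check and the n_metric/n_pair bookkeeping from add_nr_metric_event()/del_nr_metric_event() (1062-1079) are dropped in the sketch below, which keeps only the core shape (hypothetical event type):

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_EVENTS 32

    struct event { struct event *next_sibling; };

    struct cpuc_list {
        int           n_events;
        struct event *event_list[MAX_EVENTS];
    };

    /* Returns the new event count, or -1 if the group will not fit. */
    static int collect_group(struct cpuc_list *cpuc, struct event *leader,
                             bool dogrp, int max_count)
    {
        int n = cpuc->n_events;

        if (n >= max_count)
            return -1;
        cpuc->event_list[n++] = leader;

        if (dogrp) {
            for (struct event *ev = leader->next_sibling; ev; ev = ev->next_sibling) {
                if (n >= max_count)
                    return -1;
                cpuc->event_list[n++] = ev;
            }
        }
        return n;
    }

    int main(void)
    {
        struct event sib = { NULL }, leader = { &sib };
        struct cpuc_list cpuc = { 0 };
        return collect_group(&cpuc, &leader, true, MAX_EVENTS) < 0;
    }
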
1158 struct cpu_hw_events *cpuc, int i)
1163 idx = hwc->idx = cpuc->assign[i];
1165 hwc->last_tag = ++cpuc->tags[i];
1216 struct cpu_hw_events *cpuc,
1219 return hwc->idx == cpuc->assign[i] &&
1221 hwc->last_tag == cpuc->tags[i];
1228 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1231 int i, added = cpuc->n_added;
1236 if (cpuc->enabled)
1239 if (cpuc->n_added) {
1240 int n_running = cpuc->n_events - cpuc->n_added;
1248 event = cpuc->event_list[i];
1258 match_prev_assignment(hwc, cpuc, i))
1274 for (i = 0; i < cpuc->n_events; i++) {
1275 event = cpuc->event_list[i];
1278 if (!match_prev_assignment(hwc, cpuc, i))
1279 x86_assign_hw_event(event, cpuc, i);
1288 cpuc->n_added = 0;
1292 cpuc->enabled = 1;
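
x86_assign_hw_event() and match_prev_assignment() (1158-1221) cache each event's counter index, CPU and generation tag; x86_pmu_enable() (1228-1292) then reprograms in two passes, first stopping every running event whose assignment changed, then installing and starting events at their new slots, so an event that kept its counter is never touched. A condensed model of that control flow (stub helpers, hypothetical names, error handling elided):

    #include <stdbool.h>
    #include <stdio.h>

    struct ev_model { int idx, last_cpu; unsigned long last_tag; };

    struct cpuc_sched {
        int              n_events, n_added, enabled;
        int              assign[8];
        unsigned long    tags[8];
        struct ev_model *event_list[8];
    };

    static int this_cpu;   /* stand-in for smp_processor_id() */

    /* The cheap "nothing moved" test from match_prev_assignment(). */
    static bool match_prev(struct ev_model *e, struct cpuc_sched *cpuc, int i)
    {
        return e->idx == cpuc->assign[i] &&
               e->last_cpu == this_cpu &&
               e->last_tag == cpuc->tags[i];
    }

    static void stop_event(struct ev_model *e)  { printf("stop idx %d\n", e->idx); }
    static void start_event(struct ev_model *e) { printf("start idx %d\n", e->idx); }

    static void pmu_enable(struct cpuc_sched *cpuc)
    {
        if (cpuc->enabled)
            return;
        if (cpuc->n_added) {
            int n_running = cpuc->n_events - cpuc->n_added;

            /* pass 1: stop events whose counter assignment changed */
            for (int i = 0; i < n_running; i++)
                if (!match_prev(cpuc->event_list[i], cpuc, i))
                    stop_event(cpuc->event_list[i]);

            /* pass 2: (re)assign and start everything not already in place */
            for (int i = 0; i < cpuc->n_events; i++) {
                struct ev_model *e = cpuc->event_list[i];

                if (!match_prev(e, cpuc, i)) {
                    e->idx      = cpuc->assign[i];    /* x86_assign_hw_event() */
                    e->last_cpu = this_cpu;
                    e->last_tag = ++cpuc->tags[i];
                    start_event(e);
                }
            }
            cpuc->n_added = 0;
        }
        cpuc->enabled = 1;
    }
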
1393 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1400 n0 = cpuc->n_events;
1401 ret = n = collect_events(cpuc, event, false);
1417 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1420 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
1427 memcpy(cpuc->assign, assign, n*sizeof(int));
1434 cpuc->n_events = n;
1435 cpuc->n_added += n - n0;
1436 cpuc->n_txn += n - n0;
1451 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1467 cpuc->events[idx] = event;
1468 __set_bit(idx, cpuc->active_mask);
1469 __set_bit(idx, cpuc->running);
1478 struct cpu_hw_events *cpuc;
1488 cpuc = &per_cpu(cpu_hw_events, cpu);
1510 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1536 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1539 if (test_bit(hwc->idx, cpuc->active_mask)) {
1541 __clear_bit(hwc->idx, cpuc->active_mask);
1542 cpuc->events[hwc->idx] = NULL;
1559 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1570 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1578 for (i = 0; i < cpuc->n_events; i++) {
1579 if (event == cpuc->event_list[i])
1583 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
1587 if (i >= cpuc->n_events - cpuc->n_added)
1588 --cpuc->n_added;
1590 static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);
1593 while (++i < cpuc->n_events) {
1594 cpuc->event_list[i-1] = cpuc->event_list[i];
1595 cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1597 cpuc->event_constraint[i-1] = NULL;
1598 --cpuc->n_events;
1600 del_nr_metric_event(cpuc, event);
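
x86_pmu_del() (1559-1600) locates the event's slot, fixes up n_added if the event was in the not-yet-committed tail, releases its constraint, then shifts event_list[] and event_constraint[] left by one so both arrays stay dense. The compaction step in isolation (one array here; the kernel moves the two in lockstep):

    #include <stddef.h>

    /* Delete slot i from a dense n-entry array, preserving order;
     * mirrors the while (++i < cpuc->n_events) loop at 1593-1598. */
    static int compact_remove(void *list[], int n, int i)
    {
        while (++i < n)
            list[i - 1] = list[i];
        list[n - 1] = NULL;   /* clear the vacated tail slot */
        return n - 1;         /* new element count */
    }
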
1616 struct cpu_hw_events *cpuc;
1621 cpuc = this_cpu_ptr(&cpu_hw_events);
1634 if (!test_bit(idx, cpuc->active_mask))
1637 event = cpuc->events[idx];
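
x86_pmu_start() (1451-1469) and x86_pmu_stop() (1536-1542) maintain two bitmaps: active_mask means the counter is programmed right now and is cleared on stop, while running is only ever set. As far as I can tell from the kernel's own comments, running exists so overflow handlers can count a late NMI from a just-stopped counter as handled rather than unknown; the generic x86_pmu_handle_irq() hits here (1616-1637) show only the active_mask filter. In miniature:

    /* x86_pmu_start()/x86_pmu_stop() modelled: active_mask tracks
     * "programmed right now"; running is set at start and never
     * cleared on stop, leaving a trace for in-flight NMIs. */
    struct cpuc_bits { unsigned long active_mask, running; void *events[8]; };

    static void start_counter(struct cpuc_bits *c, int idx, void *event)
    {
        c->events[idx]  = event;
        c->active_mask |= 1UL << idx;
        c->running     |= 1UL << idx;    /* deliberately not cleared on stop */
    }

    static void stop_counter(struct cpuc_bits *c, int idx)
    {
        if (c->active_mask & (1UL << idx)) {
            c->active_mask &= ~(1UL << idx);
            c->events[idx] = NULL;
        }
    }
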
1702 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1706 cpuc->kfree_on_online[i] = NULL;
1721 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1725 kfree(cpuc->kfree_on_online[i]);
1726 cpuc->kfree_on_online[i] = NULL;
2058 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2060 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */
2062 cpuc->txn_flags = txn_flags;
2080 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2082 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
2084 txn_flags = cpuc->txn_flags;
2085 cpuc->txn_flags = 0;
2109 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2113 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
2115 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
2116 cpuc->txn_flags = 0;
2120 n = cpuc->n_events;
2125 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
2133 memcpy(cpuc->assign, assign, n*sizeof(int));
2135 cpuc->txn_flags = 0;
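
perf_pmu_start_txn(), perf_pmu_cancel_txn() and perf_pmu_commit_txn() (2058-2135) implement the batched group add: under PERF_PMU_TXN_ADD, x86_pmu_add() (1393-1436) collects events but skips per-event scheduling, and commit_txn() runs the scheduler once over the accumulated batch, with n_txn recording how much to roll back on cancel. Roughly how the generic perf core drives these hooks, modelled in plain C (hypothetical types; the real sequence is also bracketed by perf_pmu_disable()/perf_pmu_enable()):

    struct pmu_model {
        unsigned txn_flags;
        int  (*add)(struct pmu_model *, int ev);   /* no scheduling under TXN_ADD */
        int  (*commit_txn)(struct pmu_model *);    /* schedule the whole batch */
        void (*cancel_txn)(struct pmu_model *);    /* roll back n_added/n_txn */
    };

    #define TXN_ADD 0x1

    /* Add a whole event group atomically: either every member is
     * schedulable together, or none of them is added at all. */
    static int group_add(struct pmu_model *pmu, const int *events, int n)
    {
        pmu->txn_flags = TXN_ADD;             /* perf_pmu_start_txn() */

        for (int i = 0; i < n; i++)
            if (pmu->add(pmu, events[i]))
                goto cancel;

        if (!pmu->commit_txn(pmu))
            return 0;                         /* group accepted as a unit */
    cancel:
        pmu->cancel_txn(pmu);
        return -1;
    }
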
2147 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
2149 intel_cpuc_finish(cpuc);
2150 kfree(cpuc);
2155 struct cpu_hw_events *cpuc;
2158 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
2159 if (!cpuc)
2161 cpuc->is_fake = 1;
2163 if (intel_cpuc_prepare(cpuc, cpu))
2166 return cpuc;
2168 free_fake_cpuc(cpuc);
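
allocate_fake_cpuc() and free_fake_cpuc() (2147-2168) build a throwaway cpu_hw_events with is_fake = 1 so event validation can dry-run the scheduler against a proposed group without touching live per-CPU state; is_fake also short-circuits side-effecting paths such as the HT workaround test at 1012. The validation pattern in miniature (hypothetical scheduler callback):

    #include <stdlib.h>

    struct cpuc_fake { int is_fake; int n_events; };

    /* Dry-run the scheduler on scratch state: if the scratch schedule
     * succeeds we know the group fits, and nothing real was touched. */
    static int validate_group(int (*schedule)(struct cpuc_fake *, const int *, int),
                              const int *group, int n)
    {
        struct cpuc_fake *fake = calloc(1, sizeof(*fake));
        int ret;

        if (!fake)
            return -1;
        fake->is_fake = 1;              /* tells helpers to skip side effects */

        ret = schedule(fake, group, n); /* x86_schedule_events() stand-in */

        free(fake);
        return ret;
    }
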