/linux/drivers/tee/qcomtee/

  qcomtee_msg.h
    136  u32 counts;  [member]
    160  u32 counts;  [member]
    172  static inline unsigned int qcomtee_msg_num_ib(u32 counts)  [argument]
    174  return FIELD_GET(QCOMTEE_MASK_IB, counts);  [in qcomtee_msg_num_ib()]
    177  static inline unsigned int qcomtee_msg_num_ob(u32 counts)  [argument]
    179  return FIELD_GET(QCOMTEE_MASK_OB, counts);  [in qcomtee_msg_num_ob()]
    182  static inline unsigned int qcomtee_msg_num_io(u32 counts)  [argument]
    184  return FIELD_GET(QCOMTEE_MASK_IO, counts);  [in qcomtee_msg_num_io()]
    187  static inline unsigned int qcomtee_msg_num_oo(u32 counts)  [argument]
    189  return FIELD_GET(QCOMTEE_MASK_OO, counts);  [in qcomtee_msg_num_oo()]
    [all …]
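The qcomtee_msg.h hits above show a single packed u32 counts word decoded with FIELD_GET() and four per-field masks (input/output buffers, input/output objects). Below is a minimal self-contained sketch of that decoding pattern; the 4-bit field widths and positions are assumptions for illustration, not the real QCOMTEE_MASK_* values.

/* Sketch only: assumed layout [15:12]=OO, [11:8]=IO, [7:4]=OB, [3:0]=IB. */
#include <stdint.h>
#include <stdio.h>

#define MASK_IB 0x000fu
#define MASK_OB 0x00f0u
#define MASK_IO 0x0f00u
#define MASK_OO 0xf000u

/* Userspace stand-in for the kernel's FIELD_GET(): shift by the mask's LSB. */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
    return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
    uint32_t counts = 0x3210;   /* OO=3, IO=2, OB=1, IB=0 */

    printf("ib=%u ob=%u io=%u oo=%u\n",
           field_get(MASK_IB, counts), field_get(MASK_OB, counts),
           field_get(MASK_IO, counts), field_get(MASK_OO, counts));
    return 0;
}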
  async.c
    51   u32 counts;  [member]
    52   u32 object_ids[] __counted_by(counts);
    115  for (i = 0; i < msg->counts; i++) {  [in async_release()]
    120  return struct_size(msg, object_ids, msg->counts);  [in async_release()]
/linux/tools/perf/util/

  counts.c
    12  struct perf_counts *counts = zalloc(sizeof(*counts));  [in perf_counts__new(), local]
    14  if (counts) {  [in perf_counts__new()]
    19  free(counts);  [in perf_counts__new()]
    23  counts->values = values;  [in perf_counts__new()]
    27  xyarray__delete(counts->values);  [in perf_counts__new()]
    28  free(counts);  [in perf_counts__new()]
    32  counts->loaded = values;  [in perf_counts__new()]
    35  return counts;  [in perf_counts__new()]
    38  void perf_counts__delete(struct perf_counts *counts)  [argument]
    40  if (counts) {  [in perf_counts__delete()]
    [all …]
  counts.h
    20  perf_counts(struct perf_counts *counts, int cpu_map_idx, int thread)  [argument]
    22  return xyarray__entry(counts->values, cpu_map_idx, thread);  [in perf_counts()]
    26  perf_counts__is_loaded(struct perf_counts *counts, int cpu_map_idx, int thread)  [argument]
    28  return *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread));  [in perf_counts__is_loaded()]
    32  perf_counts__set_loaded(struct perf_counts *counts, int cpu_map_idx, int thread, bool loaded)  [argument]
    34  *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread)) = loaded;  [in perf_counts__set_loaded()]
    38  void perf_counts__delete(struct perf_counts *counts);
    39  void perf_counts__reset(struct perf_counts *counts);
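counts.h above is the accessor layer: perf_counts() resolves a (cpu_map_idx, thread) pair to one cell of per-event values via xyarray__entry(). Here is a self-contained sketch of that data shape using a plain row-major array; the names counts_matrix and counts_cell and the flat-array layout are assumptions, not perf's xyarray implementation.

/* Sketch, not the perf internals: a (cpu_map_idx x thread) matrix of cells. */
#include <stdio.h>
#include <stdlib.h>

struct counts_values {           /* stand-in for struct perf_counts_values */
    unsigned long long val;      /* raw counter value */
    unsigned long long ena;      /* time the event was enabled */
    unsigned long long run;      /* time the event was actually running */
};

struct counts_matrix {           /* stand-in for struct perf_counts */
    int ncpus;
    int nthreads;
    struct counts_values *cells; /* ncpus * nthreads cells, row-major */
};

/* Analogue of perf_counts(counts, cpu_map_idx, thread). */
static struct counts_values *counts_cell(struct counts_matrix *m,
                                         int cpu_map_idx, int thread)
{
    return &m->cells[cpu_map_idx * m->nthreads + thread];
}

int main(void)
{
    struct counts_matrix m = {
        .ncpus = 2,
        .nthreads = 3,
        .cells = calloc(2 * 3, sizeof(struct counts_values)),
    };

    counts_cell(&m, 1, 2)->val = 12345;          /* write one cell */
    printf("val=%llu\n", counts_cell(&m, 1, 2)->val);

    free(m.cells);
    return 0;
}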
  stat.c
    158  struct perf_counts *counts;  [in evsel__alloc_prev_raw_counts(), local]
    160  counts = perf_counts__new(cpu_map_nr, nthreads);  [in evsel__alloc_prev_raw_counts()]
    161  if (counts)  [in evsel__alloc_prev_raw_counts()]
    162  evsel->prev_raw_counts = counts;  [in evsel__alloc_prev_raw_counts()]
    164  return counts ? 0 : -ENOMEM;  [in evsel__alloc_prev_raw_counts()]
    253  *perf_counts(evsel->counts, idx, thread) =  [in evsel__copy_prev_raw_counts()]
    275  *ps->aggr[0].counts.values = avg_stats(&ps->res_stats);  [in evsel__copy_res_stats()]
    371  if (evsel->err || evsel->counts->scaled == -1)  [in evsel__count_has_error()]
    407  struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;  [in process_counter_values()]
    443  ps_aggr->counts.val = 0;  [in process_counter_values()]
    [all …]
  bpf_counter.c
    294  struct perf_counts_values *counts;  [in bpf_program_profiler__read(), local]
    303  counts = perf_counts(evsel->counts, idx, 0);  [in bpf_program_profiler__read()]
    304  counts->val = 0;  [in bpf_program_profiler__read()]
    305  counts->ena = 0;  [in bpf_program_profiler__read()]
    306  counts->run = 0;  [in bpf_program_profiler__read()]
    325  counts = perf_counts(evsel->counts, idx, 0);  [in bpf_program_profiler__read()]
    326  counts->val += values[bpf_cpu].counter;  [in bpf_program_profiler__read()]
    327  counts->ena += values[bpf_cpu].enabled;  [in bpf_program_profiler__read()]
    328  counts->run += values[bpf_cpu].running;  [in bpf_program_profiler__read()]
    691  struct perf_counts_values *counts;  [in bperf__read(), local]
    [all …]
  branch.c
    27   st->counts[flags->type]++;  [in branch_type_count()]
    118  total += st->counts[i];  [in branch_type_stat_display()]
    152  if (st->counts[i] > 0)  [in branch_type_stat_display()]
    156  (double)st->counts[i] / (double)total);  [in branch_type_stat_display()]
    180  total += st->counts[i];  [in branch_type_str()]
    198  if (st->counts[i] > 0)  [in branch_type_str()]
/linux/tools/lib/perf/tests/

  test-evsel.c
    40   struct perf_counts_values counts = { .val = 0 };  [in test_stat_cpu(), local]
    42   perf_evsel__read(evsel, idx, 0, &counts);  [in test_stat_cpu()]
    43   __T("failed to read value for evsel", counts.val != 0);  [in test_stat_cpu()]
    55   struct perf_counts_values counts = { .val = 0 };  [in test_stat_thread(), local]
    75   perf_evsel__read(evsel, 0, 0, &counts);  [in test_stat_thread()]
    76   __T("failed to read value for evsel", counts.val != 0);  [in test_stat_thread()]
    87   struct perf_counts_values counts = { .val = 0 };  [in test_stat_thread_enable(), local]
    108  perf_evsel__read(evsel, 0, 0, &counts);  [in test_stat_thread_enable()]
    109  __T("failed to read value for evsel", counts.val == 0);  [in test_stat_thread_enable()]
    114  perf_evsel__read(evsel, 0, 0, &counts);  [in test_stat_thread_enable()]
    [all …]
  test-evlist.c
    78   struct perf_counts_values counts = { .val = 0 };  [in test_stat_cpu(), local]
    80   perf_evsel__read(evsel, idx, 0, &counts);  [in test_stat_cpu()]
    81   __T("failed to read value for evsel", counts.val != 0);  [in test_stat_cpu()]
    94   struct perf_counts_values counts = { .val = 0 };  [in test_stat_thread(), local]
    136  perf_evsel__read(evsel, 0, 0, &counts);  [in test_stat_thread()]
    137  __T("failed to read value for evsel", counts.val != 0);  [in test_stat_thread()]
    149  struct perf_counts_values counts = { .val = 0 };  [in test_stat_thread_enable(), local]
    193  perf_evsel__read(evsel, 0, 0, &counts);  [in test_stat_thread_enable()]
    194  __T("failed to read value for evsel", counts.val == 0);  [in test_stat_thread_enable()]
    200  perf_evsel__read(evsel, 0, 0, &counts);  [in test_stat_thread_enable()]
    [all …]
/linux/tools/testing/selftests/bpf/prog_tests/

  sock_iter_batch.c
    25  static int insert(__u64 cookie, struct sock_count counts[], int counts_len)  [argument]
    31  if (!counts[i].cookie) {  [in insert()]
    33  } else if (counts[i].cookie == cookie) {  [in insert()]
    41  counts[insert].cookie = cookie;  [in insert()]
    42  counts[insert].count++;  [in insert()]
    44  return counts[insert].count;  [in insert()]
    47  static int read_n(int iter_fd, int n, struct sock_count counts[],  [argument]
    58  ASSERT_GE(insert(out.cookie, counts, counts_len), 0, "insert");  [in read_n()]
    77  static bool was_seen(int fd, struct sock_count counts[], int counts_len)  [argument]
    83  if (cookie == counts[i].cookie)  [in was_seen()]
    [all …]
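The sock_iter_batch.c hits outline a small fixed-size table keyed by socket cookie, where insert() bumps the count for an already-seen cookie or claims a free slot. A self-contained sketch of that counting pattern follows; the slot-tracking variable name and the -1 "table full" return value are assumptions.

/* Sketch of the cookie-counting table; names partly mirror the hits above. */
#include <stdio.h>

typedef unsigned long long u64;

struct sock_count {
    u64 cookie;
    int count;
};

static int insert(u64 cookie, struct sock_count counts[], int counts_len)
{
    int free_slot = -1;     /* renamed here; the selftest uses another name */
    int i;

    for (i = 0; i < counts_len; i++) {
        if (!counts[i].cookie) {
            if (free_slot < 0)
                free_slot = i;            /* remember first empty slot */
        } else if (counts[i].cookie == cookie) {
            return ++counts[i].count;     /* already tracked: bump it */
        }
    }

    if (free_slot < 0)
        return -1;                        /* table full (assumed policy) */

    counts[free_slot].cookie = cookie;
    return ++counts[free_slot].count;
}

int main(void)
{
    struct sock_count counts[4] = {};

    printf("%d\n", insert(0xabcd, counts, 4));  /* 1: first sighting */
    printf("%d\n", insert(0xabcd, counts, 4));  /* 2: seen again */
    return 0;
}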
/linux/arch/x86/kernel/cpu/resctrl/

  pseudo_lock.c
    310  struct residency_counts *counts)  [in measure_residency_fn(), argument]
    418  counts->miss_before = miss_before;  [in measure_residency_fn()]
    419  counts->hits_before = hits_before;  [in measure_residency_fn()]
    420  counts->miss_after = miss_after;  [in measure_residency_fn()]
    421  counts->hits_after = hits_after;  [in measure_residency_fn()]
    428  struct residency_counts counts = {0};  [in resctrl_arch_measure_l2_residency(), local]
    450  measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);  [in resctrl_arch_measure_l2_residency()]
    455  trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,  [in resctrl_arch_measure_l2_residency()]
    456  counts.miss_after - counts.miss_before);  [in resctrl_arch_measure_l2_residency()]
    466  struct residency_counts counts = {0};  [in resctrl_arch_measure_l3_residency(), local]
    [all …]
/linux/Documentation/admin-guide/perf/

  fujitsu_uncore_pmu.rst
    32  This event counts MAC cycles at MAC frequency.
    34  This event counts the number of read requests to MAC.
    36  This event counts the number of read requests including retry to MAC.
    38  This event counts the number of responses to read requests to MAC.
    40  This event counts the number of read requests including retry with PFTGT
    43  This event counts the number of read requests including retry without PFTGT
    46  This event counts the number of responses to read requests which hit the
    49  This event counts the number of responses to read requests which miss the
    52  This event counts outstanding read requests issued by DDR memory controller
    55  This event counts the number of write requests to MAC (including zero write,
    [all …]
/linux/arch/powerpc/platforms/pseries/

  msi.c
    225  struct msi_counts *counts = data;  [in count_non_bridge_devices(), local]
    235  counts->num_devices++;  [in count_non_bridge_devices()]
    242  struct msi_counts *counts = data;  [in count_spare_msis(), local]
    246  if (dn == counts->requestor)  [in count_spare_msis()]
    247  req = counts->request;  [in count_spare_msis()]
    261  if (req < counts->quota)  [in count_spare_msis()]
    262  counts->spare += counts->quota - req;  [in count_spare_msis()]
    263  else if (req > counts->quota)  [in count_spare_msis()]
    264  counts->over_quota++;  [in count_spare_msis()]
    272  struct msi_counts counts;  [in msi_quota_for_device(), local]
    [all …]
/linux/samples/bpf/

  sampleip_user.c
    82   struct ipcount counts[MAX_IPS];  [variable]
    102  counts[i].ip = next_key;  [in print_ip_map()]
    103  counts[i++].count = value;  [in print_ip_map()]
    109  qsort(counts, max, sizeof(struct ipcount), count_cmp);  [in print_ip_map()]
    111  if (counts[i].ip > _text_addr) {  [in print_ip_map()]
    112  sym = ksym_search(counts[i].ip);  [in print_ip_map()]
    118  printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,  [in print_ip_map()]
    119  counts[i].count);  [in print_ip_map()]
    121  printf("0x%-17llx %-32s %u\n", counts[i].ip, "(user)",  [in print_ip_map()]
    122  counts[i].count);  [in print_ip_map()]
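sampleip_user.c gathers IP/count pairs into counts[] and sorts them with qsort() before printing. Below is a sketch of that sort step; the descending-by-count comparator is an assumption about what count_cmp() does, shown only for illustration.

/* Sketch of the qsort() step; comparator behaviour is assumed, not copied. */
#include <stdio.h>
#include <stdlib.h>

struct ipcount {
    unsigned long long ip;   /* sampled instruction pointer */
    unsigned int count;      /* number of samples at that IP */
};

/* Assumed comparator: hottest IPs (largest count) first. */
static int count_cmp(const void *a, const void *b)
{
    const struct ipcount *p = a, *q = b;

    return (q->count > p->count) - (q->count < p->count);
}

int main(void)
{
    struct ipcount counts[] = {
        { 0xffffffff81000010ULL, 3 },
        { 0x0000000000401234ULL, 9 },
        { 0xffffffff81234567ULL, 1 },
    };
    int i, max = sizeof(counts) / sizeof(counts[0]);

    qsort(counts, max, sizeof(struct ipcount), count_cmp);

    for (i = 0; i < max; i++)
        printf("0x%-17llx %u\n", counts[i].ip, counts[i].count);
    return 0;
}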
/linux/drivers/media/v4l2-core/

  v4l2-vp9.c
    1674  const struct v4l2_vp9_frame_symbol_counts *counts,  [in _adapt_coeff(), argument]
    1683  *counts->eob[i][j][k][l][m][1],  [in _adapt_coeff()]
    1684  *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1],  [in _adapt_coeff()]
    1687  adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf);  [in _adapt_coeff()]
    1694  const struct v4l2_vp9_frame_symbol_counts *counts,  [in _adapt_coef_probs(), argument]
    1702  _adapt_coeff(i, j, k, probs, counts, uf);  [in _adapt_coef_probs()]
    1706  struct v4l2_vp9_frame_symbol_counts *counts,  [in v4l2_vp9_adapt_coef_probs(), argument]
    1711  _adapt_coef_probs(probs, counts, 112);  [in v4l2_vp9_adapt_coef_probs()]
    1714  _adapt_coef_probs(probs, counts, 128);  [in v4l2_vp9_adapt_coef_probs()]
    1716  _adapt_coef_probs(probs, counts, 112);  [in v4l2_vp9_adapt_coef_probs()]
    [all …]
/linux/drivers/md/

  md-bitmap.c
    165   } counts;  [member]
    1154  unsigned long chunk = block >> bitmap->counts.chunkshift;  [in md_bitmap_file_set_bit()]
    1185  unsigned long chunk = block >> bitmap->counts.chunkshift;  [in md_bitmap_file_clear_bit()]
    1215  unsigned long chunk = block >> bitmap->counts.chunkshift;  [in md_bitmap_file_test_bit()]
    1326  unsigned long chunks = bitmap->counts.chunks;  [in md_bitmap_init_from_disk()]
    1340  int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)  [in md_bitmap_init_from_disk()]
    1343  (sector_t)i << bitmap->counts.chunkshift,  [in md_bitmap_init_from_disk()]
    1423  int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift  [in md_bitmap_init_from_disk()]
    1426  (sector_t)i << bitmap->counts.chunkshift,  [in md_bitmap_init_from_disk()]
    1513  struct bitmap_counts *counts;  [in bitmap_daemon_work(), local]
    [all …]
/linux/drivers/hv/

  hv_proc.c
    22   int *counts;  [in hv_call_deposit_pages(), local]
    43   counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);  [in hv_call_deposit_pages()]
    44   if (!counts) {  [in hv_call_deposit_pages()]
    69   counts[i] = 1 << order;  [in hv_call_deposit_pages()]
    70   num_pages -= counts[i];  [in hv_call_deposit_pages()]
    84   for (j = 0; j < counts[i]; ++j, ++page_count)  [in hv_call_deposit_pages()]
    102  for (j = 0; j < counts[i]; ++j)  [in hv_call_deposit_pages()]
    108  kfree(counts);  [in hv_call_deposit_pages()]
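The hv_call_deposit_pages() hits show num_pages being carved into power-of-two chunks, with each chunk size remembered in counts[i] so the pages can be walked (and freed on error) later. The following standalone illustration shows only that decomposition; the maximum order and the shrink-until-it-fits policy are assumptions, not the driver's actual allocation logic.

/* Illustration of power-of-two chunking; constants and policy are assumed. */
#include <stdio.h>

#define MAX_ORDER_HERE 9   /* assumed cap: at most 512 pages per chunk */

int main(void)
{
    unsigned long num_pages = 1000;
    int counts[64];
    int i = 0;

    while (num_pages) {
        int order = MAX_ORDER_HERE;

        /* shrink the order until the chunk fits what is left */
        while ((1UL << order) > num_pages)
            order--;

        counts[i] = 1 << order;
        num_pages -= counts[i];
        printf("chunk %d: %d pages (order %d)\n", i, counts[i], order);
        i++;
    }
    return 0;
}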
/linux/drivers/scsi/

  ch.c
    117  u_int counts[CH_TYPES];  [member]
    235  ch->counts[i])  [in ch_elem_to_typecode()]
    323  ch->counts[CHET_MT] =  [in ch_readconfig()]
    327  ch->counts[CHET_ST] =  [in ch_readconfig()]
    331  ch->counts[CHET_IE] =  [in ch_readconfig()]
    335  ch->counts[CHET_DT] =  [in ch_readconfig()]
    339  ch->counts[CHET_MT]);  [in ch_readconfig()]
    342  ch->counts[CHET_ST]);  [in ch_readconfig()]
    345  ch->counts[CHET_IE]);  [in ch_readconfig()]
    348  ch->counts[CHET_DT]);  [in ch_readconfig()]
    [all …]
/linux/tools/testing/selftests/bpf/progs/

  test_btf_nokv.c
    21  struct ipv_counts *counts;  [in test_long_fname_2(), local]
    24  counts = bpf_map_lookup_elem(&btf_map, &key);  [in test_long_fname_2()]
    25  if (!counts)  [in test_long_fname_2()]
    28  counts->v6++;  [in test_long_fname_2()]
  test_btf_newkv.c
    22  struct ipv_counts *counts;  [in test_long_fname_2(), local]
    25  counts = bpf_map_lookup_elem(&btf_map, &key);  [in test_long_fname_2()]
    26  if (!counts)  [in test_long_fname_2()]
    29  counts->v6++;  [in test_long_fname_2()]
/linux/tools/lib/perf/Documentation/examples/

  counting.c
    24  struct perf_counts_values counts;  [in main(), local]
    73  perf_evsel__read(evsel, 0, 0, &counts);  [in main()]
    75  counts.val, counts.ena, counts.run);  [in main()]
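counting.c is the libperf documentation example these fragments come from: open one event on the current thread, run some work, then read back {val, ena, run} with perf_evsel__read(). Below is a hedged sketch of that whole flow based on the documented libperf API; the event choice, the busy loop, and the error handling style are illustrative assumptions rather than a copy of the example.

/* Sketch of a libperf counting flow; details beyond the API are assumed. */
#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/core.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>

int main(void)
{
    struct perf_event_attr attr = {
        .type        = PERF_TYPE_SOFTWARE,
        .config      = PERF_COUNT_SW_TASK_CLOCK,
        .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                       PERF_FORMAT_TOTAL_TIME_RUNNING,
        .disabled    = 1,
    };
    struct perf_counts_values counts = { .val = 0 };
    struct perf_thread_map *threads;
    struct perf_evsel *evsel;
    volatile unsigned long i;

    threads = perf_thread_map__new_dummy();
    perf_thread_map__set_pid(threads, 0, 0);    /* pid 0: current process */

    evsel = perf_evsel__new(&attr);
    if (!evsel || perf_evsel__open(evsel, NULL, threads) < 0) {
        fprintf(stderr, "failed to open event\n");
        return 1;
    }

    perf_evsel__enable(evsel);
    for (i = 0; i < 100000000UL; i++)           /* something to count */
        ;
    perf_evsel__disable(evsel);

    /* cpu_map_idx 0, thread 0: the only cell opened above */
    perf_evsel__read(evsel, 0, 0, &counts);
    printf("count %llu, enabled %llu, running %llu\n",
           (unsigned long long)counts.val,
           (unsigned long long)counts.ena,
           (unsigned long long)counts.run);

    perf_evsel__close(evsel);
    perf_evsel__delete(evsel);
    perf_thread_map__put(threads);
    return 0;
}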
/linux/tools/testing/kunit/

  kunit_parser.py
    46   self.counts = TestCounts()
    59   self.counts.errors += 1
    102  def add_subtest_counts(self, counts: TestCounts) -> None:
    112  self.passed += counts.passed
    113  self.failed += counts.failed
    114  self.crashed += counts.crashed
    115  self.skipped += counts.skipped
    116  self.errors += counts.errors
    663  if test.ok_status() or test.counts.total() < 100:
    683  counts = test.counts
    [all …]
/linux/drivers/gpu/drm/amd/display/dc/link/accessories/

  link_dp_trace.c
    64   link->dp_trace.detect_lt_trace.counts.fail = fail_count;  [in dp_trace_lt_fail_count_update()]
    66   link->dp_trace.commit_lt_trace.counts.fail = fail_count;  [in dp_trace_lt_fail_count_update()]
    73   link->dp_trace.detect_lt_trace.counts.total++;  [in dp_trace_lt_total_count_increment()]
    75   link->dp_trace.commit_lt_trace.counts.total++;  [in dp_trace_lt_total_count_increment()]
    137  return &link->dp_trace.detect_lt_trace.counts;  [in dp_trace_get_lt_counts()]
    139  return &link->dp_trace.commit_lt_trace.counts;  [in dp_trace_get_lt_counts()]
/linux/drivers/media/platform/mediatek/vcodec/decoder/vdec/

  vdec_vp9_req_lat_if.c
    363   struct vdec_vp9_slice_mem counts;  [member]
    475   struct mtk_vcodec_mem counts;  [member]
    620   if (!instance->counts.va) {  [in vdec_vp9_slice_alloc_working_buffer()]
    621   instance->counts.size = VP9_COUNTS_BUF_SIZE;  [in vdec_vp9_slice_alloc_working_buffer()]
    622   if (mtk_vcodec_mem_alloc(ctx, &instance->counts))  [in vdec_vp9_slice_alloc_working_buffer()]
    651   if (instance->counts.va)  [in vdec_vp9_slice_free_working_buffer()]
    652   mtk_vcodec_mem_free(ctx, &instance->counts);  [in vdec_vp9_slice_free_working_buffer()]
    1001  vsi->counts.dma_addr = instance->counts.dma_addr;  [in vdec_vp9_slice_setup_lat_buffer()]
    1002  vsi->counts.size = instance->counts.size;  [in vdec_vp9_slice_setup_lat_buffer()]
    1194  struct vdec_vp9_slice_frame_counts *counts,  [in vdec_vp9_slice_map_counts_eob_coef(), argument]
    [all …]
/linux/fs/xfs/scrub/

  quotacheck.c
    130  struct xfarray *counts,  [in xqcheck_update_incore_counts(), argument]
    139  error = xfarray_load_sparse(counts, id, &xcdq);  [in xqcheck_update_incore_counts()]
    148  error = xfarray_store(counts, id, &xcdq);  [in xqcheck_update_incore_counts()]
    322  struct xfarray *counts;  [in xqcheck_apply_live_dqtrx(), local]
    330  counts = xqc->ucounts;  [in xqcheck_apply_live_dqtrx()]
    333  counts = xqc->gcounts;  [in xqcheck_apply_live_dqtrx()]
    336  counts = xqc->pcounts;  [in xqcheck_apply_live_dqtrx()]
    342  if (xchk_iscan_aborted(&xqc->iscan) || counts == NULL)  [in xqcheck_apply_live_dqtrx()]
    360  error = xqcheck_update_incore_counts(xqc, counts, p->q_id,  [in xqcheck_apply_live_dqtrx()]
    558  struct xfarray *counts = xqcheck_counters_for(xqc, dqtype);  [in xqcheck_compare_dquot(), local]
    [all …]