/linux-6.15/drivers/gpu/drm/amd/amdkfd/

D | kfd_crat.c
     57  .cache_size = 16,
     67  .cache_size = 16,
     77  .cache_size = 8,
     93  .cache_size = 16,
    103  .cache_size = 32,
    113  .cache_size = 16,
    135  * newer ASICs. The unit for cache_size is KiB.
    143  .cache_size = 16,
    153  .cache_size = 32,
    163  .cache_size = 16,
    [all …]

D | kfd_crat.h
    165  uint32_t cache_size;  member
    302  uint32_t cache_size;  member

D | kfd_topology.h
    104  uint32_t cache_size;  member

/linux-6.15/arch/m68k/include/asm/

D | m53xxacr.h
     56  #define CACHE_SIZE 0x2000  /* 8k of unified cache */  macro
     57  #define ICACHE_SIZE CACHE_SIZE
     58  #define DCACHE_SIZE CACHE_SIZE
     60  #define CACHE_SIZE 0x4000  /* 16k of unified cache */  macro
     61  #define ICACHE_SIZE CACHE_SIZE
     62  #define DCACHE_SIZE CACHE_SIZE

/linux-6.15/drivers/mtd/

D | mtdblock.c
     30  unsigned int cache_size;  member
     88  mtdblk->cache_offset, mtdblk->cache_size);  in write_cached_data()
     91  mtdblk->cache_size, mtdblk->cache_data);  in write_cached_data()
    114  unsigned int sect_size = mtdblk->cache_size;  in do_cached_write()
    162  mtdblk->cache_size = sect_size;  in do_cached_write()
    184  unsigned int sect_size = mtdblk->cache_size;  in do_cached_read()
    241  if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {  in mtdblock_writesect()
    273  mtdblk->cache_size = mbd->mtd->erasesize;  in mtdblock_open()

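The mtdblock driver batches writes through a single erase-block-sized buffer: cache_size is set to mtd->erasesize on open, and a write landing outside the cached block flushes the old contents first. A minimal userspace sketch of that write-back pattern, with a plain array standing in for the flash device and deliberately tiny assumed sizes:

    /* Build: cc -o wbcache wbcache.c && ./wbcache */
    #include <stdio.h>
    #include <string.h>

    #define ERASE_SIZE 64                 /* assumed (tiny) erase-block size */
    #define FLASH_SIZE 256

    static unsigned char flash[FLASH_SIZE];   /* stand-in for the MTD device */

    struct blk_cache {
        unsigned long offset;             /* start of the cached erase block */
        unsigned int  size;               /* cache_size: one erase block */
        int           dirty;
        unsigned char data[ERASE_SIZE];
    };

    static void flush_cache(struct blk_cache *c)
    {
        if (c->dirty) {                   /* mtdblock erases, then rewrites */
            memcpy(flash + c->offset, c->data, c->size);
            c->dirty = 0;
        }
    }

    /* Writes inside the cached erase block only touch RAM; a write that
     * lands in a different block flushes the old one and refills the cache. */
    static void cached_write(struct blk_cache *c, unsigned long pos,
                             const void *buf, unsigned int len)
    {
        unsigned long block = pos - (pos % c->size);

        if (block != c->offset) {
            flush_cache(c);
            memcpy(c->data, flash + block, c->size);
            c->offset = block;
        }
        memcpy(c->data + (pos - block), buf, len);
        c->dirty = 1;
    }

    int main(void)
    {
        struct blk_cache c = { .offset = 0, .size = ERASE_SIZE };

        memcpy(c.data, flash, ERASE_SIZE);     /* prime with block 0 */
        cached_write(&c, 10, "hello", 5);      /* stays in RAM */
        cached_write(&c, 100, "world", 5);     /* new block: flushes first */
        flush_cache(&c);
        printf("%.5s %.5s\n", (char *)flash + 10, (char *)flash + 100);
        return 0;
    }
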
/linux-6.15/drivers/infiniband/hw/hfi1/

D | user_pages.c
     13  static unsigned long cache_size = 256;  variable
     14  module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
     15  MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
     73  cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;  in hfi1_can_pin_pages()

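The hfi1 limit is configured in MB but enforced in pages; line 73 does the conversion. The same arithmetic in userspace, assuming a 4 KiB PAGE_SIZE:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cache_size = 256;       /* MB, the module default */
        unsigned long page_size = 4096;       /* assumed PAGE_SIZE */
        unsigned long cache_limit_pages = cache_size * (1024 * 1024) / page_size;

        printf("%lu MB -> %lu pages\n", cache_size, cache_limit_pages);  /* 65536 */
        return 0;
    }
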
/linux-6.15/tools/testing/selftests/resctrl/

D | resctrl.h
    200  int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size);
    219  * @cache_size: Total cache size in bytes
    225  static inline unsigned long cache_portion_size(unsigned long cache_size,  in cache_portion_size() argument
    233  * smaller portions. To avoid divide by zero, return cache_size.  in cache_portion_size()
    236  return cache_size;  in cache_portion_size()
    238  return cache_size * count_bits(portion_mask) / bits;  in cache_portion_size()

D | resctrlfs.c
    257  * @cache_size: pointer to cache_size
    261  int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size)  in get_cache_size() argument
    280  ksft_perror("Could not get cache_size");
    289  *cache_size = 0;
    294  *cache_size = *cache_size * 10 + (cache_str[i] - '0');
    298  *cache_size = *cache_size * 1024;
    302  *cache_size = *cache_size * 1024 * 1024;
    318  *cache_size /= snc_nodes_per_l3_cache();

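Two small calculations recur here: cache_portion_size() scales the total size by the fraction of set bits in a capacity bitmask, and get_cache_size() parses sysfs size strings such as "512K" or "8M". A userspace sketch of both, using the GCC/Clang popcount builtin in place of the selftest's count_bits():

    #include <stdio.h>

    /* Bytes covered by a capacity bitmask: scale the full size by the
     * fraction of CBM bits that are set (cache_portion_size() above). */
    static unsigned long portion_size(unsigned long cache_size,
                                      unsigned long portion_mask,
                                      unsigned int bits)
    {
        if (bits == 0)                    /* guard against divide by zero */
            return cache_size;
        return cache_size * __builtin_popcountl(portion_mask) / bits;
    }

    /* Parse a sysfs size string such as "512K" or "8M" into bytes, the
     * way get_cache_size() walks the digits and applies the suffix. */
    static unsigned long parse_size(const char *s)
    {
        unsigned long size = 0;

        for (; *s >= '0' && *s <= '9'; s++)
            size = size * 10 + (*s - '0');
        if (*s == 'K')
            size *= 1024;
        else if (*s == 'M')
            size *= 1024 * 1024;
        return size;
    }

    int main(void)
    {
        unsigned long l3 = parse_size("16M");               /* 16777216 */

        /* 4 of 16 CBM bits set -> one quarter of the cache. */
        printf("portion: %lu bytes\n", portion_size(l3, 0xf, 16));
        return 0;
    }
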
/linux-6.15/drivers/misc/lkdtm/

D | usercopy.c
     20  * and making sure "cache_size" isn't optimized into a const.
     23  static volatile size_t cache_size = 1024;  variable
    234  memset(buf, 'B', cache_size);  in do_usercopy_slab_whitelist()
    237  offset = (cache_size / 4) + unconst;  in do_usercopy_slab_whitelist()
    238  size = (cache_size / 16) + unconst;  in do_usercopy_slab_whitelist()
    429  kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,  in lkdtm_usercopy_init()
    431  cache_size / 4,
    432  cache_size / 16,

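lkdtm creates a slab cache whose usercopy whitelist starts at cache_size / 4 and spans cache_size / 16 bytes; the volatile qualifier plus a zero-valued "unconst" addend keeps the compiler from folding those bounds into constants. A sketch of just the window arithmetic (the kmem_cache_create_usercopy() call in the comment is the kernel API these values feed; align and flags elided):

    #include <stdio.h>
    #include <stddef.h>

    /* Mirrors lkdtm: volatile so cache_size isn't optimized into a const. */
    static volatile size_t cache_size = 1024;
    static volatile size_t unconst;           /* always 0 at runtime */

    int main(void)
    {
        size_t useroffset = cache_size / 4 + unconst;   /* window start: 256 */
        size_t usersize   = cache_size / 16 + unconst;  /* window length: 64 */

        /* In the kernel these become:
         *   kmem_cache_create_usercopy("lkdtm-usercopy", cache_size, ...,
         *                              useroffset, usersize, NULL);
         * and hardened usercopy should reject copies touching any byte
         * outside [useroffset, useroffset + usersize). */
        printf("whitelist window: [%zu, %zu)\n",
               useroffset, useroffset + usersize);
        return 0;
    }
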
/linux-6.15/drivers/md/

D | dm-cache-policy-smq.c
    802  dm_cblock_t cache_size;  member
   1118  clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));  in end_cache_period()
   1138  return from_cblock(mq->cache_size) * p / 100u;  in percent_to_target()
   1164  nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;  in free_target_met()
   1732  __smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,  in __smq_create() argument
   1744  mq->cache_size = cache_size;  in __smq_create()
   1747  calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),  in __smq_create()
   1752  if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {  in __smq_create()
   1770  total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));  in __smq_create()
   1779  if (from_cblock(cache_size)) {  in __smq_create()
   [all …]

D | dm-cache-policy.c
    113  dm_cblock_t cache_size,  in dm_cache_policy_create() argument
    126  p = type->create(cache_size, origin_size, cache_block_size);  in dm_cache_policy_create()

D | dm-cache-target.c
    333  dm_cblock_t cache_size;  member
    377  * cache_size entries, dirty if set
    411  * Cache_size entries. Set bits indicate blocks mapped beyond the
   2333  cache->cache_size,  in create_cache_policy()
   2376  if (nr_blocks > (1 << 20) && cache->cache_size != size)  in set_cache_size()
   2382  cache->cache_size = size;  in set_cache_size()
   2437  dm_block_t cache_size = ca->cache_sectors;  in cache_create() local
   2440  cache_size = block_div(cache_size, ca->block_size);  in cache_create()
   2441  set_cache_size(cache, to_cblock(cache_size));  in cache_create()
   2503  cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));  in cache_create()
   [all …]

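dm-cache counts its cache in cache blocks (cblocks): cache_create() divides the device size in sectors by the block size, and smq's percent_to_target() turns tuning percentages into block counts. A sketch of both conversions under an assumed geometry:

    #include <stdio.h>

    typedef unsigned long long dm_block_t;

    /* dm-cache counts the cache in blocks: sectors on the cache device
     * divided by the cache block size (block_div() in cache_create()). */
    static dm_block_t blocks_from_sectors(dm_block_t cache_sectors,
                                          dm_block_t block_size)
    {
        return cache_sectors / block_size;
    }

    /* smq's percent_to_target(): a policy knob given as a percentage
     * becomes an absolute number of cache blocks. */
    static dm_block_t percent_to_target(dm_block_t cache_size, unsigned int p)
    {
        return cache_size * p / 100u;
    }

    int main(void)
    {
        /* Assumed geometry: 1 GiB cache device in 512-byte sectors
         * (2097152 sectors), 64 KiB cache blocks (128 sectors each). */
        dm_block_t cache_size = blocks_from_sectors(2097152, 128);

        printf("cache blocks: %llu\n", cache_size);               /* 16384 */
        printf("25%% target: %llu blocks\n",
               percent_to_target(cache_size, 25));                /* 4096 */
        return 0;
    }
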
/linux-6.15/arch/sh/kernel/cpu/

D | proc.c
     68  unsigned int cache_size;  in show_cacheinfo() local
     70  cache_size = info.ways * info.sets * info.linesz;  in show_cacheinfo()
     73  type, cache_size >> 10, info.ways);  in show_cacheinfo()

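show_cacheinfo() derives the total from the geometry, ways × sets × line size, and shifts by 10 to report KiB. The same computation for an assumed 4-way, 256-set, 32-byte-line cache:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed geometry: 4 ways x 256 sets x 32-byte lines. */
        unsigned int ways = 4, sets = 256, linesz = 32;
        unsigned int cache_size = ways * sets * linesz;   /* bytes */

        /* show_cacheinfo() shifts by 10 to print KiB. */
        printf("%uK (%u-way)\n", cache_size >> 10, ways); /* 32K (4-way) */
        return 0;
    }
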
/linux-6.15/arch/arm/mm/

D | cache-uniphier.c
    319  u32 level, cache_size;  in __uniphier_cache_init() local
    365  if (of_property_read_u32(np, "cache-size", &cache_size) ||  in __uniphier_cache_init()
    366  cache_size == 0 || cache_size % (data->nsets * data->line_size)) {  in __uniphier_cache_init()
    373  data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,  in __uniphier_cache_init()

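The driver rejects a devicetree cache-size that is zero or not a multiple of nsets × line_size; dividing those out yields the way count, and GENMASK(ways - 1, 0) builds a mask with one bit per way. A userspace sketch with a 32-bit stand-in for the kernel's GENMASK():

    #include <stdio.h>

    /* 32-bit userspace stand-in for the kernel's GENMASK(h, l): bits l..h. */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
        /* Assumed values: a 256 KiB cache with 256 sets of 128-byte lines. */
        unsigned int cache_size = 256 * 1024, nsets = 256, line_size = 128;

        if (cache_size == 0 || cache_size % (nsets * line_size)) {
            fprintf(stderr, "bad cache-size property\n");
            return 1;
        }

        unsigned int ways = cache_size / nsets / line_size;       /* 8 */
        unsigned int way_mask = GENMASK(ways - 1, 0);             /* 0xff */

        printf("ways=%u way_mask=0x%x\n", ways, way_mask);
        return 0;
    }
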
/linux-6.15/arch/microblaze/kernel/cpu/

D | cache.c
     92  #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \  argument
     95  if (start < UINT_MAX - cache_size) \
     96  end = min(start + cache_size, end); \
    101  * Helper macro to loop over the specified cache_size/line_length and
    104  #define CACHE_ALL_LOOP(cache_size, line_length, op) \  argument
    106  unsigned int len = cache_size - line_length; \

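CACHE_LOOP_LIMITS caps a flush range at one cache's worth of addresses, and the start < UINT_MAX - cache_size guard keeps start + cache_size from wrapping around 32 bits. A sketch of the clamp:

    #include <stdio.h>
    #include <limits.h>

    /* Clamp [start, end) to at most cache_size bytes, the way microblaze's
     * CACHE_LOOP_LIMITS does: flushing more than one cache's worth of
     * addresses is pointless, and the guard avoids 32-bit wraparound. */
    static void loop_limits(unsigned int start, unsigned int *end,
                            unsigned int cache_size)
    {
        if (start < UINT_MAX - cache_size) {   /* start + cache_size can't wrap */
            if (start + cache_size < *end)
                *end = start + cache_size;
        }
    }

    int main(void)
    {
        unsigned int end = 0x90000000;

        loop_limits(0x80000000, &end, 0x8000);        /* 32 KiB cache */
        printf("clamped end:   0x%x\n", end);         /* 0x80008000 */

        end = UINT_MAX;
        loop_limits(UINT_MAX - 0x100, &end, 0x8000);  /* would wrap: left alone */
        printf("unclamped end: 0x%x\n", end);
        return 0;
    }
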
/linux-6.15/arch/powerpc/kernel/

D | cacheinfo.c
    208  static int cache_size(const struct cache *cache, unsigned int *ret)  function
    211  const __be32 *cache_size;  in cache_size() local
    215  cache_size = of_get_property(cache->ofnode, propname, NULL);  in cache_size()
    216  if (!cache_size)  in cache_size()
    219  *ret = of_read_number(cache_size, 1);  in cache_size()
    227  if (cache_size(cache, &size))  in cache_size_kb()
    292  if (cache_size(cache, &size))  in cache_associativity()

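Devicetree properties are arrays of big-endian 32-bit cells, so of_read_number(cache_size, 1) amounts to converting the first cell to host byte order. A userspace illustration using glibc's <endian.h> helpers (assumed available on Linux):

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>    /* htobe32()/be32toh(); glibc on Linux */

    int main(void)
    {
        /* A devicetree "d-cache-size" property is a big-endian 32-bit
         * cell: 32 KiB (0x8000) is stored as the bytes 00 00 80 00. */
        uint32_t be_cell = htobe32(0x8000);

        /* of_read_number(cache_size, 1) boils down to this conversion. */
        uint32_t size = be32toh(be_cell);

        printf("d-cache-size: %u bytes (%u KiB)\n", size, size / 1024);
        return 0;
    }
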
/linux-6.15/sound/firewire/motu/

D | amdtp-motu.c
    307  const unsigned int cache_size = cache->size;  in cache_event_offsets() local
    323  cache_tail = (cache_tail + 1) % cache_size;  in cache_event_offsets()
    377  const unsigned int cache_size = cache->size;  in write_sph() local
    387  cache_head = (cache_head + 1) % cache_size;  in write_sph()

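Both functions step a ring-buffer index with (pos + 1) % cache_size so it wraps at the cache capacity. A toy producer over an assumed 8-slot ring:

    #include <stdio.h>

    #define CACHE_SIZE 8              /* assumed ring capacity */

    int main(void)
    {
        unsigned int events[CACHE_SIZE] = { 0 };
        unsigned int tail = 0;

        /* Produce 10 items into an 8-slot ring: the index wraps with the
         * same "(pos + 1) % cache_size" step seen at lines 323 and 387;
         * the head index chases it the same way on the consumer side. */
        for (unsigned int i = 0; i < 10; i++) {
            events[tail] = i;
            tail = (tail + 1) % CACHE_SIZE;
        }

        printf("next write slot: %u\n", tail);       /* 10 mod 8 == 2 */
        printf("slot 0 now holds: %u\n", events[0]); /* overwritten by i=8 */
        return 0;
    }
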
/linux-6.15/drivers/acpi/numa/

D | hmat.c
    115  * @cache_size: (Output) size of extended linear cache.
    121  resource_size_t *cache_size)  in hmat_get_extended_linear_cache_size() argument
    141  *cache_size = tcache->cache_attrs.size;  in hmat_get_extended_linear_cache_size()
    145  *cache_size = 0;  in hmat_get_extended_linear_cache_size()
    528  cache->memory_PD, cache->cache_size, attrs,  in hmat_parse_cache()
    541  tcache->cache_attrs.size = cache->cache_size;  in hmat_parse_cache()

/linux-6.15/net/openvswitch/

D | flow_table.c
    375  new->cache_size = size;  in tbl_mask_cache_alloc()
    376  if (new->cache_size > 0) {  in tbl_mask_cache_alloc()
    378  new->cache_size),  in tbl_mask_cache_alloc()
    394  if (size == mc->cache_size)  in ovs_flow_tbl_masks_cache_resize()
    799  if (unlikely(!skb_hash || mc->cache_size == 0)) {  in ovs_flow_tbl_lookup_stats()
    819  int index = hash & (mc->cache_size - 1);  in ovs_flow_tbl_lookup_stats()
    946  return READ_ONCE(mc->cache_size);  in ovs_flow_tbl_masks_cache_size()

D | flow_table.h
     32  u32 cache_size;  /* Must be ^2 value. */  member

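Because cache_size is constrained to a power of two (the flow_table.h comment), the lookup can index with hash & (cache_size - 1) instead of a modulo division; the low bits of the hash select the slot:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t cache_size = 256;     /* must be a power of two */
        uint32_t hash = 0xdeadbeef;

        /* For power-of-two sizes, masking with size-1 equals the modulo
         * without a division: the low bits select the slot. */
        uint32_t index = hash & (cache_size - 1);

        printf("index: %u (mod: %u)\n", index, hash % cache_size);  /* 239 */
        return 0;
    }
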
/linux-6.15/tools/perf/scripts/python/

D | arm-cs-trace-disasm.py
     88  cache_size = 64*1024  variable
    150  if (len(disasm_cache) > cache_size):
    241  global cache_size

/linux-6.15/drivers/block/null_blk/

D | main.c
    225  module_param_named(cache_size, g_cache_size, ulong, 0444);
    230  MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
    461  NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
    803  dev->cache_size = g_cache_size;  in null_alloc_dev()
   1086  if ((nullb->dev->cache_size * 1024 * 1024) >  in null_make_cache_space()
   1227  nullb->dev->cache_size * 1024 * 1024);  in null_handle_flush()
   1905  dev->cache_size = 0;  in null_validate_conf()
   1906  dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,  in null_validate_conf()
   1907  dev->cache_size);  in null_validate_conf()
   1988  if (dev->cache_size > 0) {  in null_add_dev()

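cache_size is configured in MB and multiplied by 1024 * 1024 wherever bytes are needed, so null_validate_conf() first clamps it to ULONG_MAX / 1024 / 1024 to make that multiplication overflow-free. A sketch with a userspace stand-in for min_t():

    #include <stdio.h>
    #include <limits.h>

    /* Userspace stand-in for the kernel's min_t(). */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned long cache_size = ULONG_MAX;   /* hostile MB value */

        /* Clamp before converting: anything above ULONG_MAX / 2^20 MB
         * would overflow the later "* 1024 * 1024" byte conversion. */
        cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, cache_size);

        unsigned long bytes = cache_size * 1024 * 1024;      /* now safe */
        printf("clamped: %lu MB (%lu bytes)\n", cache_size, bytes);
        return 0;
    }
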
/linux-6.15/drivers/cxl/core/

D | region.c
    846  return p->res->start + p->cache_size == range->start &&  in region_res_match_cxl_range()
   1977  if (resource_size(cxled->dpa_res) * p->interleave_ways + p->cache_size !=  in cxl_region_attach()
   1983  (u64)p->cache_size, (u64)resource_size(p->res));  in cxl_region_attach()
   2937  hpa = hpa_offset + p->res->start + p->cache_size;  in cxl_dpa_to_hpa()
   3239  resource_size_t cache_size, start;  in cxl_extended_linear_cache_resize() local
   3242  rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);  in cxl_extended_linear_cache_resize()
   3246  if (!cache_size)  in cxl_extended_linear_cache_resize()
   3249  if (size != cache_size) {  in cxl_extended_linear_cache_resize()
   3252  &cache_size, &size);  in cxl_extended_linear_cache_resize()
   3265  start = res->start - cache_size;  in cxl_extended_linear_cache_resize()
   [all …]

/linux-6.15/arch/mips/kernel/

D | pm-cps.c
    193  unsigned cache_size = cache->ways << cache->waybit;  in cps_gen_cache_routine() local
    205  if (cache_size < 0x8000)  in cps_gen_cache_routine()
    206  uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size);  in cps_gen_cache_routine()
    208  UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size));  in cps_gen_cache_routine()

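The generated cache-init loop needs an end address of start + cache_size, where the size is the way count shifted by waybit (log2 of the per-way size). MIPS addiu sign-extends a 16-bit immediate, so only sizes below 0x8000 fit the single-instruction form; larger caches make the generated code load an absolute address instead. A sketch of that decision under an assumed geometry:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed geometry: 4 ways, waybit 12 (a 4 KiB way), so the
         * total is the way count shifted by waybit. */
        unsigned int ways = 4, waybit = 12;
        unsigned int cache_size = ways << waybit;            /* 16384 */

        /* addiu's immediate is sign-extended 16 bits, hence the 0x8000
         * cutoff mirrored from line 205. */
        if (cache_size < 0x8000)
            printf("addiu t1, t0, %u\n", cache_size);
        else
            printf("la    t1, CKSEG0 + 0x%x\n", cache_size);
        return 0;
    }
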
/linux-6.15/sound/firewire/

D | amdtp-stream.c
    511  const unsigned int cache_size = s->ctx_data.tx.cache.size;  in calculate_cached_cycle_count() local
    515  cycles += cache_size;  in calculate_cached_cycle_count()
    524  const unsigned int cache_size = s->ctx_data.tx.cache.size;  in cache_seq() local
    539  cache_pos = (cache_pos + 1) % cache_size;  in cache_seq()
    562  const unsigned int cache_size = target->ctx_data.tx.cache.size;  in pool_replayed_seq() local
    568  cache_pos = (cache_pos + 1) % cache_size;  in pool_replayed_seq()
    589  const unsigned int cache_size = tx->ctx_data.tx.cache.size;  in pool_seq_descs() local
    593  if (cached_cycles > count && cached_cycles > cache_size / 2)  in pool_seq_descs()