Lines matching full:r (identifier cross-reference; the hits below are drawn from the x86 resctrl core, arch/x86/kernel/cpu/resctrl/core.c)

13  * More information about RDT can be found in the Intel(R) x86 Architecture
53 struct rdt_resource *r);
55 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
58 struct rdt_resource *r);
177 static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid) in cbm_idx() argument
179 return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset; in cbm_idx()
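
The index computation at line 179 is a plain affine map from CLOSID to MSR slot. A minimal user-space sketch, with illustrative multiplier/offset values (the real values come from the per-resource tables; e.g. a CDP data/code pair doubles the stride):

#include <stdio.h>

/* Sketch only: mirrors cbm_idx() above with assumed parameter values. */
struct cache_layout {
        unsigned int cbm_idx_mult;      /* MSR stride per CLOSID */
        unsigned int cbm_idx_offset;    /* index of CLOSID 0's MSR */
};

static unsigned int cbm_idx(const struct cache_layout *c, unsigned int closid)
{
        return closid * c->cbm_idx_mult + c->cbm_idx_offset;
}

int main(void)
{
        struct cache_layout plain    = { .cbm_idx_mult = 1, .cbm_idx_offset = 0 };
        struct cache_layout cdp_data = { .cbm_idx_mult = 2, .cbm_idx_offset = 0 };

        /* CLOSID 3 lands on slot 3 for plain CAT, slot 6 when CDP doubles
         * the stride (data/code MSRs interleave). */
        printf("plain: %u, cdp data: %u\n",
               cbm_idx(&plain, 3), cbm_idx(&cdp_data, 3));
        return 0;
}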
188 * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
189 * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
190 * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
191 * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
192 * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
193 * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
202 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3]; in cache_alloc_hsw_probe() local
214 r->num_closid = 4; in cache_alloc_hsw_probe()
215 r->default_ctrl = max_cbm; in cache_alloc_hsw_probe()
216 r->cache.cbm_len = 20; in cache_alloc_hsw_probe()
217 r->cache.shareable_bits = 0xc0000; in cache_alloc_hsw_probe()
218 r->cache.min_cbm_bits = 2; in cache_alloc_hsw_probe()
219 r->alloc_capable = true; in cache_alloc_hsw_probe()
220 r->alloc_enabled = true; in cache_alloc_hsw_probe()
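
These Haswell-era parts predate CPUID enumeration of CAT, so cache_alloc_hsw_probe() hard-codes the parameters. A sketch of what the assignments above imply; note that max_cbm here is derived arithmetically, whereas the real probe discovers it by writing the mask MSR and reading it back:

#include <stdio.h>

int main(void)
{
        unsigned int cbm_len = 20;                      /* 20-way L3 capacity mask */
        unsigned int max_cbm = (1u << cbm_len) - 1;     /* 0xfffff: all ways usable */
        unsigned int shareable = 0xc0000;               /* top two ways shared w/ HW */
        unsigned int num_closid = 4, min_cbm_bits = 2;

        printf("default_ctrl=%#x shareable=%#x closids=%u min_bits=%u\n",
               max_cbm, shareable, num_closid, min_cbm_bits);
        return 0;
}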
225 bool is_mba_sc(struct rdt_resource *r) in is_mba_sc() argument
227 if (!r) in is_mba_sc()
230 return r->membw.mba_sc; in is_mba_sc()
243 static inline bool rdt_get_mb_table(struct rdt_resource *r) in rdt_get_mb_table() argument
254 static bool __get_mem_config_intel(struct rdt_resource *r) in __get_mem_config_intel() argument
261 r->num_closid = edx.split.cos_max + 1; in __get_mem_config_intel()
263 r->default_ctrl = MAX_MBA_BW; in __get_mem_config_intel()
264 r->membw.arch_needs_linear = true; in __get_mem_config_intel()
266 r->membw.delay_linear = true; in __get_mem_config_intel()
267 r->membw.min_bw = MAX_MBA_BW - max_delay; in __get_mem_config_intel()
268 r->membw.bw_gran = MAX_MBA_BW - max_delay; in __get_mem_config_intel()
270 if (!rdt_get_mb_table(r)) in __get_mem_config_intel()
272 r->membw.arch_needs_linear = false; in __get_mem_config_intel()
274 r->data_width = 3; in __get_mem_config_intel()
277 r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; in __get_mem_config_intel()
279 r->membw.throttle_mode = THREAD_THROTTLE_MAX; in __get_mem_config_intel()
282 r->alloc_capable = true; in __get_mem_config_intel()
283 r->alloc_enabled = true; in __get_mem_config_intel()
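
For the linear-delay case, both the bandwidth floor and the granularity fall out of the CPUID-reported maximum throttle delay. A sketch with an assumed max_delay value; MAX_MBA_BW = 100 matches the percent-based convention implied by the code above:

#include <stdio.h>

#define MAX_MBA_BW 100u         /* bandwidth is expressed in percent */

int main(void)
{
        unsigned int max_delay = 90;                    /* assumed CPUID-reported delay + 1 */
        unsigned int min_bw = MAX_MBA_BW - max_delay;   /* -> 10% floor */
        unsigned int bw_gran = MAX_MBA_BW - max_delay;  /* -> 10% steps */

        printf("min_bw=%u%% bw_gran=%u%%\n", min_bw, bw_gran);
        return 0;
}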
288 static bool __rdt_get_mem_config_amd(struct rdt_resource *r) in __rdt_get_mem_config_amd() argument
295 r->num_closid = edx.split.cos_max + 1; in __rdt_get_mem_config_amd()
296 r->default_ctrl = MAX_MBA_BW_AMD; in __rdt_get_mem_config_amd()
299 r->membw.delay_linear = false; in __rdt_get_mem_config_amd()
300 r->membw.arch_needs_linear = false; in __rdt_get_mem_config_amd()
306 r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; in __rdt_get_mem_config_amd()
307 r->membw.min_bw = 0; in __rdt_get_mem_config_amd()
308 r->membw.bw_gran = 1; in __rdt_get_mem_config_amd()
310 r->data_width = 4; in __rdt_get_mem_config_amd()
312 r->alloc_capable = true; in __rdt_get_mem_config_amd()
313 r->alloc_enabled = true; in __rdt_get_mem_config_amd()
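
AMD's MBA controls are absolute bandwidth limits rather than percentage delays, hence min_bw = 0, a granularity of 1, and the wider 4-character data_width. A sketch contrasting the two conventions; the 0x800 value for MAX_MBA_BW_AMD is an assumption for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int intel_ctrl = 100;  /* percent of bandwidth, linear delay */
        unsigned int amd_ctrl = 0x800;  /* absolute limit, 1-unit granularity */

        printf("Intel: %3u%%  AMD: %4u\n", intel_ctrl, amd_ctrl);
        return 0;
}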
318 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) in rdt_get_cache_alloc_cfg() argument
325 r->num_closid = edx.split.cos_max + 1; in rdt_get_cache_alloc_cfg()
326 r->cache.cbm_len = eax.split.cbm_len + 1; in rdt_get_cache_alloc_cfg()
327 r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1; in rdt_get_cache_alloc_cfg()
328 r->cache.shareable_bits = ebx & r->default_ctrl; in rdt_get_cache_alloc_cfg()
329 r->data_width = (r->cache.cbm_len + 3) / 4; in rdt_get_cache_alloc_cfg()
330 r->alloc_capable = true; in rdt_get_cache_alloc_cfg()
331 r->alloc_enabled = true; in rdt_get_cache_alloc_cfg()
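
Everything in rdt_get_cache_alloc_cfg() derives from one CPUID leaf: the mask length comes back off-by-one, the default control is the all-ones mask of that length, and data_width is the number of hex digits needed to print it. A sketch with an assumed raw CPUID value:

#include <stdio.h>

int main(void)
{
        unsigned int cbm_len_raw = 19;                  /* CPUID reports len - 1 */
        unsigned int cbm_len = cbm_len_raw + 1;         /* 20-bit capacity mask */
        unsigned int default_ctrl = (1u << cbm_len) - 1;/* all ways enabled */
        unsigned int data_width = (cbm_len + 3) / 4;    /* hex digits to print */

        printf("cbm_len=%u default_ctrl=%#x width=%u\n",
               cbm_len, default_ctrl, data_width);
        return 0;
}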
337 struct rdt_resource *r = &rdt_resources_all[type]; in rdt_get_cdp_config() local
339 r->num_closid = r_l->num_closid / 2; in rdt_get_cdp_config()
340 r->cache.cbm_len = r_l->cache.cbm_len; in rdt_get_cdp_config()
341 r->default_ctrl = r_l->default_ctrl; in rdt_get_cdp_config()
342 r->cache.shareable_bits = r_l->cache.shareable_bits; in rdt_get_cdp_config()
343 r->data_width = (r->cache.cbm_len + 3) / 4; in rdt_get_cdp_config()
344 r->alloc_capable = true; in rdt_get_cdp_config()
349 r->alloc_enabled = false; in rdt_get_cdp_config()
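
With CDP each CLOSID consumes two control MSRs (one code, one data), so the derived resource gets half the parent's CLOSID space while inheriting the mask geometry unchanged. A trivial sketch of the split, with an assumed parent count:

#include <stdio.h>

int main(void)
{
        unsigned int l3_num_closid = 16;        /* assumed parent L3 count */
        unsigned int cdp_num_closid = l3_num_closid / 2;

        /* cbm_len, default_ctrl and shareable_bits are inherited
         * unchanged from the parent L3 resource, as shown above. */
        printf("CDP closids: %u\n", cdp_num_closid);
        return 0;
}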
365 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) in mba_wrmsr_amd() argument
370 wrmsrl(r->msr_base + i, d->ctrl_val[i]); in mba_wrmsr_amd()
378 u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) in delay_bw_map() argument
380 if (r->membw.delay_linear) in delay_bw_map()
384 return r->default_ctrl; in delay_bw_map()
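
The linear branch of delay_bw_map() is the inverse of the min_bw arithmetic earlier: what gets programmed is a delay, i.e. the distance from full bandwidth. A sketch, again assuming MAX_MBA_BW = 100:

#include <stdio.h>

#define MAX_MBA_BW 100u

/* Sketch of the linear case: user asks for bw%, hardware wants a delay. */
static unsigned int delay_bw_map_linear(unsigned long bw)
{
        return MAX_MBA_BW - bw;
}

int main(void)
{
        printf("bw 70%% -> delay %u\n", delay_bw_map_linear(70));
        return 0;
}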
389 struct rdt_resource *r) in mba_wrmsr_intel() argument
395 wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r)); in mba_wrmsr_intel()
399 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) in cat_wrmsr() argument
404 wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]); in cat_wrmsr()
407 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) in get_domain_from_cpu() argument
411 list_for_each_entry(d, &r->domains, list) { in get_domain_from_cpu()
423 struct rdt_resource *r = m->res; in rdt_ctrl_update() local
427 d = get_domain_from_cpu(cpu, r); in rdt_ctrl_update()
429 r->msr_update(d, m, r); in rdt_ctrl_update()
433 cpu, r->name); in rdt_ctrl_update()
439 * Search resource r's domain list to find the resource id. If the resource
444 struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, in rdt_find_domain() argument
453 list_for_each(l, &r->domains) { in rdt_find_domain()
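
The domain list is kept sorted by id, so the walk in rdt_find_domain() doubles as an insertion-point search: a hit returns the domain, a miss reports where a new one belongs. A self-contained sketch of that pattern on a toy singly linked list (the names and pointer-to-pointer style here are illustrative, not the kernel's list_head API):

#include <stdio.h>
#include <stdlib.h>

struct dom {
        int id;
        struct dom *next;
};

/* Return the domain with matching id, or NULL; on a miss, *add_pos is
 * set to the link where a new domain should be spliced in to keep the
 * list sorted ascending by id. */
static struct dom *find_domain(struct dom **head, int id, struct dom ***add_pos)
{
        struct dom **link = head;

        for (; *link; link = &(*link)->next) {
                if ((*link)->id == id)
                        return *link;
                if ((*link)->id > id)
                        break;
        }
        if (add_pos)
                *add_pos = link;
        return NULL;
}

int main(void)
{
        struct dom d3 = { .id = 3 }, d1 = { .id = 1, .next = &d3 };
        struct dom *head = &d1, **pos;

        if (!find_domain(&head, 2, &pos)) {
                struct dom *d2 = calloc(1, sizeof(*d2));

                d2->id = 2;
                d2->next = *pos;        /* splice before id 3 */
                *pos = d2;
        }
        for (struct dom *d = head; d; d = d->next)
                printf("%d ", d->id);
        printf("\n");                   /* prints: 1 2 3 */
        return 0;
}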
469 void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm) in setup_default_ctrlval() argument
479 for (i = 0; i < r->num_closid; i++, dc++, dm++) { in setup_default_ctrlval()
480 *dc = r->default_ctrl; in setup_default_ctrlval()
485 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d) in domain_setup_ctrlval() argument
490 dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL); in domain_setup_ctrlval()
494 dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL); in domain_setup_ctrlval()
502 setup_default_ctrlval(r, dc, dm); in domain_setup_ctrlval()
505 m.high = r->num_closid; in domain_setup_ctrlval()
506 r->msr_update(d, &m, r); in domain_setup_ctrlval()
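
Each new domain gets per-CLOSID control arrays filled with the resource default, then one msr_param spanning all CLOSIDs (m.high = num_closid above; m.low = 0 is implied by the surrounding code) pushes the values to hardware. A sketch with the MSR writes replaced by prints:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned int num_closid = 4, default_ctrl = 0xfffff, i;
        unsigned int *ctrl_val;

        ctrl_val = malloc(num_closid * sizeof(*ctrl_val));
        if (!ctrl_val)
                return 1;
        for (i = 0; i < num_closid; i++)
                ctrl_val[i] = default_ctrl;     /* every CLOSID starts wide open */

        /* Stand-in for msr_update() sweeping slots [m.low, m.high). */
        for (i = 0; i < num_closid; i++)
                printf("MSR slot %u <- %#x\n", i, ctrl_val[i]);
        free(ctrl_val);
        return 0;
}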
510 static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) in domain_setup_mon_state() argument
515 d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL); in domain_setup_mon_state()
522 d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL); in domain_setup_mon_state()
530 d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL); in domain_setup_mon_state()
549 * If an existing domain in the resource r's domain list matches the cpu's
559 static void domain_add_cpu(int cpu, struct rdt_resource *r) in domain_add_cpu() argument
561 int id = get_cpu_cacheinfo_id(cpu, r->cache_level); in domain_add_cpu()
565 d = rdt_find_domain(r, id, &add_pos); in domain_add_cpu()
573 if (r->cache.arch_has_per_cpu_cfg) in domain_add_cpu()
574 rdt_domain_reconfigure_cdp(r); in domain_add_cpu()
585 rdt_domain_reconfigure_cdp(r); in domain_add_cpu()
587 if (r->alloc_capable && domain_setup_ctrlval(r, d)) { in domain_add_cpu()
592 if (r->mon_capable && domain_setup_mon_state(r, d)) { in domain_add_cpu()
604 mkdir_mondata_subdir_allrdtgrp(r, d); in domain_add_cpu()
607 static void domain_remove_cpu(int cpu, struct rdt_resource *r) in domain_remove_cpu() argument
609 int id = get_cpu_cacheinfo_id(cpu, r->cache_level); in domain_remove_cpu()
612 d = rdt_find_domain(r, id, NULL); in domain_remove_cpu()
625 rmdir_mondata_subdir_allrdtgrp(r, d->id); in domain_remove_cpu()
627 if (r->mon_capable && is_mbm_enabled()) in domain_remove_cpu()
629 if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { in domain_remove_cpu()
658 if (r == &rdt_resources_all[RDT_RESOURCE_L3]) { in domain_remove_cpu()
664 has_busy_rmid(r, d)) { in domain_remove_cpu()
684 struct rdt_resource *r; in resctrl_online_cpu() local
687 for_each_capable_rdt_resource(r) in resctrl_online_cpu()
688 domain_add_cpu(cpu, r); in resctrl_online_cpu()
697 static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) in clear_childcpus() argument
701 list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { in clear_childcpus()
711 struct rdt_resource *r; in resctrl_offline_cpu() local
714 for_each_capable_rdt_resource(r) in resctrl_offline_cpu()
715 domain_remove_cpu(cpu, r); in resctrl_offline_cpu()
734 struct rdt_resource *r; in rdt_init_padding() local
737 for_each_alloc_capable_rdt_resource(r) { in rdt_init_padding()
738 cl = strlen(r->name); in rdt_init_padding()
742 if (r->data_width > max_data_width) in rdt_init_padding()
743 max_data_width = r->data_width; in rdt_init_padding()
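
rdt_init_padding() just takes maxima across the alloc-capable resources so the schemata file columns line up later. A sketch with assumed resource names and widths:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *names[] = { "L3", "L2", "MB" };
        unsigned int widths[] = { 5, 5, 3 };    /* assumed data widths */
        unsigned int i, n = sizeof(names) / sizeof(names[0]);
        unsigned int max_name = 0, max_data = 0;

        for (i = 0; i < n; i++) {
                if (strlen(names[i]) > max_name)
                        max_name = strlen(names[i]);
                if (widths[i] > max_data)
                        max_data = widths[i];
        }
        printf("name padding %u, data padding %u\n", max_name, max_data);
        return 0;
}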
917 struct rdt_resource *r; in rdt_init_res_defs_intel() local
919 for_each_rdt_resource(r) { in rdt_init_res_defs_intel()
920 if (r->rid == RDT_RESOURCE_L3 || in rdt_init_res_defs_intel()
921 r->rid == RDT_RESOURCE_L3DATA || in rdt_init_res_defs_intel()
922 r->rid == RDT_RESOURCE_L3CODE || in rdt_init_res_defs_intel()
923 r->rid == RDT_RESOURCE_L2 || in rdt_init_res_defs_intel()
924 r->rid == RDT_RESOURCE_L2DATA || in rdt_init_res_defs_intel()
925 r->rid == RDT_RESOURCE_L2CODE) { in rdt_init_res_defs_intel()
926 r->cache.arch_has_sparse_bitmaps = false; in rdt_init_res_defs_intel()
927 r->cache.arch_has_empty_bitmaps = false; in rdt_init_res_defs_intel()
928 r->cache.arch_has_per_cpu_cfg = false; in rdt_init_res_defs_intel()
929 } else if (r->rid == RDT_RESOURCE_MBA) { in rdt_init_res_defs_intel()
930 r->msr_base = MSR_IA32_MBA_THRTL_BASE; in rdt_init_res_defs_intel()
931 r->msr_update = mba_wrmsr_intel; in rdt_init_res_defs_intel()
938 struct rdt_resource *r; in rdt_init_res_defs_amd() local
940 for_each_rdt_resource(r) { in rdt_init_res_defs_amd()
941 if (r->rid == RDT_RESOURCE_L3 || in rdt_init_res_defs_amd()
942 r->rid == RDT_RESOURCE_L3DATA || in rdt_init_res_defs_amd()
943 r->rid == RDT_RESOURCE_L3CODE || in rdt_init_res_defs_amd()
944 r->rid == RDT_RESOURCE_L2 || in rdt_init_res_defs_amd()
945 r->rid == RDT_RESOURCE_L2DATA || in rdt_init_res_defs_amd()
946 r->rid == RDT_RESOURCE_L2CODE) { in rdt_init_res_defs_amd()
947 r->cache.arch_has_sparse_bitmaps = true; in rdt_init_res_defs_amd()
948 r->cache.arch_has_empty_bitmaps = true; in rdt_init_res_defs_amd()
949 r->cache.arch_has_per_cpu_cfg = true; in rdt_init_res_defs_amd()
950 } else if (r->rid == RDT_RESOURCE_MBA) { in rdt_init_res_defs_amd()
951 r->msr_base = MSR_IA32_MBA_BW_BASE; in rdt_init_res_defs_amd()
952 r->msr_update = mba_wrmsr_amd; in rdt_init_res_defs_amd()
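
The per-vendor cache flags above encode a real behavioral difference: Intel requires contiguous, non-empty capacity masks and uses domain-global configuration, while AMD accepts sparse or empty masks and configures per CPU. A sketch of what the first two flags mean when validating a user-supplied mask (the actual check lives elsewhere in resctrl's schemata write path):

#include <stdbool.h>
#include <stdio.h>

/* A nonzero mask is one contiguous run of 1s iff setting the bit just
 * above the run and masking leaves nothing behind. */
static bool cbm_is_contiguous(unsigned long cbm)
{
        return cbm && ((((cbm | (cbm - 1)) + 1) & cbm) == 0);
}

static bool cbm_valid(unsigned long cbm, bool sparse_ok, bool empty_ok)
{
        if (!cbm)
                return empty_ok;        /* arch_has_empty_bitmaps */
        return sparse_ok || cbm_is_contiguous(cbm);     /* arch_has_sparse_bitmaps */
}

int main(void)
{
        /* Intel-style (false, false) rejects 0b0101; AMD-style accepts. */
        printf("intel 0x5: %d, amd 0x5: %d\n",
               cbm_valid(0x5, false, false), cbm_valid(0x5, true, true));
        return 0;
}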
999 struct rdt_resource *r; in resctrl_late_init() local
1028 for_each_alloc_capable_rdt_resource(r) in resctrl_late_init()
1029 pr_info("%s allocation detected\n", r->name); in resctrl_late_init()
1031 for_each_mon_capable_rdt_resource(r) in resctrl_late_init()
1032 pr_info("%s monitoring detected\n", r->name); in resctrl_late_init()