Lines Matching full:r
13 * More information about RDT can be found in the Intel (R) x86 Architecture
53 struct rdt_resource *r);
55 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
58 struct rdt_resource *r);
123 * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
124 * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
125 * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
126 * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
127 * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
128 * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
138 struct rdt_resource *r = &hw_res->r_resctrl; in cache_alloc_hsw_probe() local
151 r->default_ctrl = max_cbm; in cache_alloc_hsw_probe()
152 r->cache.cbm_len = 20; in cache_alloc_hsw_probe()
153 r->cache.shareable_bits = 0xc0000; in cache_alloc_hsw_probe()
154 r->cache.min_cbm_bits = 2; in cache_alloc_hsw_probe()
155 r->cache.arch_has_sparse_bitmasks = false; in cache_alloc_hsw_probe()
156 r->alloc_capable = true; in cache_alloc_hsw_probe()
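
Note: the cache_alloc_hsw_probe() fragments above hard-code the L3 CAT geometry for these Haswell SKUs because CPUID does not enumerate CAT on them. A minimal user-space sketch of the same constants, purely illustrative (a 20-bit capacity bitmask with the top two ways marked shareable):

#include <stdio.h>

int main(void)
{
	unsigned int cbm_len = 20;                   /* bits in the capacity bitmask */
	unsigned int max_cbm = (1u << cbm_len) - 1;  /* 0xfffff: every way allocatable */
	unsigned int shareable = 0xc0000;            /* top two ways shared with other agents */

	printf("default_ctrl = 0x%05x\n", max_cbm);
	printf("shareable    = 0x%05x\n", shareable);
	printf("min_cbm_bits = 2\n");                /* Haswell requires at least 2 contiguous bits */
	return 0;
}
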
161 bool is_mba_sc(struct rdt_resource *r) in is_mba_sc() argument
163 if (!r) in is_mba_sc()
170 if (r->rid != RDT_RESOURCE_MBA) in is_mba_sc()
173 return r->membw.mba_sc; in is_mba_sc()
186 static inline bool rdt_get_mb_table(struct rdt_resource *r) in rdt_get_mb_table() argument
197 static bool __get_mem_config_intel(struct rdt_resource *r) in __get_mem_config_intel() argument
199 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in __get_mem_config_intel()
207 r->default_ctrl = MAX_MBA_BW; in __get_mem_config_intel()
208 r->membw.arch_needs_linear = true; in __get_mem_config_intel()
210 r->membw.delay_linear = true; in __get_mem_config_intel()
211 r->membw.min_bw = MAX_MBA_BW - max_delay; in __get_mem_config_intel()
212 r->membw.bw_gran = MAX_MBA_BW - max_delay; in __get_mem_config_intel()
214 if (!rdt_get_mb_table(r)) in __get_mem_config_intel()
216 r->membw.arch_needs_linear = false; in __get_mem_config_intel()
218 r->data_width = 3; in __get_mem_config_intel()
221 r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; in __get_mem_config_intel()
223 r->membw.throttle_mode = THREAD_THROTTLE_MAX; in __get_mem_config_intel()
226 r->alloc_capable = true; in __get_mem_config_intel()
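
Note: in __get_mem_config_intel() the minimum bandwidth and the granularity are both computed as MAX_MBA_BW minus the maximum throttling delay enumerated by CPUID. A hedged user-space sketch of that arithmetic, assuming MAX_MBA_BW is the usual 100 percent and using a made-up max_delay:

#include <stdio.h>

#define MAX_MBA_BW 100u  /* assumed: Intel MBA controls are percentages */

int main(void)
{
	unsigned int max_delay = 90;                    /* hypothetical CPUID-reported maximum delay */
	unsigned int min_bw  = MAX_MBA_BW - max_delay;  /* lowest programmable bandwidth: 10% */
	unsigned int bw_gran = MAX_MBA_BW - max_delay;  /* programmable in 10% steps */

	printf("min_bw = %u%%, bw_gran = %u%%\n", min_bw, bw_gran);
	return 0;
}
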
231 static bool __rdt_get_mem_config_amd(struct rdt_resource *r) in __rdt_get_mem_config_amd() argument
233 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in __rdt_get_mem_config_amd()
242 subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1; in __rdt_get_mem_config_amd()
246 r->default_ctrl = MAX_MBA_BW_AMD; in __rdt_get_mem_config_amd()
249 r->membw.delay_linear = false; in __rdt_get_mem_config_amd()
250 r->membw.arch_needs_linear = false; in __rdt_get_mem_config_amd()
256 r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; in __rdt_get_mem_config_amd()
257 r->membw.min_bw = 0; in __rdt_get_mem_config_amd()
258 r->membw.bw_gran = 1; in __rdt_get_mem_config_amd()
260 r->data_width = 4; in __rdt_get_mem_config_amd()
262 r->alloc_capable = true; in __rdt_get_mem_config_amd()
267 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) in rdt_get_cache_alloc_cfg() argument
269 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in rdt_get_cache_alloc_cfg()
277 r->cache.cbm_len = eax.split.cbm_len + 1; in rdt_get_cache_alloc_cfg()
278 r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1; in rdt_get_cache_alloc_cfg()
279 r->cache.shareable_bits = ebx & r->default_ctrl; in rdt_get_cache_alloc_cfg()
280 r->data_width = (r->cache.cbm_len + 3) / 4; in rdt_get_cache_alloc_cfg()
282 r->cache.arch_has_sparse_bitmasks = ecx.split.noncont; in rdt_get_cache_alloc_cfg()
283 r->alloc_capable = true; in rdt_get_cache_alloc_cfg()
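
Note: rdt_get_cache_alloc_cfg() derives the CAT parameters from CPUID: the usable bitmask length is the reported field plus one, default_ctrl is a full mask of that many bits, and data_width is the number of hex digits needed to print such a mask. A user-space sketch of the same derivation with a made-up CPUID value:

#include <stdio.h>

int main(void)
{
	unsigned int cpuid_cbm_len = 10;                 /* hypothetical CPUID-reported field */
	unsigned int cbm_len = cpuid_cbm_len + 1;        /* 11 usable bits */
	unsigned int default_ctrl = (1u << cbm_len) - 1; /* full bitmask: 0x7ff */
	unsigned int data_width = (cbm_len + 3) / 4;     /* hex digits needed to print it: 3 */

	printf("cbm_len=%u default_ctrl=0x%x data_width=%u\n",
	       cbm_len, default_ctrl, data_width);
	return 0;
}
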
307 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) in mba_wrmsr_amd() argument
311 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in mba_wrmsr_amd()
322 static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) in delay_bw_map() argument
324 if (r->membw.delay_linear) in delay_bw_map()
328 return r->default_ctrl; in delay_bw_map()
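
Note: delay_bw_map() turns a bandwidth percentage into the delay value programmed into the MBA MSR; the non-linear case shown above simply falls back to default_ctrl. In the linear case (not matched in this listing because that line does not mention r) the mapping is essentially MAX_MBA_BW minus the requested bandwidth. A sketch of that linear mapping, assuming MAX_MBA_BW is 100:

#include <stdio.h>

#define MAX_MBA_BW 100ul  /* assumed percentage scale */

/* Illustrative stand-in for the linear branch of delay_bw_map(). */
static unsigned long linear_delay(unsigned long bw_percent)
{
	return MAX_MBA_BW - bw_percent;  /* e.g. 80% bandwidth -> delay value 20 */
}

int main(void)
{
	printf("bw=80%% -> delay %lu\n", linear_delay(80));
	printf("bw=10%% -> delay %lu\n", linear_delay(10));
	return 0;
}
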
333 struct rdt_resource *r) in mba_wrmsr_intel() argument
337 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in mba_wrmsr_intel()
341 wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r)); in mba_wrmsr_intel()
345 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) in cat_wrmsr() argument
349 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in cat_wrmsr()
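
Note: cat_wrmsr() and the mba_wrmsr_*() helpers share one pattern: the msr_param supplies a [low, high) range of CLOSIDs, and one MSR at msr_base + closid is written per entry of the domain's ctrl_val[] array (the MBA variants translate the value first). A user-space sketch of that loop with a stubbed wrmsr; the stub and the example base address are illustrative only:

#include <stdio.h>
#include <stdint.h>

static void fake_wrmsr(uint32_t msr, uint64_t val)  /* stand-in for wrmsrl() */
{
	printf("wrmsr(0x%x) = 0x%llx\n", msr, (unsigned long long)val);
}

int main(void)
{
	uint32_t msr_base = 0xc90;                       /* e.g. the L3 CBM MSR base */
	uint32_t ctrl_val[] = { 0xfffff, 0xfffff, 0x000ff, 0xff000 };
	unsigned int low = 0, high = 4;                  /* CLOSID range from the msr_param */

	for (unsigned int i = low; i < high; i++)
		fake_wrmsr(msr_base + i, ctrl_val[i]);
	return 0;
}
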
355 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) in get_domain_from_cpu() argument
359 list_for_each_entry(d, &r->domains, list) { in get_domain_from_cpu()
368 u32 resctrl_arch_get_num_closid(struct rdt_resource *r) in resctrl_arch_get_num_closid() argument
370 return resctrl_to_arch_res(r)->num_closid; in resctrl_arch_get_num_closid()
377 struct rdt_resource *r = m->res; in rdt_ctrl_update() local
381 d = get_domain_from_cpu(cpu, r); in rdt_ctrl_update()
383 hw_res->msr_update(d, m, r); in rdt_ctrl_update()
387 cpu, r->name); in rdt_ctrl_update()
393 * Search resource r's domain list to find the resource id. If the resource
398 struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, in rdt_find_domain() argument
407 list_for_each(l, &r->domains) { in rdt_find_domain()
423 static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc) in setup_default_ctrlval() argument
425 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in setup_default_ctrlval()
434 *dc = r->default_ctrl; in setup_default_ctrlval()
445 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d) in domain_setup_ctrlval() argument
447 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); in domain_setup_ctrlval()
458 setup_default_ctrlval(r, dc); in domain_setup_ctrlval()
462 hw_res->msr_update(d, &m, r); in domain_setup_ctrlval()
497 * If an existing domain in the resource r's domain list matches the cpu's
507 static void domain_add_cpu(int cpu, struct rdt_resource *r) in domain_add_cpu() argument
509 int id = get_cpu_cacheinfo_id(cpu, r->cache_level); in domain_add_cpu()
515 d = rdt_find_domain(r, id, &add_pos); in domain_add_cpu()
523 if (r->cache.arch_has_per_cpu_cfg) in domain_add_cpu()
524 rdt_domain_reconfigure_cdp(r); in domain_add_cpu()
536 rdt_domain_reconfigure_cdp(r); in domain_add_cpu()
538 if (r->alloc_capable && domain_setup_ctrlval(r, d)) { in domain_add_cpu()
543 if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) { in domain_add_cpu()
550 err = resctrl_online_domain(r, d); in domain_add_cpu()
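
Note: domain_add_cpu() keys domains on the cache instance ID at r->cache_level. If a domain for that ID already exists, the CPU simply joins its mask; otherwise a new domain is allocated, its control values and MBM counters are set up, and resctrl_online_domain() is called. A compact user-space sketch of that lookup-or-create flow over a plain array, purely illustrative of the control flow (the kernel keeps a sorted linked list and does real error handling):

#include <stdio.h>
#include <stdlib.h>

struct dom {
	int id;                  /* cache instance ID (e.g. one L3 complex) */
	unsigned long cpu_mask;  /* toy stand-in for a cpumask */
};

static struct dom *doms;
static int ndoms;

/* Toy version: find the domain for a cache ID, creating it when missing. */
static void domain_add_cpu(int cpu, int cache_id)
{
	for (int i = 0; i < ndoms; i++) {
		if (doms[i].id == cache_id) {
			doms[i].cpu_mask |= 1ul << cpu;  /* existing domain: just add the CPU */
			return;
		}
	}
	doms = realloc(doms, (ndoms + 1) * sizeof(*doms));  /* error handling omitted */
	doms[ndoms++] = (struct dom){ .id = cache_id, .cpu_mask = 1ul << cpu };
	/* kernel: domain_setup_ctrlval(), arch_domain_mbm_alloc(), resctrl_online_domain() */
}

int main(void)
{
	domain_add_cpu(0, 0);
	domain_add_cpu(1, 0);   /* same L3 complex: joins the existing domain */
	domain_add_cpu(8, 1);   /* different L3 complex: creates a new domain */
	for (int i = 0; i < ndoms; i++)
		printf("domain %d cpus=0x%lx\n", doms[i].id, doms[i].cpu_mask);
	free(doms);
	return 0;
}
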
557 static void domain_remove_cpu(int cpu, struct rdt_resource *r) in domain_remove_cpu() argument
559 int id = get_cpu_cacheinfo_id(cpu, r->cache_level); in domain_remove_cpu()
563 d = rdt_find_domain(r, id, NULL); in domain_remove_cpu()
572 resctrl_offline_domain(r, d); in domain_remove_cpu()
586 if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { in domain_remove_cpu()
592 has_busy_rmid(r, d)) { in domain_remove_cpu()
612 struct rdt_resource *r; in resctrl_online_cpu() local
615 for_each_capable_rdt_resource(r) in resctrl_online_cpu()
616 domain_add_cpu(cpu, r); in resctrl_online_cpu()
625 static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) in clear_childcpus() argument
629 list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { in clear_childcpus()
639 struct rdt_resource *r; in resctrl_offline_cpu() local
642 for_each_capable_rdt_resource(r) in resctrl_offline_cpu()
643 domain_remove_cpu(cpu, r); in resctrl_offline_cpu()
662 struct rdt_resource *r; in rdt_init_padding() local
664 for_each_alloc_capable_rdt_resource(r) { in rdt_init_padding()
665 if (r->data_width > max_data_width) in rdt_init_padding()
666 max_data_width = r->data_width; in rdt_init_padding()
785 struct rdt_resource *r; in get_rdt_alloc_resources() local
795 r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; in get_rdt_alloc_resources()
796 rdt_get_cache_alloc_cfg(1, r); in get_rdt_alloc_resources()
803 r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl; in get_rdt_alloc_resources()
804 rdt_get_cache_alloc_cfg(2, r); in get_rdt_alloc_resources()
821 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; in get_rdt_mon_resources() local
833 return !rdt_get_mon_l3_config(r); in get_rdt_mon_resources()
872 struct rdt_resource *r; in rdt_init_res_defs_intel() local
874 for_each_rdt_resource(r) { in rdt_init_res_defs_intel()
875 hw_res = resctrl_to_arch_res(r); in rdt_init_res_defs_intel()
877 if (r->rid == RDT_RESOURCE_L3 || in rdt_init_res_defs_intel()
878 r->rid == RDT_RESOURCE_L2) { in rdt_init_res_defs_intel()
879 r->cache.arch_has_per_cpu_cfg = false; in rdt_init_res_defs_intel()
880 r->cache.min_cbm_bits = 1; in rdt_init_res_defs_intel()
881 } else if (r->rid == RDT_RESOURCE_MBA) { in rdt_init_res_defs_intel()
891 struct rdt_resource *r; in rdt_init_res_defs_amd() local
893 for_each_rdt_resource(r) { in rdt_init_res_defs_amd()
894 hw_res = resctrl_to_arch_res(r); in rdt_init_res_defs_amd()
896 if (r->rid == RDT_RESOURCE_L3 || in rdt_init_res_defs_amd()
897 r->rid == RDT_RESOURCE_L2) { in rdt_init_res_defs_amd()
898 r->cache.arch_has_sparse_bitmasks = true; in rdt_init_res_defs_amd()
899 r->cache.arch_has_per_cpu_cfg = true; in rdt_init_res_defs_amd()
900 r->cache.min_cbm_bits = 0; in rdt_init_res_defs_amd()
901 } else if (r->rid == RDT_RESOURCE_MBA) { in rdt_init_res_defs_amd()
904 } else if (r->rid == RDT_RESOURCE_SMBA) { in rdt_init_res_defs_amd()
953 struct rdt_resource *r; in resctrl_late_init() local
982 for_each_alloc_capable_rdt_resource(r) in resctrl_late_init()
983 pr_info("%s allocation detected\n", r->name); in resctrl_late_init()
985 for_each_mon_capable_rdt_resource(r) in resctrl_late_init()
986 pr_info("%s monitoring detected\n", r->name); in resctrl_late_init()