/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2025 Arm Ltd. */

#ifndef __ASM__MPAM_H
#define __ASM__MPAM_H

#include <linux/arm_mpam.h>
#include <linux/bitfield.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include <asm/sysreg.h>

DECLARE_STATIC_KEY_FALSE(mpam_enabled);
DECLARE_PER_CPU(u64, arm64_mpam_default);
DECLARE_PER_CPU(u64, arm64_mpam_current);

/*
 * The value of the MPAM0_EL1 sysreg when a task is in resctrl's default group.
 * This is used by the context switch code to use the resctrl CPU property
 * instead. The value is modified when CDP is enabled/disabled by mounting
 * the resctrl filesystem.
 */
extern u64 arm64_mpam_global_default;

#ifdef CONFIG_ARM64_MPAM
__mpam_regval(u16 partid_d,u16 partid_i,u8 pmg_d,u8 pmg_i)28 static inline u64 __mpam_regval(u16 partid_d, u16 partid_i, u8 pmg_d, u8 pmg_i)
29 {
30 return FIELD_PREP(MPAM0_EL1_PARTID_D, partid_d) |
31 FIELD_PREP(MPAM0_EL1_PARTID_I, partid_i) |
32 FIELD_PREP(MPAM0_EL1_PMG_D, pmg_d) |
33 FIELD_PREP(MPAM0_EL1_PMG_I, pmg_i);
34 }
/*
 * Update @cpu's default MPAM0_EL1 image, used by the context switch code for
 * tasks in resctrl's default group. WRITE_ONCE() pairs with the READ_ONCE()
 * in mpam_thread_switch().
 */
static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i,
					 u8 pmg_d, u8 pmg_i)
{
	WRITE_ONCE(per_cpu(arm64_mpam_default, cpu),
		   __mpam_regval(partid_d, partid_i, pmg_d, pmg_i));
}

/*
 * The resctrl filesystem writes to the partid/pmg values for threads and CPUs,
 * which may race with reads in mpam_thread_switch(). Ensure only one of the old
 * or new values are used. Particular care should be taken with the pmg field as
 * mpam_thread_switch() may read a partid and pmg that don't match, causing this
 * value to be stored with cache allocations, despite being considered 'free' by
 * resctrl.
 */
mpam_get_regval(struct task_struct * tsk)52 static inline u64 mpam_get_regval(struct task_struct *tsk)
53 {
54 return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg);
55 }
/*
 * Store @tsk's partid/pmg pair as a ready-made MPAM0_EL1 image in
 * thread_info. WRITE_ONCE() pairs with the READ_ONCE() in mpam_get_regval()
 * so a concurrent context switch sees either the old or the new value.
 */
static inline void mpam_set_task_partid_pmg(struct task_struct *tsk,
					    u16 partid_d, u16 partid_i,
					    u8 pmg_d, u8 pmg_i)
{
	WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg,
		   __mpam_regval(partid_d, partid_i, pmg_d, pmg_i));
}
mpam_thread_switch(struct task_struct * tsk)66 static inline void mpam_thread_switch(struct task_struct *tsk)
67 {
68 u64 oldregval;
69 int cpu = smp_processor_id();
70 u64 regval = mpam_get_regval(tsk);
71
72 if (!static_branch_likely(&mpam_enabled))
73 return;
74
75 if (regval == READ_ONCE(arm64_mpam_global_default))
76 regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu));
77
78 oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
79 if (oldregval == regval)
80 return;
81
82 write_sysreg_s(regval | MPAM1_EL1_MPAMEN, SYS_MPAM1_EL1);
83 if (system_supports_sme())
84 write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D), SYS_MPAMSM_EL1);
85 isb();
86
87 /* Synchronising the EL0 write is left until the ERET to EL0 */
88 write_sysreg_s(regval, SYS_MPAM0_EL1);
89
90 WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval);
91 }
#else
mpam_thread_switch(struct task_struct * tsk)93 static inline void mpam_thread_switch(struct task_struct *tsk) {}
#endif /* CONFIG_ARM64_MPAM */

#endif /* __ASM__MPAM_H */