xref: /linux/arch/x86/kernel/amd_node.c (revision 0d7bee10beeb59b1133bf5a4749b17a4ef3bbb01)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * AMD Node helper functions and common defines
4  *
5  * Copyright (c) 2024, Advanced Micro Devices, Inc.
6  * All Rights Reserved.
7  *
8  * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
9  */
10 
11 #include <linux/debugfs.h>
12 #include <asm/amd/node.h>
13 
14 /*
15  * AMD Nodes are a physical collection of I/O devices within an SoC. There can be one
16  * or more nodes per package.
17  *
18  * The nodes are software-visible through PCI config space. All nodes are enumerated
19  * on segment 0 bus 0. The device (slot) numbers range from 0x18 to 0x1F (maximum 8
20  * nodes) with 0x18 corresponding to node 0, 0x19 to node 1, etc. Each node can be a
21  * multi-function device.
22  *
23  * On legacy systems, these node devices represent integrated Northbridge functionality.
24  * On Zen-based systems, these node devices represent Data Fabric functionality.
25  *
26  * See "Configuration Space Accesses" section in BKDGs or
27  * "Processor x86 Core" -> "Configuration Space" section in PPRs.
28  */
/*
 * Return a reference to PCI function @func of AMD node @node, or NULL if
 * @node is out of range or the device is absent. The caller owns the
 * reference returned by pci_get_domain_bus_and_slot().
 */
struct pci_dev *amd_node_get_func(u16 node, u8 func)
{
	unsigned int devfn;

	if (node >= MAX_AMD_NUM_NODES)
		return NULL;

	/* All node devices are enumerated on segment 0, bus 0. */
	devfn = PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func);

	return pci_get_domain_bus_and_slot(0, 0, devfn);
}
36 
/* One cached root device per AMD node, populated by amd_smn_init(). */
static struct pci_dev **amd_roots;

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
/* Set once root config space is reserved; gates all SMN access in __amd_smn_rw(). */
static bool smn_exclusive;

/* PCI config offsets of the index/data register pair used for generic SMN access. */
#define SMN_INDEX_OFFSET	0x60
#define SMN_DATA_OFFSET		0x64

/* PCI config offsets of the index/data register pair used for HSMP access. */
#define HSMP_INDEX_OFFSET	0xc4
#define HSMP_DATA_OFFSET	0xc8
48 
49 /*
50  * SMN accesses may fail in ways that are difficult to detect here in the called
51  * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
52  * their own checking based on what behavior they expect.
53  *
54  * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
55  * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
56  * can be checked here, and a proper error code can be returned.
57  *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this value is correct for
 * the register/fields they need.
61  *
 * For SMN writes, success can be determined through a "write and read back".
 * However, this is not robust when done here.
64  *
65  * Possible issues:
66  *
67  * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
68  *    *not* match the write value.
69  *
70  * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
71  *    known here.
72  *
73  * 3) Bits that are "Reserved / Set to 1". Ditto above.
74  *
75  * Callers of amd_smn_write() should do the "write and read back" check
76  * themselves, if needed.
77  *
78  * For #1, they can see if their target bits got cleared.
79  *
80  * For #2 and #3, they can check if their target bits got set as intended.
81  *
82  * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
83  * the operation is considered a success, and the caller does their own
84  * checking.
85  */
/*
 * Perform one SMN access through the index/data register pair at config
 * offsets @i_off/@d_off on the root device of @node. Reads into *@value,
 * or writes *@value, per @write. Returns 0 or a negative errno; see the
 * comment above about the checking that callers must still do themselves.
 */
static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err;

	/*
	 * Check smn_exclusive before touching amd_roots: it is set only
	 * after amd_roots has been allocated and populated, so this also
	 * prevents a NULL amd_roots dereference if an SMN access happens
	 * before amd_smn_init() has completed.
	 */
	if (!smn_exclusive)
		return -ENODEV;

	if (node >= amd_num_nodes())
		return -ENODEV;

	root = amd_roots[node];
	if (!root)
		return -ENODEV;

	/* Serialize the index-then-data sequence against concurrent users. */
	guard(mutex)(&smn_mutex);

	err = pci_write_config_dword(root, i_off, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		return pcibios_err_to_errno(err);
	}

	err = (write ? pci_write_config_dword(root, d_off, *value)
		     : pci_read_config_dword(root, d_off, value));

	return pcibios_err_to_errno(err);
}
114 
amd_smn_read(u16 node,u32 address,u32 * value)115 int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
116 {
117 	int err = __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, value, false);
118 
119 	if (PCI_POSSIBLE_ERROR(*value)) {
120 		err = -ENODEV;
121 		*value = 0;
122 	}
123 
124 	return err;
125 }
126 EXPORT_SYMBOL_GPL(amd_smn_read);
127 
amd_smn_write(u16 node,u32 address,u32 value)128 int __must_check amd_smn_write(u16 node, u32 address, u32 value)
129 {
130 	return __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, &value, true);
131 }
132 EXPORT_SYMBOL_GPL(amd_smn_write);
133 
/*
 * Read or write (per @write) the SMN register at @address on @node using
 * the HSMP-specific index/data pair instead of the generic SMN pair.
 * Same return and checking semantics as __amd_smn_rw().
 */
int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
{
	return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
}
EXPORT_SYMBOL_GPL(amd_smn_hsmp_rdwr);
139 
/* State behind the optional "amd_smn" debugfs interface (see amd_smn_init()). */
static struct dentry *debugfs_dir;
/* Node and SMN address used by the debugfs "value" file accesses. */
static u16 debug_node;
static u32 debug_address;
143 
smn_node_write(struct file * file,const char __user * userbuf,size_t count,loff_t * ppos)144 static ssize_t smn_node_write(struct file *file, const char __user *userbuf,
145 			      size_t count, loff_t *ppos)
146 {
147 	u16 node;
148 	int ret;
149 
150 	ret = kstrtou16_from_user(userbuf, count, 0, &node);
151 	if (ret)
152 		return ret;
153 
154 	if (node >= amd_num_nodes())
155 		return -ENODEV;
156 
157 	debug_node = node;
158 	return count;
159 }
160 
smn_node_show(struct seq_file * m,void * v)161 static int smn_node_show(struct seq_file *m, void *v)
162 {
163 	seq_printf(m, "0x%08x\n", debug_node);
164 	return 0;
165 }
166 
smn_address_write(struct file * file,const char __user * userbuf,size_t count,loff_t * ppos)167 static ssize_t smn_address_write(struct file *file, const char __user *userbuf,
168 				 size_t count, loff_t *ppos)
169 {
170 	int ret;
171 
172 	ret = kstrtouint_from_user(userbuf, count, 0, &debug_address);
173 	if (ret)
174 		return ret;
175 
176 	return count;
177 }
178 
smn_address_show(struct seq_file * m,void * v)179 static int smn_address_show(struct seq_file *m, void *v)
180 {
181 	seq_printf(m, "0x%08x\n", debug_address);
182 	return 0;
183 }
184 
smn_value_show(struct seq_file * m,void * v)185 static int smn_value_show(struct seq_file *m, void *v)
186 {
187 	u32 val;
188 	int ret;
189 
190 	ret = amd_smn_read(debug_node, debug_address, &val);
191 	if (ret)
192 		return ret;
193 
194 	seq_printf(m, "0x%08x\n", val);
195 	return 0;
196 }
197 
smn_value_write(struct file * file,const char __user * userbuf,size_t count,loff_t * ppos)198 static ssize_t smn_value_write(struct file *file, const char __user *userbuf,
199 			       size_t count, loff_t *ppos)
200 {
201 	u32 val;
202 	int ret;
203 
204 	ret = kstrtouint_from_user(userbuf, count, 0, &val);
205 	if (ret)
206 		return ret;
207 
208 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
209 
210 	ret = amd_smn_write(debug_node, debug_address, val);
211 	if (ret)
212 		return ret;
213 
214 	return count;
215 }
216 
/* Generate the read/write file_operations (smn_*_fops) for the debugfs files. */
DEFINE_SHOW_STORE_ATTRIBUTE(smn_node);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_address);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_value);
220 
get_next_root(struct pci_dev * root)221 static struct pci_dev *get_next_root(struct pci_dev *root)
222 {
223 	while ((root = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, root))) {
224 		/* Root device is Device 0 Function 0. */
225 		if (root->devfn)
226 			continue;
227 
228 		if (root->vendor != PCI_VENDOR_ID_AMD &&
229 		    root->vendor != PCI_VENDOR_ID_HYGON)
230 			continue;
231 
232 		break;
233 	}
234 
235 	return root;
236 }
237 
/* Set by the "amd_smn_debugfs_enable" kernel command line option. */
static bool enable_dfs;

/* __setup() handler; returning 1 marks the option as consumed. */
static int __init amd_smn_enable_dfs(char *str)
{
	enable_dfs = true;
	return 1;
}
__setup("amd_smn_debugfs_enable", amd_smn_enable_dfs);
246 
/*
 * Discover the AMD/Hygon root devices, reserve their PCI config space so
 * user space cannot race the SMN index/data pairs, and cache one root per
 * AMD node for later SMN accesses. Zen-based systems only.
 *
 * Returns 0 on success or when there is nothing to do, negative errno on
 * failure.
 */
static int __init amd_smn_init(void)
{
	u16 count, num_roots, roots_per_node, node, num_nodes;
	struct pci_dev *root;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return 0;

	guard(mutex)(&smn_mutex);

	/* Already initialized. */
	if (amd_roots)
		return 0;

	/* First pass: count roots and lock out user space config access. */
	num_roots = 0;
	root = NULL;
	while ((root = get_next_root(root))) {
		pci_dbg(root, "Reserving PCI config space\n");

		/*
		 * There are a few SMN index/data pairs and other registers
		 * that shouldn't be accessed by user space. So reserve the
		 * entire PCI config space for simplicity rather than covering
		 * specific registers piecemeal.
		 */
		if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
			pci_err(root, "Failed to reserve config space\n");
			/*
			 * NOTE(review): this early return keeps the reference
			 * pci_get_class() took on @root and the regions already
			 * reserved on earlier roots — confirm whether cleanup
			 * (pci_dev_put()) is needed here.
			 */
			return -EEXIST;
		}

		num_roots++;
	}

	pr_debug("Found %d AMD root devices\n", num_roots);

	if (!num_roots)
		return -ENODEV;

	num_nodes = amd_num_nodes();
	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
	if (!amd_roots)
		return -ENOMEM;

	/*
	 * NOTE(review): assumes num_roots >= num_nodes; if there were ever
	 * fewer roots than nodes, roots_per_node would be 0 and the modulo
	 * below would divide by zero — confirm this hardware invariant.
	 */
	roots_per_node = num_roots / num_nodes;

	/* Second pass: keep the first root of each node's group (ref held). */
	count = 0;
	node = 0;
	root = NULL;
	while (node < num_nodes && (root = get_next_root(root))) {
		/* Use one root for each node and skip the rest. */
		if (count++ % roots_per_node)
			continue;

		pci_dbg(root, "is root for AMD node %u\n", node);
		amd_roots[node++] = root;
	}

	/* Optional debugfs interface, gated by a command line option. */
	if (enable_dfs) {
		debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir);

		debugfs_create_file("node",	0600, debugfs_dir, NULL, &smn_node_fops);
		debugfs_create_file("address",	0600, debugfs_dir, NULL, &smn_address_fops);
		debugfs_create_file("value",	0600, debugfs_dir, NULL, &smn_value_fops);
	}

	/* Publish: SMN accesses are now permitted (see __amd_smn_rw()). */
	smn_exclusive = true;

	return 0;
}

fs_initcall(amd_smn_init);
317