1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * AMD Node helper functions and common defines
4  *
5  * Copyright (c) 2024, Advanced Micro Devices, Inc.
6  * All Rights Reserved.
7  *
8  * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
9  */
10 
11 #include <linux/debugfs.h>
12 #include <asm/amd_node.h>
13 
14 /*
15  * AMD Nodes are a physical collection of I/O devices within an SoC. There can be one
16  * or more nodes per package.
17  *
18  * The nodes are software-visible through PCI config space. All nodes are enumerated
19  * on segment 0 bus 0. The device (slot) numbers range from 0x18 to 0x1F (maximum 8
20  * nodes) with 0x18 corresponding to node 0, 0x19 to node 1, etc. Each node can be a
21  * multi-function device.
22  *
23  * On legacy systems, these node devices represent integrated Northbridge functionality.
24  * On Zen-based systems, these node devices represent Data Fabric functionality.
25  *
26  * See "Configuration Space Accesses" section in BKDGs or
27  * "Processor x86 Core" -> "Configuration Space" section in PPRs.
28  */
/*
 * Look up the PCI device for one function of an AMD node.
 *
 * Returns a referenced struct pci_dev (caller must pci_dev_put() it),
 * or NULL if @node is out of range or the device does not exist.
 */
struct pci_dev *amd_node_get_func(u16 node, u8 func)
{
	unsigned int devfn;

	if (node >= MAX_AMD_NUM_NODES)
		return NULL;

	/* Nodes occupy slots 0x18+ on segment 0, bus 0 (see comment above). */
	devfn = PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func);

	return pci_get_domain_bus_and_slot(0, 0, devfn);
}
36 
/* Data Fabric config register offsets used to locate the root bus number. */
#define DF_BLK_INST_CNT		0x040
#define	DF_CFG_ADDR_CNTL_LEGACY	0x084
#define	DF_CFG_ADDR_CNTL_DF4	0xC04

/* Major revision field within the DF_BLK_INST_CNT register. */
#define DF_MAJOR_REVISION	GENMASK(27, 24)
42 
get_cfg_addr_cntl_offset(struct pci_dev * df_f0)43 static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0)
44 {
45 	u32 reg;
46 
47 	/*
48 	 * Revision fields added for DF4 and later.
49 	 *
50 	 * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
51 	 */
52 	if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg))
53 		return 0;
54 
55 	if (reg & DF_MAJOR_REVISION)
56 		return DF_CFG_ADDR_CNTL_DF4;
57 
58 	return DF_CFG_ADDR_CNTL_LEGACY;
59 }
60 
/*
 * Return a referenced pci_dev for the root device of @node, or NULL on
 * failure. Caller must pci_dev_put() the returned device.
 */
struct pci_dev *amd_node_get_root(u16 node)
{
	struct pci_dev *root;
	u16 cntl_off;
	u8 bus;

	/* Data Fabric root discovery only applies to Zen-based systems. */
	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return NULL;

	/*
	 * D18F0xXXX [Config Address Control] (DF::CfgAddressCntl)
	 * Bits [7:0] (SecBusNum) holds the bus number of the root device for
	 * this Data Fabric instance. The segment, device, and function will be 0.
	 */
	struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0);
	if (!df_f0)
		return NULL;

	/* A zero offset means the DF revision register could not be read. */
	cntl_off = get_cfg_addr_cntl_offset(df_f0);
	if (!cntl_off)
		return NULL;

	if (pci_read_config_byte(df_f0, cntl_off, &bus))
		return NULL;

	/* Grab the pointer for the actual root device instance. */
	root = pci_get_domain_bus_and_slot(0, bus, 0);

	/* NOTE(review): root may be NULL here — confirm pci_dbg() tolerates that. */
	pci_dbg(root, "is root for AMD node %u\n", node);
	return root;
}
92 
/* Root device per node, filled by amd_cache_roots(); entries may be NULL. */
static struct pci_dev **amd_roots;

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
/* Set once the root config spaces are reserved; SMN access refused before. */
static bool smn_exclusive;

/* SMN index/data register pair in root device config space. */
#define SMN_INDEX_OFFSET	0x60
#define SMN_DATA_OFFSET		0x64

/* HSMP index/data register pair in root device config space. */
#define HSMP_INDEX_OFFSET	0xc4
#define HSMP_DATA_OFFSET	0xc8
104 
105 /*
106  * SMN accesses may fail in ways that are difficult to detect here in the called
107  * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
108  * their own checking based on what behavior they expect.
109  *
110  * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
111  * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
112  * can be checked here, and a proper error code can be returned.
113  *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this value is correct for
 * the register/fields they need.
117  *
 * For SMN writes, success can be determined through a "write and read back"
 * check. However, this is not robust when done here.
120  *
121  * Possible issues:
122  *
123  * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
124  *    *not* match the write value.
125  *
126  * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
127  *    known here.
128  *
129  * 3) Bits that are "Reserved / Set to 1". Ditto above.
130  *
131  * Callers of amd_smn_write() should do the "write and read back" check
132  * themselves, if needed.
133  *
134  * For #1, they can see if their target bits got cleared.
135  *
136  * For #2 and #3, they can check if their target bits got set as intended.
137  *
138  * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
139  * the operation is considered a success, and the caller does their own
140  * checking.
141  */
/*
 * Common SMN/HSMP accessor: program the index register with @address, then
 * read or write the data register. Returns 0 or a negative error code.
 */
static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err;

	/* Unknown node, or a node whose root device was never found. */
	if (node >= amd_num_nodes())
		return -ENODEV;

	root = amd_roots[node];
	if (!root)
		return -ENODEV;

	/* Refuse access until user space is locked out of the config space. */
	if (!smn_exclusive)
		return -ENODEV;

	guard(mutex)(&smn_mutex);

	err = pci_write_config_dword(root, i_off, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		return pcibios_err_to_errno(err);
	}

	if (write)
		err = pci_write_config_dword(root, d_off, *value);
	else
		err = pci_read_config_dword(root, d_off, value);

	return pcibios_err_to_errno(err);
}
170 
amd_smn_read(u16 node,u32 address,u32 * value)171 int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
172 {
173 	int err = __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, value, false);
174 
175 	if (PCI_POSSIBLE_ERROR(*value)) {
176 		err = -ENODEV;
177 		*value = 0;
178 	}
179 
180 	return err;
181 }
182 EXPORT_SYMBOL_GPL(amd_smn_read);
183 
amd_smn_write(u16 node,u32 address,u32 value)184 int __must_check amd_smn_write(u16 node, u32 address, u32 value)
185 {
186 	return __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, &value, true);
187 }
188 EXPORT_SYMBOL_GPL(amd_smn_write);
189 
/* SMN access through the HSMP index/data register pair instead of SMN's. */
int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
{
	return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
}
EXPORT_SYMBOL_GPL(amd_smn_hsmp_rdwr);
195 
/* State backing the optional debugfs interface below. */
static struct dentry *debugfs_dir;
static u16 debug_node;		/* node selected via the "node" file */
static u32 debug_address;	/* SMN address selected via the "address" file */
199 
smn_node_write(struct file * file,const char __user * userbuf,size_t count,loff_t * ppos)200 static ssize_t smn_node_write(struct file *file, const char __user *userbuf,
201 			      size_t count, loff_t *ppos)
202 {
203 	u16 node;
204 	int ret;
205 
206 	ret = kstrtou16_from_user(userbuf, count, 0, &node);
207 	if (ret)
208 		return ret;
209 
210 	if (node >= amd_num_nodes())
211 		return -ENODEV;
212 
213 	debug_node = node;
214 	return count;
215 }
216 
/* debugfs: show the currently selected node number. */
static int smn_node_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%08x\n", debug_node);
	return 0;
}
222 
smn_address_write(struct file * file,const char __user * userbuf,size_t count,loff_t * ppos)223 static ssize_t smn_address_write(struct file *file, const char __user *userbuf,
224 				 size_t count, loff_t *ppos)
225 {
226 	int ret;
227 
228 	ret = kstrtouint_from_user(userbuf, count, 0, &debug_address);
229 	if (ret)
230 		return ret;
231 
232 	return count;
233 }
234 
/* debugfs: show the currently selected SMN address. */
static int smn_address_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%08x\n", debug_address);
	return 0;
}
240 
smn_value_show(struct seq_file * m,void * v)241 static int smn_value_show(struct seq_file *m, void *v)
242 {
243 	u32 val;
244 	int ret;
245 
246 	ret = amd_smn_read(debug_node, debug_address, &val);
247 	if (ret)
248 		return ret;
249 
250 	seq_printf(m, "0x%08x\n", val);
251 	return 0;
252 }
253 
smn_value_write(struct file * file,const char __user * userbuf,size_t count,loff_t * ppos)254 static ssize_t smn_value_write(struct file *file, const char __user *userbuf,
255 			       size_t count, loff_t *ppos)
256 {
257 	u32 val;
258 	int ret;
259 
260 	ret = kstrtouint_from_user(userbuf, count, 0, &val);
261 	if (ret)
262 		return ret;
263 
264 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
265 
266 	ret = amd_smn_write(debug_node, debug_address, val);
267 	if (ret)
268 		return ret;
269 
270 	return count;
271 }
272 
/* Build debugfs file_operations from the show/write pairs above. */
DEFINE_SHOW_STORE_ATTRIBUTE(smn_node);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_address);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_value);
276 
amd_cache_roots(void)277 static int amd_cache_roots(void)
278 {
279 	u16 node, num_nodes = amd_num_nodes();
280 
281 	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
282 	if (!amd_roots)
283 		return -ENOMEM;
284 
285 	for (node = 0; node < num_nodes; node++)
286 		amd_roots[node] = amd_node_get_root(node);
287 
288 	return 0;
289 }
290 
/*
 * Reserve the config space of every AMD/Hygon root device exclusively so
 * that user space cannot race kernel SMN accesses through the same
 * index/data registers. Sets smn_exclusive on success.
 */
static int reserve_root_config_spaces(void)
{
	struct pci_dev *root = NULL;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus))) {
		/* Root device is Device 0 Function 0 on each Primary Bus. */
		root = pci_get_slot(bus, 0);
		if (!root)
			continue;

		/*
		 * NOTE(review): the reference taken by pci_get_slot() is never
		 * dropped, including on these continue paths — confirm whether
		 * holding the references is intentional (e.g. to pin the
		 * devices while their config regions stay reserved).
		 */
		if (root->vendor != PCI_VENDOR_ID_AMD &&
		    root->vendor != PCI_VENDOR_ID_HYGON)
			continue;

		pci_dbg(root, "Reserving PCI config space\n");

		/*
		 * There are a few SMN index/data pairs and other registers
		 * that shouldn't be accessed by user space.
		 * So reserve the entire PCI config space for simplicity rather
		 * than covering specific registers piecemeal.
		 */
		if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
			pci_err(root, "Failed to reserve config space\n");
			return -EEXIST;
		}
	}

	smn_exclusive = true;
	return 0;
}
323 
/* Set via the "amd_smn_debugfs_enable" kernel command line parameter. */
static bool enable_dfs;

/* Command line hook: opt in to the SMN debugfs interface. */
static int __init amd_smn_enable_dfs(char *str)
{
	enable_dfs = true;
	return 1;
}
__setup("amd_smn_debugfs_enable", amd_smn_enable_dfs);
332 
/*
 * One-time setup: cache the per-node root devices, lock user space out of
 * their config spaces, and optionally create the debugfs interface.
 */
static int __init amd_smn_init(void)
{
	int err;

	/* SMN access is only supported on Zen-based systems. */
	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return 0;

	guard(mutex)(&smn_mutex);

	/* Already initialized (amd_roots doubles as the "done" flag). */
	if (amd_roots)
		return 0;

	err = amd_cache_roots();
	if (err)
		return err;

	err = reserve_root_config_spaces();
	if (err)
		return err;

	if (enable_dfs) {
		/* debugfs is best-effort; creation failures are ignored. */
		debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir);

		debugfs_create_file("node",	0600, debugfs_dir, NULL, &smn_node_fops);
		debugfs_create_file("address",	0600, debugfs_dir, NULL, &smn_address_fops);
		debugfs_create_file("value",	0600, debugfs_dir, NULL, &smn_value_fops);
	}

	return 0;
}

fs_initcall(amd_smn_init);
365