// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>
#include <asm/setup.h>
#include <asm/mce.h>

#include <linux/platform_data/x86/apple.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config;
	u16 word;

	/*
	 * The BIOS may enable hardware IRQ balancing on E7520/E7320/E7525
	 * (revision ID 0x9 and below) based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	if (dev->revision > 0x9)
		return;

	/* enable access to config space */
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config|0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

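	/*
	 * Bit 13 clear is assumed to mean that the BIOS left hardware
	 * IRQ balancing enabled, in which case software balancing and
	 * affinity must be disabled.
	 */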
	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
			"disabling irq balancing and affinity\n");
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space */
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
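/*
 * Address of an HPET that the BIOS failed to advertise. Set by the
 * force-enable quirks below; the HPET driver is expected to pick it
 * up later during boot.
 */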
unsigned long force_hpet_address;

static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address)
		return;

	BUG_ON(rcba_base == NULL);

	/* read the HPTC register, dword mode only */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		/* HPET disabled in HPTC. Try to enable it */
		writel(val | 0x80, rcba_base + 0x3404);
	}

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80))
		BUG();
	else
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 rcba;
	int err = 0;

	if (hpet_address || force_hpet_address)
		return;

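	/*
	 * RCBA (Root Complex Base Address) lives at config offset 0xF0
	 * on ICH6 and later; the HPET configuration register (HPTC) is
	 * assumed to sit at RCBA + 0x3404 as on the documented ICHs.
	 */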
	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the HPTC register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by the BIOS */
		val = val & 0x3;
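		/* AS[1:0] selects one of four 4 KiB-spaced bases */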
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Try to enable it */
	writel(val | 0x80, rcba_base + 0x3404);

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
			 ich_force_enable_hpet);

static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
	printk(KERN_INFO "HPET not enabled in BIOS. "
	       "You might try the hpet=force boot option\n");
}

static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 gen_cntl;

	if (!force_hpet_address || !cached_dev)
		return;

	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 gen_cntl;

	if (hpet_address || force_hpet_address)
		return;

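	/*
	 * On these older ICHs the HPET setup is assumed to live in the
	 * LPC bridge's GEN_CNTL register at config offset 0xD0 rather
	 * than in RCBA space.
	 */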
	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is the HPET enable bit.
	 * Bits 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at 0xFED00000 and check
	 * whether it sticks.
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in HPTC. Just not reported by the BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Make sure the user explicitly asked
 * for this (hpet=force).
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
	if (hpet_force_user)
		old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);

static void vt8237_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address || !cached_dev)
		return;

	val = 0xfed00000 | 0x80;
	pci_write_config_dword(cached_dev, 0x68, val);

	pci_read_config_dword(cached_dev, 0x68, &val);
	if (val & 0x80)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is the HPET enable bit.
	 * Bits 31:10 are the HPET base address (contrary to what the
	 * datasheet claims).
	 */
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at 0xFED00000 and check
	 * whether it sticks.
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
			 vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
	int err = 0;
	u32 d = 0;
	u8  b = 0;

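	/*
	 * Undocumented unlock sequence: clearing bit 5 of config reg
	 * 0xac and setting bit 8 of config reg 0x70 is assumed to expose
	 * the chip's real revision ID in the standard revision register
	 * (config offset 0x8).
	 */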
	err = pci_read_config_byte(dev, 0xac, &b);
	b &= ~(1<<5);
	err |= pci_write_config_byte(dev, 0xac, b);
	err |= pci_read_config_dword(dev, 0x70, &d);
	d |= 1<<8;
	err |= pci_write_config_dword(dev, 0x70, d);
	err |= pci_read_config_dword(dev, 0x8, &d);
	d &= 0xff;
	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

	WARN_ON_ONCE(err);

	return d;
}

static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	d = ati_ixp4x0_rev(dev);
	if (d < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/* enable interrupt */
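	/*
	 * 0xcd6/0xcd7 are assumed to be the SBx00 PM I/O index/data
	 * port pair; PM register 0x72 bit 0 gates the HPET interrupt.
	 */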
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

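	/*
	 * Per the LinuxBIOS-derived sequence above, config reg 0x44 is
	 * assumed to hold the HPET base in bits 31:1 with bit 0 as the
	 * enable bit, hence writing base | 1 and masking bit 0 below.
	 */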
	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		force_hpet_address);
	cached_dev = dev;
}

/* ISA bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			nvidia_force_enable_hpet);

void force_hpet_resume(void)
{
	switch (force_hpet_resume_type) {
	case ICH_FORCE_HPET_RESUME:
		ich_force_hpet_resume();
		return;
	case OLD_ICH_FORCE_HPET_RESUME:
		old_ich_force_hpet_resume();
		return;
	case VT8237_FORCE_HPET_RESUME:
		vt8237_force_hpet_resume();
		return;
	case NVIDIA_FORCE_HPET_RESUME:
		nvidia_force_hpet_resume();
		return;
	case ATI_FORCE_HPET_RESUME:
		ati_force_hpet_resume();
		return;
	default:
		break;
	}
}

/*
 * According to the datasheet, e6xx systems have the HPET hardwired
 * to 0xFED00000.
 */
static void e6xx_force_enable_hpet(struct pci_dev *dev)
{
	if (hpet_address || force_hpet_address)
		return;

	force_hpet_address = 0xFED00000;
	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
		"0x%lx\n", force_hpet_address);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
			 e6xx_force_enable_hpet);

/*
 * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);

#endif

#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
	struct pci_dev *nb_ht;
	unsigned int devfn;
	u32 node;
	u32 val;

	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
	nb_ht = pci_get_slot(dev->bus, devfn);
	if (!nb_ht)
		return;

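	/*
	 * Function 0 of the northbridge is the HT configuration device;
	 * its register 0x60 is the Node ID register, with this node's
	 * number in bits 2:0.
	 */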
	pci_read_config_dword(nb_ht, 0x60, &val);
	node = pcibus_to_node(dev->bus) | (val & 7);
	/*
	 * Some hardware may return an invalid node ID,
	 * so check it first:
	 */
	if (node_online(node))
		set_dev_node(&dev->dev, node);
	pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);

#endif

#ifdef CONFIG_PCI
/*
 * The processor does not ensure that the DRAM scrub read/write
 * sequence is atomic with respect to accesses to the CC6 save state
 * area. Therefore, if a concurrent scrub read/write access is to the
 * same address, the entry may appear as if it was not written. This
 * quirk applies to Fam16h models 00h-0Fh.
 *
 * See "Revision Guide" for AMD F16h models 00h-0fh,
 * document 51810 rev. 3.04, Nov 2013.
 */
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
	u32 val;

	/*
	 * Suggested workaround:
	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
	 */
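	/*
	 * D18F3x58[4:0] (DramScrub) sets the DRAM scrub rate (0 = off);
	 * D18F3x5C bit 0 (ScrubReDirEn) redirects scrubber writes.
	 * Field names per the Fam16h BKDG.
	 */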
	pci_read_config_dword(dev, 0x58, &val);
	if (val & 0x1F) {
		val &= ~(0x1F);
		pci_write_config_dword(dev, 0x58, val);
	}

	pci_read_config_dword(dev, 0x5C, &val);
	if (val & BIT(0)) {
		val &= ~BIT(0);
		pci_write_config_dword(dev, 0x5c, val);
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
			amd_disable_seq_and_redirect_scrub);

/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
	u32 capid0;

	pci_read_config_dword(pdev, 0x84, &capid0);

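	/*
	 * CAPID0 bit 4 is assumed to flag an advanced RAS SKU with
	 * recoverable memory machine checks.
	 */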
	if (capid0 & 0x10)
		enable_copy_mc_fragile();
}

/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
	u32 capid0, capid5;

	pci_read_config_dword(pdev, 0x84, &capid0);
	pci_read_config_dword(pdev, 0x98, &capid5);

	/*
	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU.
	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
	 * enabled, so memory machine check recovery is also enabled.
	 */
	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
		enable_copy_mc_fragile();
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif

bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);

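/*
 * Runs early during boot; uses DMI to flag Apple hardware so that
 * platform code elsewhere can apply Apple-specific quirks.
 */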
void __init early_platform_quirks(void)
{
	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
}