1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
6 */
7
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
10
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/syscore_ops.h>
16 #include <linux/interrupt.h>
17 #include <linux/msi.h>
18 #include <linux/irq.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/export.h>
21 #include <linux/kmemleak.h>
22 #include <linux/cc_platform.h>
23 #include <linux/iopoll.h>
24 #include <asm/pci-direct.h>
25 #include <asm/iommu.h>
26 #include <asm/apic.h>
27 #include <asm/gart.h>
28 #include <asm/x86_init.h>
29 #include <asm/io_apic.h>
30 #include <asm/irq_remapping.h>
31 #include <asm/set_memory.h>
32 #include <asm/sev.h>
33
34 #include <linux/crash_dump.h>
35
36 #include "amd_iommu.h"
37 #include "../irq_remapping.h"
38 #include "../iommu-pages.h"
39
40 /*
41 * definitions for the ACPI scanning code
42 */
43 #define IVRS_HEADER_LENGTH 48
44
45 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
46 #define ACPI_IVMD_TYPE_ALL 0x20
47 #define ACPI_IVMD_TYPE 0x21
48 #define ACPI_IVMD_TYPE_RANGE 0x22
49
50 #define IVHD_DEV_ALL 0x01
51 #define IVHD_DEV_SELECT 0x02
52 #define IVHD_DEV_SELECT_RANGE_START 0x03
53 #define IVHD_DEV_RANGE_END 0x04
54 #define IVHD_DEV_ALIAS 0x42
55 #define IVHD_DEV_ALIAS_RANGE 0x43
56 #define IVHD_DEV_EXT_SELECT 0x46
57 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
58 #define IVHD_DEV_SPECIAL 0x48
59 #define IVHD_DEV_ACPI_HID 0xf0
60
61 #define UID_NOT_PRESENT 0
62 #define UID_IS_INTEGER 1
63 #define UID_IS_CHARACTER 2
64
65 #define IVHD_SPECIAL_IOAPIC 1
66 #define IVHD_SPECIAL_HPET 2
67
68 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01
69 #define IVHD_FLAG_PASSPW_EN_MASK 0x02
70 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
71 #define IVHD_FLAG_ISOC_EN_MASK 0x08
72
73 #define IVMD_FLAG_EXCL_RANGE 0x08
74 #define IVMD_FLAG_IW 0x04
75 #define IVMD_FLAG_IR 0x02
76 #define IVMD_FLAG_UNITY_MAP 0x01
77
78 #define ACPI_DEVFLAG_INITPASS 0x01
79 #define ACPI_DEVFLAG_EXTINT 0x02
80 #define ACPI_DEVFLAG_NMI 0x04
81 #define ACPI_DEVFLAG_SYSMGT1 0x10
82 #define ACPI_DEVFLAG_SYSMGT2 0x20
83 #define ACPI_DEVFLAG_LINT0 0x40
84 #define ACPI_DEVFLAG_LINT1 0x80
85 #define ACPI_DEVFLAG_ATSDIS 0x10000000
86
87 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
88 | ((dev & 0x1f) << 3) | (fn & 0x7))
89
90 /*
91 * ACPI table definitions
92 *
93 * These data structures are laid over the table to parse the important values
94 * out of it.
95 */
96
97 /*
98 * structure describing one IOMMU in the ACPI table. Typically followed by one
99 * or more ivhd_entrys.
100 */
101 struct ivhd_header {
102 u8 type;
103 u8 flags;
104 u16 length;
105 u16 devid;
106 u16 cap_ptr;
107 u64 mmio_phys;
108 u16 pci_seg;
109 u16 info;
110 u32 efr_attr;
111
112 /* Following only valid on IVHD type 11h and 40h */
113 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
114 u64 efr_reg2;
115 } __attribute__((packed));
116
117 /*
118 * A device entry describing which devices a specific IOMMU translates and
119 * which requestor ids they use.
120 */
121 struct ivhd_entry {
122 u8 type;
123 u16 devid;
124 u8 flags;
125 struct_group(ext_hid,
126 u32 ext;
127 u32 hidh;
128 );
129 u64 cid;
130 u8 uidf;
131 u8 uidl;
132 u8 uid;
133 } __attribute__((packed));
134
135 /*
136 * An AMD IOMMU memory definition structure. It defines things like exclusion
137 * ranges for devices and regions that should be unity mapped.
138 */
139 struct ivmd_header {
140 u8 type;
141 u8 flags;
142 u16 length;
143 u16 devid;
144 u16 aux;
145 u16 pci_seg;
146 u8 resv[6];
147 u64 range_start;
148 u64 range_length;
149 } __attribute__((packed));
150
151 bool amd_iommu_dump;
152 bool amd_iommu_irq_remap __read_mostly;
153
154 enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
155 /* Host page table level */
156 u8 amd_iommu_hpt_level;
157 /* Guest page table level */
158 int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
159
160 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
161 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
162
163 static bool amd_iommu_detected;
164 static bool amd_iommu_disabled __initdata;
165 static bool amd_iommu_force_enable __initdata;
166 static bool amd_iommu_irtcachedis;
167 static int amd_iommu_target_ivhd_type;
168
169 /* Global EFR and EFR2 registers */
170 u64 amd_iommu_efr;
171 u64 amd_iommu_efr2;
172
173 /* Host (v1) page table is not supported*/
174 bool amd_iommu_hatdis;
175
176 /* SNP is enabled on the system? */
177 bool amd_iommu_snp_en;
178 EXPORT_SYMBOL(amd_iommu_snp_en);
179
180 LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
181 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
182 LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */
183
184 /* Number of IOMMUs present in the system */
185 static int amd_iommus_present;
186
187 /* IOMMUs have a non-present cache? */
188 bool amd_iommu_np_cache __read_mostly;
189 bool amd_iommu_iotlb_sup __read_mostly = true;
190
191 static bool amd_iommu_pc_present __read_mostly;
192 bool amdr_ivrs_remap_support __read_mostly;
193
194 bool amd_iommu_force_isolation __read_mostly;
195
196 unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES;
197
198 enum iommu_init_state {
199 IOMMU_START_STATE,
200 IOMMU_IVRS_DETECTED,
201 IOMMU_ACPI_FINISHED,
202 IOMMU_ENABLED,
203 IOMMU_PCI_INIT,
204 IOMMU_INTERRUPTS_EN,
205 IOMMU_INITIALIZED,
206 IOMMU_NOT_FOUND,
207 IOMMU_INIT_ERROR,
208 IOMMU_CMDLINE_DISABLED,
209 };
210
211 /* Early ioapic and hpet maps from kernel command line */
212 #define EARLY_MAP_SIZE 4
213 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
214 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
215 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
216
217 static int __initdata early_ioapic_map_size;
218 static int __initdata early_hpet_map_size;
219 static int __initdata early_acpihid_map_size;
220
221 static bool __initdata cmdline_maps;
222
223 static enum iommu_init_state init_state = IOMMU_START_STATE;
224
225 static int amd_iommu_enable_interrupts(void);
226 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
227
228 static bool amd_iommu_pre_enabled = true;
229
230 static u32 amd_iommu_ivinfo __initdata;
231
translation_pre_enabled(struct amd_iommu * iommu)232 bool translation_pre_enabled(struct amd_iommu *iommu)
233 {
234 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
235 }
236
clear_translation_pre_enabled(struct amd_iommu * iommu)237 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
238 {
239 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
240 }
241
init_translation_status(struct amd_iommu * iommu)242 static void init_translation_status(struct amd_iommu *iommu)
243 {
244 u64 ctrl;
245
246 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
247 if (ctrl & (1<<CONTROL_IOMMU_EN))
248 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
249 }
250
amd_iommu_get_num_iommus(void)251 int amd_iommu_get_num_iommus(void)
252 {
253 return amd_iommus_present;
254 }
255
amd_iommu_ht_range_ignore(void)256 bool amd_iommu_ht_range_ignore(void)
257 {
258 return check_feature2(FEATURE_HT_RANGE_IGNORE);
259 }
260
261 /*
262 * Iterate through all the IOMMUs to get common EFR
263 * masks among all IOMMUs and warn if found inconsistency.
264 */
get_global_efr(void)265 static __init void get_global_efr(void)
266 {
267 struct amd_iommu *iommu;
268
269 for_each_iommu(iommu) {
270 u64 tmp = iommu->features;
271 u64 tmp2 = iommu->features2;
272
273 if (list_is_first(&iommu->list, &amd_iommu_list)) {
274 amd_iommu_efr = tmp;
275 amd_iommu_efr2 = tmp2;
276 continue;
277 }
278
279 if (amd_iommu_efr == tmp &&
280 amd_iommu_efr2 == tmp2)
281 continue;
282
283 pr_err(FW_BUG
284 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
285 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
286 iommu->index, iommu->pci_seg->id,
287 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
288 PCI_FUNC(iommu->devid));
289
290 amd_iommu_efr &= tmp;
291 amd_iommu_efr2 &= tmp2;
292 }
293
294 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
295 }
296
297 /*
298 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
299 * Default to IVHD EFR since it is available sooner
300 * (i.e. before PCI init).
301 */
early_iommu_features_init(struct amd_iommu * iommu,struct ivhd_header * h)302 static void __init early_iommu_features_init(struct amd_iommu *iommu,
303 struct ivhd_header *h)
304 {
305 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
306 iommu->features = h->efr_reg;
307 iommu->features2 = h->efr_reg2;
308 }
309 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
310 amdr_ivrs_remap_support = true;
311 }
312
313 /* Access to l1 and l2 indexed register spaces */
314
iommu_read_l1(struct amd_iommu * iommu,u16 l1,u8 address)315 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
316 {
317 u32 val;
318
319 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
320 pci_read_config_dword(iommu->dev, 0xfc, &val);
321 return val;
322 }
323
iommu_write_l1(struct amd_iommu * iommu,u16 l1,u8 address,u32 val)324 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
325 {
326 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
327 pci_write_config_dword(iommu->dev, 0xfc, val);
328 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
329 }
330
iommu_read_l2(struct amd_iommu * iommu,u8 address)331 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
332 {
333 u32 val;
334
335 pci_write_config_dword(iommu->dev, 0xf0, address);
336 pci_read_config_dword(iommu->dev, 0xf4, &val);
337 return val;
338 }
339
iommu_write_l2(struct amd_iommu * iommu,u8 address,u32 val)340 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
341 {
342 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
343 pci_write_config_dword(iommu->dev, 0xf4, val);
344 }
345
346 /****************************************************************************
347 *
348 * AMD IOMMU MMIO register space handling functions
349 *
350 * These functions are used to program the IOMMU device registers in
351 * MMIO space required for that driver.
352 *
353 ****************************************************************************/
354
355 /*
356 * This function set the exclusion range in the IOMMU. DMA accesses to the
357 * exclusion range are passed through untranslated
358 */
iommu_set_exclusion_range(struct amd_iommu * iommu)359 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
360 {
361 u64 start = iommu->exclusion_start & PAGE_MASK;
362 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
363 u64 entry;
364
365 if (!iommu->exclusion_start)
366 return;
367
368 entry = start | MMIO_EXCL_ENABLE_MASK;
369 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
370 &entry, sizeof(entry));
371
372 entry = limit;
373 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
374 &entry, sizeof(entry));
375 }
376
iommu_set_cwwb_range(struct amd_iommu * iommu)377 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
378 {
379 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
380 u64 entry = start & PM_ADDR_MASK;
381
382 if (!check_feature(FEATURE_SNP))
383 return;
384
385 /* Note:
386 * Re-purpose Exclusion base/limit registers for Completion wait
387 * write-back base/limit.
388 */
389 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
390 &entry, sizeof(entry));
391
392 /* Note:
393 * Default to 4 Kbytes, which can be specified by setting base
394 * address equal to the limit address.
395 */
396 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
397 &entry, sizeof(entry));
398 }
399
400 /* Programs the physical address of the device table into the IOMMU hardware */
iommu_set_device_table(struct amd_iommu * iommu)401 static void iommu_set_device_table(struct amd_iommu *iommu)
402 {
403 u64 entry;
404 u32 dev_table_size = iommu->pci_seg->dev_table_size;
405 void *dev_table = (void *)get_dev_table(iommu);
406
407 BUG_ON(iommu->mmio_base == NULL);
408
409 if (is_kdump_kernel())
410 return;
411
412 entry = iommu_virt_to_phys(dev_table);
413 entry |= (dev_table_size >> 12) - 1;
414 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
415 &entry, sizeof(entry));
416 }
417
iommu_feature_set(struct amd_iommu * iommu,u64 val,u64 mask,u8 shift)418 static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift)
419 {
420 u64 ctrl;
421
422 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
423 mask <<= shift;
424 ctrl &= ~mask;
425 ctrl |= (val << shift) & mask;
426 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
427 }
428
429 /* Generic functions to enable/disable certain features of the IOMMU. */
iommu_feature_enable(struct amd_iommu * iommu,u8 bit)430 void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
431 {
432 iommu_feature_set(iommu, 1ULL, 1ULL, bit);
433 }
434
iommu_feature_disable(struct amd_iommu * iommu,u8 bit)435 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
436 {
437 iommu_feature_set(iommu, 0ULL, 1ULL, bit);
438 }
439
440 /* Function to enable the hardware */
iommu_enable(struct amd_iommu * iommu)441 static void iommu_enable(struct amd_iommu *iommu)
442 {
443 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
444 }
445
iommu_disable(struct amd_iommu * iommu)446 static void iommu_disable(struct amd_iommu *iommu)
447 {
448 if (!iommu->mmio_base)
449 return;
450
451 /* Disable command buffer */
452 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
453
454 /* Disable event logging and event interrupts */
455 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
456 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
457
458 /* Disable IOMMU GA_LOG */
459 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
460 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
461
462 /* Disable IOMMU PPR logging */
463 iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
464 iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
465
466 /* Disable IOMMU hardware itself */
467 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
468
469 /* Clear IRTE cache disabling bit */
470 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
471 }
472
473 /*
474 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
475 * the system has one.
476 */
iommu_map_mmio_space(u64 address,u64 end)477 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
478 {
479 if (!request_mem_region(address, end, "amd_iommu")) {
480 pr_err("Can not reserve memory region %llx-%llx for mmio\n",
481 address, end);
482 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
483 return NULL;
484 }
485
486 return (u8 __iomem *)ioremap(address, end);
487 }
488
iommu_unmap_mmio_space(struct amd_iommu * iommu)489 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
490 {
491 if (iommu->mmio_base)
492 iounmap(iommu->mmio_base);
493 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
494 }
495
get_ivhd_header_size(struct ivhd_header * h)496 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
497 {
498 u32 size = 0;
499
500 switch (h->type) {
501 case 0x10:
502 size = 24;
503 break;
504 case 0x11:
505 case 0x40:
506 size = 40;
507 break;
508 }
509 return size;
510 }
511
512 /****************************************************************************
513 *
514 * The functions below belong to the first pass of AMD IOMMU ACPI table
515 * parsing. In this pass we try to find out the highest device id this
516 * code has to handle. Upon this information the size of the shared data
517 * structures is determined later.
518 *
519 ****************************************************************************/
520
521 /*
522 * This function calculates the length of a given IVHD entry
523 */
ivhd_entry_length(u8 * ivhd)524 static inline int ivhd_entry_length(u8 *ivhd)
525 {
526 u32 type = ((struct ivhd_entry *)ivhd)->type;
527
528 if (type < 0x80) {
529 return 0x04 << (*ivhd >> 6);
530 } else if (type == IVHD_DEV_ACPI_HID) {
531 /* For ACPI_HID, offset 21 is uid len */
532 return *((u8 *)ivhd + 21) + 22;
533 }
534 return 0;
535 }
536
537 /*
538 * After reading the highest device id from the IOMMU PCI capability header
539 * this function looks if there is a higher device id defined in the ACPI table
540 */
find_last_devid_from_ivhd(struct ivhd_header * h)541 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
542 {
543 u8 *p = (void *)h, *end = (void *)h;
544 struct ivhd_entry *dev;
545 int last_devid = -EINVAL;
546
547 u32 ivhd_size = get_ivhd_header_size(h);
548
549 if (!ivhd_size) {
550 pr_err("Unsupported IVHD type %#x\n", h->type);
551 return -EINVAL;
552 }
553
554 p += ivhd_size;
555 end += h->length;
556
557 while (p < end) {
558 dev = (struct ivhd_entry *)p;
559 switch (dev->type) {
560 case IVHD_DEV_ALL:
561 /* Use maximum BDF value for DEV_ALL */
562 return 0xffff;
563 case IVHD_DEV_SELECT:
564 case IVHD_DEV_RANGE_END:
565 case IVHD_DEV_ALIAS:
566 case IVHD_DEV_EXT_SELECT:
567 /* all the above subfield types refer to device ids */
568 if (dev->devid > last_devid)
569 last_devid = dev->devid;
570 break;
571 default:
572 break;
573 }
574 p += ivhd_entry_length(p);
575 }
576
577 WARN_ON(p != end);
578
579 return last_devid;
580 }
581
check_ivrs_checksum(struct acpi_table_header * table)582 static int __init check_ivrs_checksum(struct acpi_table_header *table)
583 {
584 int i;
585 u8 checksum = 0, *p = (u8 *)table;
586
587 for (i = 0; i < table->length; ++i)
588 checksum += p[i];
589 if (checksum != 0) {
590 /* ACPI table corrupt */
591 pr_err(FW_BUG "IVRS invalid checksum\n");
592 return -ENODEV;
593 }
594
595 return 0;
596 }
597
598 /*
599 * Iterate over all IVHD entries in the ACPI table and find the highest device
600 * id which we need to handle. This is the first of three functions which parse
601 * the ACPI table. So we check the checksum here.
602 */
find_last_devid_acpi(struct acpi_table_header * table,u16 pci_seg)603 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
604 {
605 u8 *p = (u8 *)table, *end = (u8 *)table;
606 struct ivhd_header *h;
607 int last_devid, last_bdf = 0;
608
609 p += IVRS_HEADER_LENGTH;
610
611 end += table->length;
612 while (p < end) {
613 h = (struct ivhd_header *)p;
614 if (h->pci_seg == pci_seg &&
615 h->type == amd_iommu_target_ivhd_type) {
616 last_devid = find_last_devid_from_ivhd(h);
617
618 if (last_devid < 0)
619 return -EINVAL;
620 if (last_devid > last_bdf)
621 last_bdf = last_devid;
622 }
623 p += h->length;
624 }
625 WARN_ON(p != end);
626
627 return last_bdf;
628 }
629
630 /****************************************************************************
631 *
632 * The following functions belong to the code path which parses the ACPI table
633 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
634 * data structures, initialize the per PCI segment device/alias/rlookup table
635 * and also basically initialize the hardware.
636 *
637 ****************************************************************************/
638
639 /* Allocate per PCI segment device table */
alloc_dev_table(struct amd_iommu_pci_seg * pci_seg)640 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
641 {
642 pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
643 pci_seg->dev_table_size);
644 if (!pci_seg->dev_table)
645 return -ENOMEM;
646
647 return 0;
648 }
649
free_dev_table(struct amd_iommu_pci_seg * pci_seg)650 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
651 {
652 if (is_kdump_kernel())
653 memunmap((void *)pci_seg->dev_table);
654 else
655 iommu_free_pages(pci_seg->dev_table);
656 pci_seg->dev_table = NULL;
657 }
658
659 /* Allocate per PCI segment IOMMU rlookup table. */
alloc_rlookup_table(struct amd_iommu_pci_seg * pci_seg)660 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
661 {
662 pci_seg->rlookup_table = kvzalloc_objs(*pci_seg->rlookup_table,
663 pci_seg->last_bdf + 1);
664 if (pci_seg->rlookup_table == NULL)
665 return -ENOMEM;
666
667 return 0;
668 }
669
free_rlookup_table(struct amd_iommu_pci_seg * pci_seg)670 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
671 {
672 kvfree(pci_seg->rlookup_table);
673 pci_seg->rlookup_table = NULL;
674 }
675
alloc_irq_lookup_table(struct amd_iommu_pci_seg * pci_seg)676 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
677 {
678 pci_seg->irq_lookup_table = kvzalloc_objs(*pci_seg->irq_lookup_table,
679 pci_seg->last_bdf + 1);
680 if (pci_seg->irq_lookup_table == NULL)
681 return -ENOMEM;
682
683 return 0;
684 }
685
free_irq_lookup_table(struct amd_iommu_pci_seg * pci_seg)686 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
687 {
688 kvfree(pci_seg->irq_lookup_table);
689 pci_seg->irq_lookup_table = NULL;
690 }
691
alloc_alias_table(struct amd_iommu_pci_seg * pci_seg)692 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
693 {
694 int i;
695
696 pci_seg->alias_table = kvmalloc_objs(*pci_seg->alias_table,
697 pci_seg->last_bdf + 1);
698 if (!pci_seg->alias_table)
699 return -ENOMEM;
700
701 /*
702 * let all alias entries point to itself
703 */
704 for (i = 0; i <= pci_seg->last_bdf; ++i)
705 pci_seg->alias_table[i] = i;
706
707 return 0;
708 }
709
free_alias_table(struct amd_iommu_pci_seg * pci_seg)710 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
711 {
712 kvfree(pci_seg->alias_table);
713 pci_seg->alias_table = NULL;
714 }
715
iommu_memremap(unsigned long paddr,size_t size)716 static inline void *iommu_memremap(unsigned long paddr, size_t size)
717 {
718 phys_addr_t phys;
719
720 if (!paddr)
721 return NULL;
722
723 /*
724 * Obtain true physical address in kdump kernel when SME is enabled.
725 * Currently, previous kernel with SME enabled and kdump kernel
726 * with SME support disabled is not supported.
727 */
728 phys = __sme_clr(paddr);
729
730 if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
731 return (__force void *)ioremap_encrypted(phys, size);
732 else
733 return memremap(phys, size, MEMREMAP_WB);
734 }
735
736 /*
737 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
738 * write commands to that buffer later and the IOMMU will execute them
739 * asynchronously
740 */
alloc_command_buffer(struct amd_iommu * iommu)741 static int __init alloc_command_buffer(struct amd_iommu *iommu)
742 {
743 iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
744
745 return iommu->cmd_buf ? 0 : -ENOMEM;
746 }
747
748 /*
749 * Interrupt handler has processed all pending events and adjusted head
750 * and tail pointer. Reset overflow mask and restart logging again.
751 */
amd_iommu_restart_log(struct amd_iommu * iommu,const char * evt_type,u8 cntrl_intr,u8 cntrl_log,u32 status_run_mask,u32 status_overflow_mask)752 void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
753 u8 cntrl_intr, u8 cntrl_log,
754 u32 status_run_mask, u32 status_overflow_mask)
755 {
756 u32 status;
757
758 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
759 if (status & status_run_mask)
760 return;
761
762 pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
763
764 iommu_feature_disable(iommu, cntrl_log);
765 iommu_feature_disable(iommu, cntrl_intr);
766
767 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
768
769 iommu_feature_enable(iommu, cntrl_intr);
770 iommu_feature_enable(iommu, cntrl_log);
771 }
772
773 /*
774 * This function restarts event logging in case the IOMMU experienced
775 * an event log buffer overflow.
776 */
amd_iommu_restart_event_logging(struct amd_iommu * iommu)777 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
778 {
779 amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
780 CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
781 MMIO_STATUS_EVT_OVERFLOW_MASK);
782 }
783
784 /*
785 * This function restarts event logging in case the IOMMU experienced
786 * GA log overflow.
787 */
amd_iommu_restart_ga_log(struct amd_iommu * iommu)788 void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
789 {
790 amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
791 CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
792 MMIO_STATUS_GALOG_OVERFLOW_MASK);
793 }
794
795 /*
796 * This function resets the command buffer if the IOMMU stopped fetching
797 * commands from it.
798 */
amd_iommu_reset_cmd_buffer(struct amd_iommu * iommu)799 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
800 {
801 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
802
803 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
804 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
805 iommu->cmd_buf_head = 0;
806 iommu->cmd_buf_tail = 0;
807
808 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
809 }
810
811 /*
812 * This function writes the command buffer address to the hardware and
813 * enables it.
814 */
iommu_enable_command_buffer(struct amd_iommu * iommu)815 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
816 {
817 u64 entry;
818
819 BUG_ON(iommu->cmd_buf == NULL);
820
821 if (!is_kdump_kernel()) {
822 /*
823 * Command buffer is re-used for kdump kernel and setting
824 * of MMIO register is not required.
825 */
826 entry = iommu_virt_to_phys(iommu->cmd_buf);
827 entry |= MMIO_CMD_SIZE_512;
828 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
829 &entry, sizeof(entry));
830 }
831
832 amd_iommu_reset_cmd_buffer(iommu);
833 }
834
835 /*
836 * This function disables the command buffer
837 */
iommu_disable_command_buffer(struct amd_iommu * iommu)838 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
839 {
840 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
841 }
842
free_command_buffer(struct amd_iommu * iommu)843 static void __init free_command_buffer(struct amd_iommu *iommu)
844 {
845 iommu_free_pages(iommu->cmd_buf);
846 }
847
iommu_alloc_4k_pages(struct amd_iommu * iommu,gfp_t gfp,size_t size)848 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
849 size_t size)
850 {
851 int nid = iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
852 void *buf;
853
854 size = PAGE_ALIGN(size);
855 buf = iommu_alloc_pages_node_sz(nid, gfp, size);
856 if (!buf)
857 return NULL;
858 if (check_feature(FEATURE_SNP) &&
859 set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
860 iommu_free_pages(buf);
861 return NULL;
862 }
863
864 return buf;
865 }
866
867 /* allocates the memory where the IOMMU will log its events to */
alloc_event_buffer(struct amd_iommu * iommu)868 static int __init alloc_event_buffer(struct amd_iommu *iommu)
869 {
870 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
871 EVT_BUFFER_SIZE);
872
873 return iommu->evt_buf ? 0 : -ENOMEM;
874 }
875
iommu_enable_event_buffer(struct amd_iommu * iommu)876 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
877 {
878 u64 entry;
879
880 BUG_ON(iommu->evt_buf == NULL);
881
882 if (!is_kdump_kernel()) {
883 /*
884 * Event buffer is re-used for kdump kernel and setting
885 * of MMIO register is not required.
886 */
887 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
888 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
889 &entry, sizeof(entry));
890 }
891
892 /* set head and tail to zero manually */
893 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
894 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
895
896 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
897 }
898
899 /*
900 * This function disables the event log buffer
901 */
iommu_disable_event_buffer(struct amd_iommu * iommu)902 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
903 {
904 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
905 }
906
free_event_buffer(struct amd_iommu * iommu)907 static void __init free_event_buffer(struct amd_iommu *iommu)
908 {
909 iommu_free_pages(iommu->evt_buf);
910 }
911
free_ga_log(struct amd_iommu * iommu)912 static void free_ga_log(struct amd_iommu *iommu)
913 {
914 #ifdef CONFIG_IRQ_REMAP
915 iommu_free_pages(iommu->ga_log);
916 iommu_free_pages(iommu->ga_log_tail);
917 #endif
918 }
919
920 #ifdef CONFIG_IRQ_REMAP
iommu_ga_log_enable(struct amd_iommu * iommu)921 static int iommu_ga_log_enable(struct amd_iommu *iommu)
922 {
923 u32 status, i;
924 u64 entry;
925
926 if (!iommu->ga_log)
927 return -EINVAL;
928
929 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
930 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
931 &entry, sizeof(entry));
932 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
933 (BIT_ULL(52)-1)) & ~7ULL;
934 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
935 &entry, sizeof(entry));
936 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
937 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
938
939
940 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
941 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
942
943 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
944 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
945 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
946 break;
947 udelay(10);
948 }
949
950 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
951 return -EINVAL;
952
953 return 0;
954 }
955
iommu_init_ga_log(struct amd_iommu * iommu)956 static int iommu_init_ga_log(struct amd_iommu *iommu)
957 {
958 int nid = iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
959
960 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
961 return 0;
962
963 iommu->ga_log = iommu_alloc_pages_node_sz(nid, GFP_KERNEL, GA_LOG_SIZE);
964 if (!iommu->ga_log)
965 goto err_out;
966
967 iommu->ga_log_tail = iommu_alloc_pages_node_sz(nid, GFP_KERNEL, 8);
968 if (!iommu->ga_log_tail)
969 goto err_out;
970
971 return 0;
972 err_out:
973 free_ga_log(iommu);
974 return -EINVAL;
975 }
976 #endif /* CONFIG_IRQ_REMAP */
977
alloc_cwwb_sem(struct amd_iommu * iommu)978 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
979 {
980 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
981 if (!iommu->cmd_sem)
982 return -ENOMEM;
983 iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
984 return 0;
985 }
986
remap_event_buffer(struct amd_iommu * iommu)987 static int __init remap_event_buffer(struct amd_iommu *iommu)
988 {
989 u64 paddr;
990
991 pr_info_once("Re-using event buffer from the previous kernel\n");
992 paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
993 iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
994
995 return iommu->evt_buf ? 0 : -ENOMEM;
996 }
997
remap_command_buffer(struct amd_iommu * iommu)998 static int __init remap_command_buffer(struct amd_iommu *iommu)
999 {
1000 u64 paddr;
1001
1002 pr_info_once("Re-using command buffer from the previous kernel\n");
1003 paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
1004 iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
1005
1006 return iommu->cmd_buf ? 0 : -ENOMEM;
1007 }
1008
remap_or_alloc_cwwb_sem(struct amd_iommu * iommu)1009 static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
1010 {
1011 u64 paddr;
1012
1013 if (check_feature(FEATURE_SNP)) {
1014 /*
1015 * When SNP is enabled, the exclusion base register is used for the
1016 * completion wait buffer (CWB) address. Read and re-use it.
1017 */
1018 pr_info_once("Re-using CWB buffers from the previous kernel\n");
1019 paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
1020 iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
1021 if (!iommu->cmd_sem)
1022 return -ENOMEM;
1023 iommu->cmd_sem_paddr = paddr;
1024 } else {
1025 return alloc_cwwb_sem(iommu);
1026 }
1027
1028 return 0;
1029 }
1030
alloc_iommu_buffers(struct amd_iommu * iommu)1031 static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
1032 {
1033 int ret;
1034
1035 /*
1036 * Reuse/Remap the previous kernel's allocated completion wait
1037 * command and event buffers for kdump boot.
1038 */
1039 if (is_kdump_kernel()) {
1040 ret = remap_or_alloc_cwwb_sem(iommu);
1041 if (ret)
1042 return ret;
1043
1044 ret = remap_command_buffer(iommu);
1045 if (ret)
1046 return ret;
1047
1048 ret = remap_event_buffer(iommu);
1049 if (ret)
1050 return ret;
1051 } else {
1052 ret = alloc_cwwb_sem(iommu);
1053 if (ret)
1054 return ret;
1055
1056 ret = alloc_command_buffer(iommu);
1057 if (ret)
1058 return ret;
1059
1060 ret = alloc_event_buffer(iommu);
1061 if (ret)
1062 return ret;
1063 }
1064
1065 return 0;
1066 }
1067
free_cwwb_sem(struct amd_iommu * iommu)1068 static void __init free_cwwb_sem(struct amd_iommu *iommu)
1069 {
1070 if (iommu->cmd_sem)
1071 iommu_free_pages((void *)iommu->cmd_sem);
1072 }
unmap_cwwb_sem(struct amd_iommu * iommu)1073 static void __init unmap_cwwb_sem(struct amd_iommu *iommu)
1074 {
1075 if (iommu->cmd_sem) {
1076 if (check_feature(FEATURE_SNP))
1077 memunmap((void *)iommu->cmd_sem);
1078 else
1079 iommu_free_pages((void *)iommu->cmd_sem);
1080 }
1081 }
1082
unmap_command_buffer(struct amd_iommu * iommu)1083 static void __init unmap_command_buffer(struct amd_iommu *iommu)
1084 {
1085 memunmap((void *)iommu->cmd_buf);
1086 }
1087
unmap_event_buffer(struct amd_iommu * iommu)1088 static void __init unmap_event_buffer(struct amd_iommu *iommu)
1089 {
1090 memunmap(iommu->evt_buf);
1091 }
1092
free_iommu_buffers(struct amd_iommu * iommu)1093 static void __init free_iommu_buffers(struct amd_iommu *iommu)
1094 {
1095 if (is_kdump_kernel()) {
1096 unmap_cwwb_sem(iommu);
1097 unmap_command_buffer(iommu);
1098 unmap_event_buffer(iommu);
1099 } else {
1100 free_cwwb_sem(iommu);
1101 free_command_buffer(iommu);
1102 free_event_buffer(iommu);
1103 }
1104 }
1105
iommu_enable_xt(struct amd_iommu * iommu)1106 static void iommu_enable_xt(struct amd_iommu *iommu)
1107 {
1108 #ifdef CONFIG_IRQ_REMAP
1109 /*
1110 * XT mode (32-bit APIC destination ID) requires
1111 * GA mode (128-bit IRTE support) as a prerequisite.
1112 */
1113 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
1114 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1115 iommu_feature_enable(iommu, CONTROL_XT_EN);
1116 #endif /* CONFIG_IRQ_REMAP */
1117 }
1118
iommu_enable_gt(struct amd_iommu * iommu)1119 static void iommu_enable_gt(struct amd_iommu *iommu)
1120 {
1121 if (!check_feature(FEATURE_GT))
1122 return;
1123
1124 iommu_feature_enable(iommu, CONTROL_GT_EN);
1125
1126 /*
1127 * This feature needs to be enabled prior to a call
1128 * to iommu_snp_enable(). Since this function is called
1129 * in early_enable_iommu(), it is safe to enable here.
1130 */
1131 if (check_feature2(FEATURE_GCR3TRPMODE))
1132 iommu_feature_enable(iommu, CONTROL_GCR3TRPMODE);
1133 }
1134
1135 /* sets a specific bit in the device table entry. */
set_dte_bit(struct dev_table_entry * dte,u8 bit)1136 static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
1137 {
1138 int i = (bit >> 6) & 0x03;
1139 int _bit = bit & 0x3f;
1140
1141 dte->data[i] |= (1UL << _bit);
1142 }
1143
__reuse_device_table(struct amd_iommu * iommu)1144 static bool __reuse_device_table(struct amd_iommu *iommu)
1145 {
1146 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1147 struct dev_table_entry *old_dev_tbl_entry;
1148 u32 lo, hi, old_devtb_size, devid;
1149 phys_addr_t old_devtb_phys;
1150 u16 dom_id;
1151 bool dte_v;
1152 u64 entry;
1153
1154 /* Each IOMMU use separate device table with the same size */
1155 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
1156 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
1157 entry = (((u64) hi) << 32) + lo;
1158
1159 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
1160 if (old_devtb_size != pci_seg->dev_table_size) {
1161 pr_err("The device table size of IOMMU:%d is not expected!\n",
1162 iommu->index);
1163 return false;
1164 }
1165
1166 /*
1167 * When SME is enabled in the first kernel, the entry includes the
1168 * memory encryption mask(sme_me_mask), we must remove the memory
1169 * encryption mask to obtain the true physical address in kdump kernel.
1170 */
1171 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
1172
1173 if (old_devtb_phys >= 0x100000000ULL) {
1174 pr_err("The address of old device table is above 4G, not trustworthy!\n");
1175 return false;
1176 }
1177
1178 /*
1179 * Re-use the previous kernel's device table for kdump.
1180 */
1181 pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size);
1182 if (pci_seg->old_dev_tbl_cpy == NULL) {
1183 pr_err("Failed to remap memory for reusing old device table!\n");
1184 return false;
1185 }
1186
1187 for (devid = 0; devid <= pci_seg->last_bdf; devid++) {
1188 old_dev_tbl_entry = &pci_seg->old_dev_tbl_cpy[devid];
1189 dte_v = FIELD_GET(DTE_FLAG_V, old_dev_tbl_entry->data[0]);
1190 dom_id = FIELD_GET(DTE_DOMID_MASK, old_dev_tbl_entry->data[1]);
1191
1192 if (!dte_v || !dom_id)
1193 continue;
1194 /*
1195 * ID reservation can fail with -ENOSPC when there
1196 * are multiple devices present in the same domain,
1197 * hence check only for -ENOMEM.
1198 */
1199 if (amd_iommu_pdom_id_reserve(dom_id, GFP_KERNEL) == -ENOMEM)
1200 return false;
1201 }
1202
1203 return true;
1204 }
1205
reuse_device_table(void)1206 static bool reuse_device_table(void)
1207 {
1208 struct amd_iommu *iommu;
1209 struct amd_iommu_pci_seg *pci_seg;
1210
1211 if (!amd_iommu_pre_enabled)
1212 return false;
1213
1214 pr_warn("Translation is already enabled - trying to reuse translation structures\n");
1215
1216 /*
1217 * All IOMMUs within PCI segment shares common device table.
1218 * Hence reuse device table only once per PCI segment.
1219 */
1220 for_each_pci_segment(pci_seg) {
1221 for_each_iommu(iommu) {
1222 if (pci_seg->id != iommu->pci_seg->id)
1223 continue;
1224 if (!__reuse_device_table(iommu))
1225 return false;
1226 break;
1227 }
1228 }
1229
1230 return true;
1231 }
1232
amd_iommu_get_ivhd_dte_flags(u16 segid,u16 devid)1233 struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid)
1234 {
1235 struct ivhd_dte_flags *e;
1236 unsigned int best_len = UINT_MAX;
1237 struct dev_table_entry *dte = NULL;
1238
1239 for_each_ivhd_dte_flags(e) {
1240 /*
1241 * Need to go through the whole list to find the smallest range,
1242 * which contains the devid.
1243 */
1244 if ((e->segid == segid) &&
1245 (e->devid_first <= devid) && (devid <= e->devid_last)) {
1246 unsigned int len = e->devid_last - e->devid_first;
1247
1248 if (len < best_len) {
1249 dte = &(e->dte);
1250 best_len = len;
1251 }
1252 }
1253 }
1254 return dte;
1255 }
1256
search_ivhd_dte_flags(u16 segid,u16 first,u16 last)1257 static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
1258 {
1259 struct ivhd_dte_flags *e;
1260
1261 for_each_ivhd_dte_flags(e) {
1262 if ((e->segid == segid) &&
1263 (e->devid_first == first) &&
1264 (e->devid_last == last))
1265 return true;
1266 }
1267 return false;
1268 }
1269
1270 /*
1271 * This function takes the device specific flags read from the ACPI
1272 * table and sets up the device table entry with that information
1273 */
1274 static void __init
set_dev_entry_from_acpi_range(struct amd_iommu * iommu,u16 first,u16 last,u32 flags,u32 ext_flags)1275 set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
1276 u32 flags, u32 ext_flags)
1277 {
1278 int i;
1279 struct dev_table_entry dte = {};
1280
1281 /* Parse IVHD DTE setting flags and store information */
1282 if (flags) {
1283 struct ivhd_dte_flags *d;
1284
1285 if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last))
1286 return;
1287
1288 d = kzalloc_obj(struct ivhd_dte_flags);
1289 if (!d)
1290 return;
1291
1292 pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
1293
1294 if (flags & ACPI_DEVFLAG_INITPASS)
1295 set_dte_bit(&dte, DEV_ENTRY_INIT_PASS);
1296 if (flags & ACPI_DEVFLAG_EXTINT)
1297 set_dte_bit(&dte, DEV_ENTRY_EINT_PASS);
1298 if (flags & ACPI_DEVFLAG_NMI)
1299 set_dte_bit(&dte, DEV_ENTRY_NMI_PASS);
1300 if (flags & ACPI_DEVFLAG_SYSMGT1)
1301 set_dte_bit(&dte, DEV_ENTRY_SYSMGT1);
1302 if (flags & ACPI_DEVFLAG_SYSMGT2)
1303 set_dte_bit(&dte, DEV_ENTRY_SYSMGT2);
1304 if (flags & ACPI_DEVFLAG_LINT0)
1305 set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS);
1306 if (flags & ACPI_DEVFLAG_LINT1)
1307 set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS);
1308
1309 /* Apply erratum 63, which needs info in initial_dte */
1310 if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
1311 dte.data[0] |= DTE_FLAG_IW;
1312
1313 memcpy(&d->dte, &dte, sizeof(dte));
1314 d->segid = iommu->pci_seg->id;
1315 d->devid_first = first;
1316 d->devid_last = last;
1317 list_add_tail(&d->list, &amd_ivhd_dev_flags_list);
1318 }
1319
1320 for (i = first; i <= last; i++) {
1321 if (flags) {
1322 struct dev_table_entry *dev_table = get_dev_table(iommu);
1323
1324 memcpy(&dev_table[i], &dte, sizeof(dte));
1325 }
1326 amd_iommu_set_rlookup_table(iommu, i);
1327 }
1328 }
1329
set_dev_entry_from_acpi(struct amd_iommu * iommu,u16 devid,u32 flags,u32 ext_flags)1330 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1331 u16 devid, u32 flags, u32 ext_flags)
1332 {
1333 set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags);
1334 }
1335
add_special_device(u8 type,u8 id,u32 * devid,bool cmd_line)1336 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
1337 {
1338 struct devid_map *entry;
1339 struct list_head *list;
1340
1341 if (type == IVHD_SPECIAL_IOAPIC)
1342 list = &ioapic_map;
1343 else if (type == IVHD_SPECIAL_HPET)
1344 list = &hpet_map;
1345 else
1346 return -EINVAL;
1347
1348 list_for_each_entry(entry, list, list) {
1349 if (!(entry->id == id && entry->cmd_line))
1350 continue;
1351
1352 pr_info("Command-line override present for %s id %d - ignoring\n",
1353 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1354
1355 *devid = entry->devid;
1356
1357 return 0;
1358 }
1359
1360 entry = kzalloc_obj(*entry);
1361 if (!entry)
1362 return -ENOMEM;
1363
1364 entry->id = id;
1365 entry->devid = *devid;
1366 entry->cmd_line = cmd_line;
1367
1368 list_add_tail(&entry->list, list);
1369
1370 return 0;
1371 }
1372
add_acpi_hid_device(u8 * hid,u8 * uid,u32 * devid,bool cmd_line)1373 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
1374 bool cmd_line)
1375 {
1376 struct acpihid_map_entry *entry;
1377 struct list_head *list = &acpihid_map;
1378
1379 list_for_each_entry(entry, list, list) {
1380 if (strcmp(entry->hid, hid) ||
1381 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1382 !entry->cmd_line)
1383 continue;
1384
1385 pr_info("Command-line override for hid:%s uid:%s\n",
1386 hid, uid);
1387 *devid = entry->devid;
1388 return 0;
1389 }
1390
1391 entry = kzalloc_obj(*entry);
1392 if (!entry)
1393 return -ENOMEM;
1394
1395 memcpy(entry->uid, uid, strlen(uid));
1396 memcpy(entry->hid, hid, strlen(hid));
1397 entry->devid = *devid;
1398 entry->cmd_line = cmd_line;
1399 entry->root_devid = (entry->devid & (~0x7));
1400
1401 pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n",
1402 entry->cmd_line ? "cmd" : "ivrs",
1403 entry->hid, entry->uid, entry->root_devid);
1404
1405 list_add_tail(&entry->list, list);
1406 return 0;
1407 }
1408
add_early_maps(void)1409 static int __init add_early_maps(void)
1410 {
1411 int i, ret;
1412
1413 for (i = 0; i < early_ioapic_map_size; ++i) {
1414 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1415 early_ioapic_map[i].id,
1416 &early_ioapic_map[i].devid,
1417 early_ioapic_map[i].cmd_line);
1418 if (ret)
1419 return ret;
1420 }
1421
1422 for (i = 0; i < early_hpet_map_size; ++i) {
1423 ret = add_special_device(IVHD_SPECIAL_HPET,
1424 early_hpet_map[i].id,
1425 &early_hpet_map[i].devid,
1426 early_hpet_map[i].cmd_line);
1427 if (ret)
1428 return ret;
1429 }
1430
1431 for (i = 0; i < early_acpihid_map_size; ++i) {
1432 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1433 early_acpihid_map[i].uid,
1434 &early_acpihid_map[i].devid,
1435 early_acpihid_map[i].cmd_line);
1436 if (ret)
1437 return ret;
1438 }
1439
1440 return 0;
1441 }
1442
1443 /*
1444 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1445 * initializes the hardware and our data structures with it.
1446 */
init_iommu_from_acpi(struct amd_iommu * iommu,struct ivhd_header * h)1447 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1448 struct ivhd_header *h)
1449 {
1450 u8 *p = (u8 *)h;
1451 u8 *end = p, flags = 0;
1452 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
1453 u32 dev_i, ext_flags = 0;
1454 bool alias = false;
1455 struct ivhd_entry *e;
1456 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1457 u32 ivhd_size;
1458 int ret;
1459
1460
1461 ret = add_early_maps();
1462 if (ret)
1463 return ret;
1464
1465 amd_iommu_apply_ivrs_quirks();
1466
1467 /*
1468 * First save the recommended feature enable bits from ACPI
1469 */
1470 iommu->acpi_flags = h->flags;
1471
1472 /*
1473 * Done. Now parse the device entries
1474 */
1475 ivhd_size = get_ivhd_header_size(h);
1476 if (!ivhd_size) {
1477 pr_err("Unsupported IVHD type %#x\n", h->type);
1478 return -EINVAL;
1479 }
1480
1481 p += ivhd_size;
1482
1483 end += h->length;
1484
1485
1486 while (p < end) {
1487 e = (struct ivhd_entry *)p;
1488 seg_id = pci_seg->id;
1489
1490 switch (e->type) {
1491 case IVHD_DEV_ALL:
1492
1493 DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
1494 set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0);
1495 break;
1496 case IVHD_DEV_SELECT:
1497
1498 DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
1499 seg_id, PCI_BUS_NUM(e->devid),
1500 PCI_SLOT(e->devid),
1501 PCI_FUNC(e->devid),
1502 e->flags);
1503
1504 devid = e->devid;
1505 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1506 break;
1507 case IVHD_DEV_SELECT_RANGE_START:
1508
1509 DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
1510 seg_id, PCI_BUS_NUM(e->devid),
1511 PCI_SLOT(e->devid),
1512 PCI_FUNC(e->devid),
1513 e->flags);
1514
1515 devid_start = e->devid;
1516 flags = e->flags;
1517 ext_flags = 0;
1518 alias = false;
1519 break;
1520 case IVHD_DEV_ALIAS:
1521
1522 DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n",
1523 seg_id, PCI_BUS_NUM(e->devid),
1524 PCI_SLOT(e->devid),
1525 PCI_FUNC(e->devid),
1526 e->flags,
1527 PCI_BUS_NUM(e->ext >> 8),
1528 PCI_SLOT(e->ext >> 8),
1529 PCI_FUNC(e->ext >> 8));
1530
1531 devid = e->devid;
1532 devid_to = e->ext >> 8;
1533 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1534 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1535 pci_seg->alias_table[devid] = devid_to;
1536 break;
1537 case IVHD_DEV_ALIAS_RANGE:
1538
1539 DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n",
1540 seg_id, PCI_BUS_NUM(e->devid),
1541 PCI_SLOT(e->devid),
1542 PCI_FUNC(e->devid),
1543 e->flags,
1544 seg_id, PCI_BUS_NUM(e->ext >> 8),
1545 PCI_SLOT(e->ext >> 8),
1546 PCI_FUNC(e->ext >> 8));
1547
1548 devid_start = e->devid;
1549 flags = e->flags;
1550 devid_to = e->ext >> 8;
1551 ext_flags = 0;
1552 alias = true;
1553 break;
1554 case IVHD_DEV_EXT_SELECT:
1555
1556 DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
1557 seg_id, PCI_BUS_NUM(e->devid),
1558 PCI_SLOT(e->devid),
1559 PCI_FUNC(e->devid),
1560 e->flags, e->ext);
1561
1562 devid = e->devid;
1563 set_dev_entry_from_acpi(iommu, devid, e->flags,
1564 e->ext);
1565 break;
1566 case IVHD_DEV_EXT_SELECT_RANGE:
1567
1568 DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
1569 seg_id, PCI_BUS_NUM(e->devid),
1570 PCI_SLOT(e->devid),
1571 PCI_FUNC(e->devid),
1572 e->flags, e->ext);
1573
1574 devid_start = e->devid;
1575 flags = e->flags;
1576 ext_flags = e->ext;
1577 alias = false;
1578 break;
1579 case IVHD_DEV_RANGE_END:
1580
1581 DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n",
1582 seg_id, PCI_BUS_NUM(e->devid),
1583 PCI_SLOT(e->devid),
1584 PCI_FUNC(e->devid));
1585
1586 devid = e->devid;
1587 if (alias) {
1588 for (dev_i = devid_start; dev_i <= devid; ++dev_i)
1589 pci_seg->alias_table[dev_i] = devid_to;
1590 set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
1591 }
1592 set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
1593 break;
1594 case IVHD_DEV_SPECIAL: {
1595 u8 handle, type;
1596 const char *var;
1597 u32 devid;
1598 int ret;
1599
1600 handle = e->ext & 0xff;
1601 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
1602 type = (e->ext >> 24) & 0xff;
1603
1604 if (type == IVHD_SPECIAL_IOAPIC)
1605 var = "IOAPIC";
1606 else if (type == IVHD_SPECIAL_HPET)
1607 var = "HPET";
1608 else
1609 var = "UNKNOWN";
1610
1611 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
1612 var, (int)handle,
1613 seg_id, PCI_BUS_NUM(devid),
1614 PCI_SLOT(devid),
1615 PCI_FUNC(devid),
1616 e->flags);
1617
1618 ret = add_special_device(type, handle, &devid, false);
1619 if (ret)
1620 return ret;
1621
1622 /*
1623 * add_special_device might update the devid in case a
1624 * command-line override is present. So call
1625 * set_dev_entry_from_acpi after add_special_device.
1626 */
1627 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1628
1629 break;
1630 }
1631 case IVHD_DEV_ACPI_HID: {
1632 u32 devid;
1633 u8 hid[ACPIHID_HID_LEN];
1634 u8 uid[ACPIHID_UID_LEN];
1635 int ret;
1636
1637 if (h->type != 0x40) {
1638 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1639 e->type);
1640 break;
1641 }
1642
1643 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
1644 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
1645 hid[ACPIHID_HID_LEN - 1] = '\0';
1646
1647 if (!(*hid)) {
1648 pr_err(FW_BUG "Invalid HID.\n");
1649 break;
1650 }
1651
1652 uid[0] = '\0';
1653 switch (e->uidf) {
1654 case UID_NOT_PRESENT:
1655
1656 if (e->uidl != 0)
1657 pr_warn(FW_BUG "Invalid UID length.\n");
1658
1659 break;
1660 case UID_IS_INTEGER:
1661
1662 sprintf(uid, "%d", e->uid);
1663
1664 break;
1665 case UID_IS_CHARACTER:
1666
1667 memcpy(uid, &e->uid, e->uidl);
1668 uid[e->uidl] = '\0';
1669
1670 break;
1671 default:
1672 break;
1673 }
1674
1675 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
1676 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
1677 hid, uid, seg_id,
1678 PCI_BUS_NUM(devid),
1679 PCI_SLOT(devid),
1680 PCI_FUNC(devid),
1681 e->flags);
1682
1683 flags = e->flags;
1684
1685 ret = add_acpi_hid_device(hid, uid, &devid, false);
1686 if (ret)
1687 return ret;
1688
1689 /*
1690 * add_special_device might update the devid in case a
1691 * command-line override is present. So call
1692 * set_dev_entry_from_acpi after add_special_device.
1693 */
1694 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1695
1696 break;
1697 }
1698 default:
1699 break;
1700 }
1701
1702 p += ivhd_entry_length(p);
1703 }
1704
1705 return 0;
1706 }
1707
1708 /* Allocate PCI segment data structure */
alloc_pci_segment(u16 id,struct acpi_table_header * ivrs_base)1709 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
1710 struct acpi_table_header *ivrs_base)
1711 {
1712 struct amd_iommu_pci_seg *pci_seg;
1713 int last_bdf;
1714
1715 /*
1716 * First parse ACPI tables to find the largest Bus/Dev/Func we need to
1717 * handle in this PCI segment. Upon this information the shared data
1718 * structures for the PCI segments in the system will be allocated.
1719 */
1720 last_bdf = find_last_devid_acpi(ivrs_base, id);
1721 if (last_bdf < 0)
1722 return NULL;
1723
1724 pci_seg = kzalloc_obj(struct amd_iommu_pci_seg);
1725 if (pci_seg == NULL)
1726 return NULL;
1727
1728 pci_seg->last_bdf = last_bdf;
1729 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
1730 pci_seg->dev_table_size =
1731 max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
1732 SZ_4K);
1733
1734 pci_seg->id = id;
1735 init_llist_head(&pci_seg->dev_data_list);
1736 INIT_LIST_HEAD(&pci_seg->unity_map);
1737 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
1738
1739 if (alloc_dev_table(pci_seg))
1740 goto err_free_pci_seg;
1741 if (alloc_alias_table(pci_seg))
1742 goto err_free_dev_table;
1743 if (alloc_rlookup_table(pci_seg))
1744 goto err_free_alias_table;
1745
1746 return pci_seg;
1747
1748 err_free_alias_table:
1749 free_alias_table(pci_seg);
1750 err_free_dev_table:
1751 free_dev_table(pci_seg);
1752 err_free_pci_seg:
1753 list_del(&pci_seg->list);
1754 kfree(pci_seg);
1755 return NULL;
1756 }
1757
get_pci_segment(u16 id,struct acpi_table_header * ivrs_base)1758 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
1759 struct acpi_table_header *ivrs_base)
1760 {
1761 struct amd_iommu_pci_seg *pci_seg;
1762
1763 for_each_pci_segment(pci_seg) {
1764 if (pci_seg->id == id)
1765 return pci_seg;
1766 }
1767
1768 return alloc_pci_segment(id, ivrs_base);
1769 }
1770
free_pci_segments(void)1771 static void __init free_pci_segments(void)
1772 {
1773 struct amd_iommu_pci_seg *pci_seg, *next;
1774
1775 for_each_pci_segment_safe(pci_seg, next) {
1776 list_del(&pci_seg->list);
1777 free_irq_lookup_table(pci_seg);
1778 free_rlookup_table(pci_seg);
1779 free_alias_table(pci_seg);
1780 free_dev_table(pci_seg);
1781 kfree(pci_seg);
1782 }
1783 }
1784
free_sysfs(struct amd_iommu * iommu)1785 static void __init free_sysfs(struct amd_iommu *iommu)
1786 {
1787 if (iommu->iommu.dev) {
1788 iommu_device_unregister(&iommu->iommu);
1789 iommu_device_sysfs_remove(&iommu->iommu);
1790 }
1791 }
1792
free_iommu_one(struct amd_iommu * iommu)1793 static void __init free_iommu_one(struct amd_iommu *iommu)
1794 {
1795 free_sysfs(iommu);
1796 free_iommu_buffers(iommu);
1797 amd_iommu_free_ppr_log(iommu);
1798 free_ga_log(iommu);
1799 iommu_unmap_mmio_space(iommu);
1800 amd_iommu_iopf_uninit(iommu);
1801 }
1802
free_iommu_all(void)1803 static void __init free_iommu_all(void)
1804 {
1805 struct amd_iommu *iommu, *next;
1806
1807 for_each_iommu_safe(iommu, next) {
1808 list_del(&iommu->list);
1809 free_iommu_one(iommu);
1810 kfree(iommu);
1811 }
1812 }
1813
1814 /*
1815 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1816 * Workaround:
1817 * BIOS should disable L2B micellaneous clock gating by setting
1818 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1819 */
amd_iommu_erratum_746_workaround(struct amd_iommu * iommu)1820 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1821 {
1822 u32 value;
1823
1824 if ((boot_cpu_data.x86 != 0x15) ||
1825 (boot_cpu_data.x86_model < 0x10) ||
1826 (boot_cpu_data.x86_model > 0x1f))
1827 return;
1828
1829 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1830 pci_read_config_dword(iommu->dev, 0xf4, &value);
1831
1832 if (value & BIT(2))
1833 return;
1834
1835 /* Select NB indirect register 0x90 and enable writing */
1836 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1837
1838 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1839 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1840
1841 /* Clear the enable writing bit */
1842 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1843 }
1844
1845 /*
1846 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1847 * Workaround:
1848 * BIOS should enable ATS write permission check by setting
1849 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1850 */
amd_iommu_ats_write_check_workaround(struct amd_iommu * iommu)1851 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1852 {
1853 u32 value;
1854
1855 if ((boot_cpu_data.x86 != 0x15) ||
1856 (boot_cpu_data.x86_model < 0x30) ||
1857 (boot_cpu_data.x86_model > 0x3f))
1858 return;
1859
1860 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1861 value = iommu_read_l2(iommu, 0x47);
1862
1863 if (value & BIT(0))
1864 return;
1865
1866 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1867 iommu_write_l2(iommu, 0x47, value | BIT(0));
1868
1869 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1870 }
1871
1872 /*
1873 * This function glues the initialization function for one IOMMU
1874 * together and also allocates the command buffer and programs the
1875 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1876 */
init_iommu_one(struct amd_iommu * iommu,struct ivhd_header * h,struct acpi_table_header * ivrs_base)1877 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
1878 struct acpi_table_header *ivrs_base)
1879 {
1880 struct amd_iommu_pci_seg *pci_seg;
1881
1882 pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
1883 if (pci_seg == NULL)
1884 return -ENOMEM;
1885 iommu->pci_seg = pci_seg;
1886
1887 raw_spin_lock_init(&iommu->lock);
1888 iommu->cmd_sem_val = 0;
1889
1890 /* Add IOMMU to internal data structures */
1891 list_add_tail(&iommu->list, &amd_iommu_list);
1892 iommu->index = amd_iommus_present++;
1893
1894 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1895 WARN(1, "System has more IOMMUs than supported by this driver\n");
1896 return -ENOSYS;
1897 }
1898
1899 /*
1900 * Copy data from ACPI table entry to the iommu struct
1901 */
1902 iommu->devid = h->devid;
1903 iommu->cap_ptr = h->cap_ptr;
1904 iommu->mmio_phys = h->mmio_phys;
1905
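	/*
	 * IVHD type 10h carries only a subset of the EFR bits in efr_attr,
	 * while types 11h and 40h provide a full EFR copy in efr_reg; the
	 * MMIO window size and GA/XT interrupt modes are derived from
	 * whichever is available.
	 */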
1906 switch (h->type) {
1907 case 0x10:
1908 /* Check if IVHD EFR contains proper max banks/counters */
1909 if ((h->efr_attr != 0) &&
1910 ((h->efr_attr & (0xF << 13)) != 0) &&
1911 ((h->efr_attr & (0x3F << 17)) != 0))
1912 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1913 else
1914 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1915
1916 /* GAM requires GA mode. */
1917 if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)
1918 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1919 break;
1920 case 0x11:
1921 case 0x40:
1922 if (h->efr_reg & (1 << 9))
1923 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1924 else
1925 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1926
1927 /* XT and GAM require GA mode. */
1928 if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) {
1929 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1930 break;
1931 }
1932
1933 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1934 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1935
1936 if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) {
1937 pr_warn_once("Host Address Translation is not supported.\n");
1938 amd_iommu_hatdis = true;
1939 }
1940
1941 early_iommu_features_init(iommu, h);
1942
1943 break;
1944 default:
1945 return -EINVAL;
1946 }
1947
1948 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1949 iommu->mmio_phys_end);
1950 if (!iommu->mmio_base)
1951 return -ENOMEM;
1952
1953 return init_iommu_from_acpi(iommu, h);
1954 }
1955
1956 static int __init init_iommu_one_late(struct amd_iommu *iommu)
1957 {
1958 int ret;
1959
1960 ret = alloc_iommu_buffers(iommu);
1961 if (ret)
1962 return ret;
1963
1964 iommu->int_enabled = false;
1965
1966 init_translation_status(iommu);
1967 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1968 iommu_disable(iommu);
1969 clear_translation_pre_enabled(iommu);
1970 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1971 iommu->index);
1972 }
1973 if (amd_iommu_pre_enabled)
1974 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1975
1976 if (amd_iommu_irq_remap) {
1977 ret = amd_iommu_create_irq_domain(iommu);
1978 if (ret)
1979 return ret;
1980 }
1981
1982 /*
1983 * Make sure IOMMU is not considered to translate itself. The IVRS
1984 * table tells us so, but this is a lie!
1985 */
1986 iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
1987
1988 return 0;
1989 }
1990
1991 /**
1992 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1993 * @ivrs: Pointer to the IVRS header
1994 *
1995 * This function searches through all IVHD blocks and returns the highest supported IVHD type for the first IOMMU described in the table.
1996 */
1997 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1998 {
1999 u8 *base = (u8 *)ivrs;
2000 struct ivhd_header *ivhd = (struct ivhd_header *)
2001 (base + IVRS_HEADER_LENGTH);
2002 u8 last_type = ivhd->type;
2003 u16 devid = ivhd->devid;
2004
2005 while (((u8 *)ivhd - base < ivrs->length) &&
2006 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
2007 u8 *p = (u8 *) ivhd;
2008
2009 if (ivhd->devid == devid)
2010 last_type = ivhd->type;
2011 ivhd = (struct ivhd_header *)(p + ivhd->length);
2012 }
2013
2014 return last_type;
2015 }
2016
2017 /*
2018 * Iterates over all IOMMU entries in the ACPI table, allocates the
2019 * IOMMU structure and initializes it with init_iommu_one()
2020 */
2021 static int __init init_iommu_all(struct acpi_table_header *table)
2022 {
2023 u8 *p = (u8 *)table, *end = (u8 *)table;
2024 struct ivhd_header *h;
2025 struct amd_iommu *iommu;
2026 int ret;
2027
2028 end += table->length;
2029 p += IVRS_HEADER_LENGTH;
2030
2031 /* Phase 1: Process all IVHD blocks */
2032 while (p < end) {
2033 h = (struct ivhd_header *)p;
2034 if (*p == amd_iommu_target_ivhd_type) {
2035
2036 DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
2037 "flags: %01x info %04x\n",
2038 h->pci_seg, PCI_BUS_NUM(h->devid),
2039 PCI_SLOT(h->devid), PCI_FUNC(h->devid),
2040 h->cap_ptr, h->flags, h->info);
2041 DUMP_printk(" mmio-addr: %016llx\n",
2042 h->mmio_phys);
2043
2044 iommu = kzalloc_obj(struct amd_iommu);
2045 if (iommu == NULL)
2046 return -ENOMEM;
2047
2048 ret = init_iommu_one(iommu, h, table);
2049 if (ret)
2050 return ret;
2051 }
2052 p += h->length;
2053
2054 }
2055 WARN_ON(p != end);
2056
2057 /* Phase 2: Early feature support check */
2058 get_global_efr();
2059
2060 /* Phase 3: Enabling IOMMU features */
2061 for_each_iommu(iommu) {
2062 ret = init_iommu_one_late(iommu);
2063 if (ret)
2064 return ret;
2065 }
2066
2067 return 0;
2068 }
2069
2070 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
2071 {
2072 u64 val;
2073 struct pci_dev *pdev = iommu->dev;
2074
2075 if (!check_feature(FEATURE_PC))
2076 return;
2077
2078 amd_iommu_pc_present = true;
2079
2080 pci_info(pdev, "IOMMU performance counters supported\n");
2081
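	/* The counter configuration register reports how many banks and counters per bank this IOMMU implements. */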
2082 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
2083 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
2084 iommu->max_counters = (u8) ((val >> 7) & 0xf);
2085
2086 return;
2087 }
2088
2089 static ssize_t amd_iommu_show_cap(struct device *dev,
2090 struct device_attribute *attr,
2091 char *buf)
2092 {
2093 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
2094 return sysfs_emit(buf, "%x\n", iommu->cap);
2095 }
2096 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
2097
2098 static ssize_t amd_iommu_show_features(struct device *dev,
2099 struct device_attribute *attr,
2100 char *buf)
2101 {
2102 return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
2103 }
2104 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
2105
2106 static struct attribute *amd_iommu_attrs[] = {
2107 &dev_attr_cap.attr,
2108 &dev_attr_features.attr,
2109 NULL,
2110 };
2111
2112 static struct attribute_group amd_iommu_group = {
2113 .name = "amd-iommu",
2114 .attrs = amd_iommu_attrs,
2115 };
2116
2117 static const struct attribute_group *amd_iommu_groups[] = {
2118 &amd_iommu_group,
2119 NULL,
2120 };
2121
2122 /*
2123 * Note: IVHD types 0x11 and 0x40 also contain an exact copy
2124 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
2125 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
2126 */
2127 static void __init late_iommu_features_init(struct amd_iommu *iommu)
2128 {
2129 u64 features, features2;
2130
2131 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
2132 return;
2133
2134 /* read extended feature bits */
2135 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
2136 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
2137
2138 if (!amd_iommu_efr) {
2139 amd_iommu_efr = features;
2140 amd_iommu_efr2 = features2;
2141 return;
2142 }
2143
2144 /*
2145 * Sanity check and warn if EFR values from
2146 * IVHD and MMIO conflict.
2147 */
2148 if (features != amd_iommu_efr ||
2149 features2 != amd_iommu_efr2) {
2150 pr_warn(FW_WARN
2151 "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
2152 features, amd_iommu_efr,
2153 features2, amd_iommu_efr2);
2154 }
2155 }
2156
2157 static int __init iommu_init_pci(struct amd_iommu *iommu)
2158 {
2159 int cap_ptr = iommu->cap_ptr;
2160 int ret;
2161
2162 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2163 PCI_BUS_NUM(iommu->devid),
2164 iommu->devid & 0xff);
2165 if (!iommu->dev)
2166 return -ENODEV;
2167
2168 /* ACPI _PRT won't have an IRQ for IOMMU */
2169 iommu->dev->irq_managed = 1;
2170
2171 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
2172 &iommu->cap);
2173
2174 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
2175 amd_iommu_iotlb_sup = false;
2176
2177 late_iommu_features_init(iommu);
2178
2179 if (check_feature(FEATURE_GT)) {
2180 int glxval;
2181 u64 pasmax;
2182
2183 pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr);
2184 iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
2185
2186 BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
2187
2188 glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr);
2189
2190 if (amd_iommu_max_glx_val == -1)
2191 amd_iommu_max_glx_val = glxval;
2192 else
2193 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
2194
2195 iommu_enable_gt(iommu);
2196 }
2197
2198 if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu))
2199 return -ENOMEM;
2200
2201 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
2202 pr_info("Using strict mode due to virtualization\n");
2203 iommu_set_dma_strict();
2204 amd_iommu_np_cache = true;
2205 }
2206
2207 init_iommu_perf_ctr(iommu);
2208
2209 if (is_rd890_iommu(iommu->dev)) {
2210 int i, j;
2211
2212 iommu->root_pdev =
2213 pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2214 iommu->dev->bus->number,
2215 PCI_DEVFN(0, 0));
2216
2217 /*
2218 * Some rd890 systems may not be fully reconfigured by the
2219 * BIOS, so it's necessary for us to store this information so
2220 * it can be reprogrammed on resume
2221 */
2222 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
2223 &iommu->stored_addr_lo);
2224 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
2225 &iommu->stored_addr_hi);
2226
2227 /* Low bit locks writes to configuration space */
2228 iommu->stored_addr_lo &= ~1;
2229
2230 for (i = 0; i < 6; i++)
2231 for (j = 0; j < 0x12; j++)
2232 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2233
2234 for (i = 0; i < 0x83; i++)
2235 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2236 }
2237
2238 amd_iommu_erratum_746_workaround(iommu);
2239 amd_iommu_ats_write_check_workaround(iommu);
2240
2241 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
2242 amd_iommu_groups, "ivhd%d", iommu->index);
2243 if (ret)
2244 return ret;
2245
2246 /*
2247 * Allocate the per-IOMMU IOPF queue here so that PRI-capable devices
2248 * can be added to it in the device attach path.
2249 */
2250 if (amd_iommu_gt_ppr_supported()) {
2251 ret = amd_iommu_iopf_init(iommu);
2252 if (ret)
2253 return ret;
2254 }
2255
2256 ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2257 if (ret || amd_iommu_pgtable == PD_MODE_NONE) {
2258 /*
2259 * Remove sysfs if DMA translation is not supported by the
2260 * IOMMU. Do not return an error, so that IRQ remapping can still be
2261 * enabled in state_next(); DTE[V, TV] must eventually be set to 0.
2262 */
2263 iommu_device_sysfs_remove(&iommu->iommu);
2264 }
2265
2266 return pci_enable_device(iommu->dev);
2267 }
2268
2269 static void print_iommu_info(void)
2270 {
2271 int i;
2272 static const char * const feat_str[] = {
2273 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
2274 "IA", "GA", "HE", "PC"
2275 };
2276
2277 if (amd_iommu_efr) {
2278 pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
2279
2280 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
2281 if (check_feature(1ULL << i))
2282 pr_cont(" %s", feat_str[i]);
2283 }
2284
2285 if (check_feature(FEATURE_GAM_VAPIC))
2286 pr_cont(" GA_vAPIC");
2287
2288 if (check_feature(FEATURE_SNP))
2289 pr_cont(" SNP");
2290
2291 if (check_feature2(FEATURE_SEVSNPIO_SUP))
2292 pr_cont(" SEV-TIO");
2293
2294 pr_cont("\n");
2295 }
2296
2297 if (irq_remapping_enabled) {
2298 pr_info("Interrupt remapping enabled\n");
2299 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2300 pr_info("X2APIC enabled\n");
2301 }
2302 if (amd_iommu_pgtable == PD_MODE_V2) {
2303 pr_info("V2 page table enabled (Paging mode : %d level)\n",
2304 amd_iommu_gpt_level);
2305 }
2306 }
2307
2308 static int __init amd_iommu_init_pci(void)
2309 {
2310 struct amd_iommu *iommu;
2311 struct amd_iommu_pci_seg *pci_seg;
2312 int ret;
2313
2314 /* Init global identity domain before registering IOMMU */
2315 amd_iommu_init_identity_domain();
2316
2317 for_each_iommu(iommu) {
2318 ret = iommu_init_pci(iommu);
2319 if (ret) {
2320 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
2321 iommu->index, ret);
2322 goto out;
2323 }
2324 /* Need to setup range after PCI init */
2325 iommu_set_cwwb_range(iommu);
2326 }
2327
2328 /*
2329 * Order is important here to make sure any unity map requirements are
2330 * fulfilled. The unity mappings are created and written to the device
2331 * table during the iommu_init_pci() call.
2332 *
2333 * After that we call init_device_table_dma() to make sure any
2334 * uninitialized DTE will block DMA, and in the end we flush the caches
2335 * of all IOMMUs to make sure the changes to the device table are
2336 * active.
2337 */
2338 for_each_pci_segment(pci_seg)
2339 init_device_table_dma(pci_seg);
2340
2341 for_each_iommu(iommu)
2342 amd_iommu_flush_all_caches(iommu);
2343
2344 print_iommu_info();
2345
2346 out:
2347 return ret;
2348 }
2349
2350 /****************************************************************************
2351 *
2352 * The following functions initialize the MSI interrupts for all IOMMUs
2353 * in the system. It's a bit challenging because there could be multiple
2354 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2355 * pci_dev.
2356 *
2357 ****************************************************************************/
2358
2359 static int iommu_setup_msi(struct amd_iommu *iommu)
2360 {
2361 int r;
2362
2363 r = pci_enable_msi(iommu->dev);
2364 if (r)
2365 return r;
2366
2367 r = request_threaded_irq(iommu->dev->irq, NULL, amd_iommu_int_thread,
2368 IRQF_ONESHOT, "AMD-Vi", iommu);
2369 if (r) {
2370 pci_disable_msi(iommu->dev);
2371 return r;
2372 }
2373
2374 return 0;
2375 }
2376
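/*
 * Layout of the XT (x2APIC-capable) interrupt control registers written in
 * intcapxt_unmask_irq(): the 32-bit APIC destination id is split across
 * destid_0_23 and destid_24_31, alongside the vector and destination mode.
 */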
2377 union intcapxt {
2378 u64 capxt;
2379 struct {
2380 u64 reserved_0 : 2,
2381 dest_mode_logical : 1,
2382 reserved_1 : 5,
2383 destid_0_23 : 24,
2384 vector : 8,
2385 reserved_2 : 16,
2386 destid_24_31 : 8;
2387 };
2388 } __attribute__ ((packed));
2389
2390
2391 static struct irq_chip intcapxt_controller;
2392
2393 static int intcapxt_irqdomain_activate(struct irq_domain *domain,
2394 struct irq_data *irqd, bool reserve)
2395 {
2396 return 0;
2397 }
2398
2399 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
2400 struct irq_data *irqd)
2401 {
2402 }
2403
2404
2405 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2406 unsigned int nr_irqs, void *arg)
2407 {
2408 struct irq_alloc_info *info = arg;
2409 int i, ret;
2410
2411 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2412 return -EINVAL;
2413
2414 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2415 if (ret < 0)
2416 return ret;
2417
2418 for (i = virq; i < virq + nr_irqs; i++) {
2419 struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2420
2421 irqd->chip = &intcapxt_controller;
2422 irqd->hwirq = info->hwirq;
2423 irqd->chip_data = info->data;
2424 __irq_set_handler(i, handle_edge_irq, 0, "edge");
2425 }
2426
2427 return ret;
2428 }
2429
2430 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2431 unsigned int nr_irqs)
2432 {
2433 irq_domain_free_irqs_top(domain, virq, nr_irqs);
2434 }
2435
2436
2437 static void intcapxt_unmask_irq(struct irq_data *irqd)
2438 {
2439 struct amd_iommu *iommu = irqd->chip_data;
2440 struct irq_cfg *cfg = irqd_cfg(irqd);
2441 union intcapxt xt;
2442
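	/*
	 * Build the register value from the vector and APIC destination id
	 * assigned by the parent vector domain; irqd->hwirq holds the MMIO
	 * offset of the per-log interrupt register to program.
	 */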
2443 xt.capxt = 0ULL;
2444 xt.dest_mode_logical = apic->dest_mode_logical;
2445 xt.vector = cfg->vector;
2446 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2447 xt.destid_24_31 = cfg->dest_apicid >> 24;
2448
2449 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
2450 }
2451
2452 static void intcapxt_mask_irq(struct irq_data *irqd)
2453 {
2454 struct amd_iommu *iommu = irqd->chip_data;
2455
2456 writeq(0, iommu->mmio_base + irqd->hwirq);
2457 }
2458
2459
2460 static int intcapxt_set_affinity(struct irq_data *irqd,
2461 const struct cpumask *mask, bool force)
2462 {
2463 struct irq_data *parent = irqd->parent_data;
2464 int ret;
2465
2466 ret = parent->chip->irq_set_affinity(parent, mask, force);
2467 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2468 return ret;
2469 return 0;
2470 }
2471
2472 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
2473 {
2474 return on ? -EOPNOTSUPP : 0;
2475 }
2476
2477 static struct irq_chip intcapxt_controller = {
2478 .name = "IOMMU-MSI",
2479 .irq_unmask = intcapxt_unmask_irq,
2480 .irq_mask = intcapxt_mask_irq,
2481 .irq_ack = irq_chip_ack_parent,
2482 .irq_retrigger = irq_chip_retrigger_hierarchy,
2483 .irq_set_affinity = intcapxt_set_affinity,
2484 .irq_set_wake = intcapxt_set_wake,
2485 .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED,
2486 };
2487
2488 static const struct irq_domain_ops intcapxt_domain_ops = {
2489 .alloc = intcapxt_irqdomain_alloc,
2490 .free = intcapxt_irqdomain_free,
2491 .activate = intcapxt_irqdomain_activate,
2492 .deactivate = intcapxt_irqdomain_deactivate,
2493 };
2494
2495
2496 static struct irq_domain *iommu_irqdomain;
2497
2498 static struct irq_domain *iommu_get_irqdomain(void)
2499 {
2500 struct fwnode_handle *fn;
2501
2502 /* No need for locking here (yet) as the init is single-threaded */
2503 if (iommu_irqdomain)
2504 return iommu_irqdomain;
2505
2506 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2507 if (!fn)
2508 return NULL;
2509
2510 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2511 fn, &intcapxt_domain_ops,
2512 NULL);
2513 if (!iommu_irqdomain)
2514 irq_domain_free_fwnode(fn);
2515
2516 return iommu_irqdomain;
2517 }
2518
2519 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
2520 int hwirq, irq_handler_t thread_fn)
2521 {
2522 struct irq_domain *domain;
2523 struct irq_alloc_info info;
2524 int irq, ret;
2525 int node = dev_to_node(&iommu->dev->dev);
2526
2527 domain = iommu_get_irqdomain();
2528 if (!domain)
2529 return -ENXIO;
2530
2531 init_irq_alloc_info(&info, NULL);
2532 info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
2533 info.data = iommu;
2534 info.hwirq = hwirq;
2535
2536 irq = irq_domain_alloc_irqs(domain, 1, node, &info);
2537 if (irq < 0) {
2538 irq_domain_remove(domain);
2539 return irq;
2540 }
2541
2542 ret = request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT, devname,
2543 iommu);
2544 if (ret) {
2545 irq_domain_free_irqs(irq, 1);
2546 irq_domain_remove(domain);
2547 return ret;
2548 }
2549
2550 return 0;
2551 }
2552
2553 static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2554 {
2555 int ret;
2556
2557 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
2558 "AMD-Vi%d-Evt", iommu->index);
2559 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
2560 MMIO_INTCAPXT_EVT_OFFSET,
2561 amd_iommu_int_thread_evtlog);
2562 if (ret)
2563 return ret;
2564
2565 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
2566 "AMD-Vi%d-PPR", iommu->index);
2567 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
2568 MMIO_INTCAPXT_PPR_OFFSET,
2569 amd_iommu_int_thread_pprlog);
2570 if (ret)
2571 return ret;
2572
2573 #ifdef CONFIG_IRQ_REMAP
2574 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
2575 "AMD-Vi%d-GA", iommu->index);
2576 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
2577 MMIO_INTCAPXT_GALOG_OFFSET,
2578 amd_iommu_int_thread_galog);
2579 #endif
2580
2581 return ret;
2582 }
2583
2584 static int iommu_init_irq(struct amd_iommu *iommu)
2585 {
2586 int ret;
2587
2588 if (iommu->int_enabled)
2589 goto enable_faults;
2590
2591 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2592 ret = iommu_setup_intcapxt(iommu);
2593 else if (iommu->dev->msi_cap)
2594 ret = iommu_setup_msi(iommu);
2595 else
2596 ret = -ENODEV;
2597
2598 if (ret)
2599 return ret;
2600
2601 iommu->int_enabled = true;
2602 enable_faults:
2603
2604 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2605 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2606
2607 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2608
2609 return 0;
2610 }
2611
2612 /****************************************************************************
2613 *
2614 * The next functions belong to the third pass of parsing the ACPI
2615 * table. In this last pass the memory mapping requirements are
2616 * gathered (like exclusion and unity mapping ranges).
2617 *
2618 ****************************************************************************/
2619
2620 static void __init free_unity_maps(void)
2621 {
2622 struct unity_map_entry *entry, *next;
2623 struct amd_iommu_pci_seg *p, *pci_seg;
2624
2625 for_each_pci_segment_safe(pci_seg, p) {
2626 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2627 list_del(&entry->list);
2628 kfree(entry);
2629 }
2630 }
2631 }
2632
2633 /* called for unity map ACPI definition */
2634 static int __init init_unity_map_range(struct ivmd_header *m,
2635 struct acpi_table_header *ivrs_base)
2636 {
2637 struct unity_map_entry *e = NULL;
2638 struct amd_iommu_pci_seg *pci_seg;
2639 char *s;
2640
2641 pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2642 if (pci_seg == NULL)
2643 return -ENOMEM;
2644
2645 e = kzalloc_obj(*e);
2646 if (e == NULL)
2647 return -ENOMEM;
2648
2649 switch (m->type) {
2650 default:
2651 kfree(e);
2652 return 0;
2653 case ACPI_IVMD_TYPE:
2654 s = "IVMD_TYPEi\t\t\t";
2655 e->devid_start = e->devid_end = m->devid;
2656 break;
2657 case ACPI_IVMD_TYPE_ALL:
2658 s = "IVMD_TYPE_ALL\t\t";
2659 e->devid_start = 0;
2660 e->devid_end = pci_seg->last_bdf;
2661 break;
2662 case ACPI_IVMD_TYPE_RANGE:
2663 s = "IVMD_TYPE_RANGE\t\t";
2664 e->devid_start = m->devid;
2665 e->devid_end = m->aux;
2666 break;
2667 }
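	/*
	 * The IVMD IR/IW flag bits are shifted down to form the unity-map
	 * protection value.
	 */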
2668 e->address_start = PAGE_ALIGN(m->range_start);
2669 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2670 e->prot = m->flags >> 1;
2671
2672 /*
2673 * Treat per-device exclusion ranges as r/w unity-mapped regions
2674 * since some buggy BIOSes may overwrite the exclusion range
2675 * (exclusion_start and exclusion_length members). This happens
2676 * when there are multiple exclusion ranges (IVMD entries) defined
2677 * in the ACPI table.
2678 */
2679 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2680 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2681
2682 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
2683 "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
2684 " flags: %x\n", s, m->pci_seg,
2685 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2686 PCI_FUNC(e->devid_start), m->pci_seg,
2687 PCI_BUS_NUM(e->devid_end),
2688 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2689 e->address_start, e->address_end, m->flags);
2690
2691 list_add_tail(&e->list, &pci_seg->unity_map);
2692
2693 return 0;
2694 }
2695
2696 /* iterates over all memory definitions we find in the ACPI table */
2697 static int __init init_memory_definitions(struct acpi_table_header *table)
2698 {
2699 u8 *p = (u8 *)table, *end = (u8 *)table;
2700 struct ivmd_header *m;
2701
2702 end += table->length;
2703 p += IVRS_HEADER_LENGTH;
2704
2705 while (p < end) {
2706 m = (struct ivmd_header *)p;
2707 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2708 init_unity_map_range(m, table);
2709
2710 p += m->length;
2711 }
2712
2713 return 0;
2714 }
2715
2716 /*
2717 * Init the device table to not allow DMA access for devices
2718 */
2719 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2720 {
2721 u32 devid;
2722 struct dev_table_entry *dev_table = pci_seg->dev_table;
2723
2724 if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE)
2725 return;
2726
2727 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2728 set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID);
2729 if (!amd_iommu_snp_en)
2730 set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION);
2731 }
2732 }
2733
2734 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2735 {
2736 u32 devid;
2737 struct dev_table_entry *dev_table = pci_seg->dev_table;
2738
2739 if (dev_table == NULL)
2740 return;
2741
2742 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2743 dev_table[devid].data[0] = 0ULL;
2744 dev_table[devid].data[1] = 0ULL;
2745 }
2746 }
2747
2748 static void init_device_table(void)
2749 {
2750 struct amd_iommu_pci_seg *pci_seg;
2751 u32 devid;
2752
2753 if (!amd_iommu_irq_remap)
2754 return;
2755
2756 for_each_pci_segment(pci_seg) {
2757 for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
2758 set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN);
2759 }
2760 }
2761
2762 static void iommu_init_flags(struct amd_iommu *iommu)
2763 {
2764 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2765 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2766 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2767
2768 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2769 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2770 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2771
2772 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2773 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2774 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2775
2776 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2777 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2778 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2779
2780 /*
2781 * make IOMMU memory accesses cache coherent
2782 */
2783 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2784
2785 /* Set IOTLB invalidation timeout to 1s */
2786 iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT);
2787
2788 /* Enable Enhanced Peripheral Page Request Handling */
2789 if (check_feature(FEATURE_EPHSUP))
2790 iommu_feature_enable(iommu, CONTROL_EPH_EN);
2791 }
2792
2793 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2794 {
2795 int i, j;
2796 u32 ioc_feature_control;
2797 struct pci_dev *pdev = iommu->root_pdev;
2798
2799 /* RD890 BIOSes may not have completely reconfigured the iommu */
2800 if (!is_rd890_iommu(iommu->dev) || !pdev)
2801 return;
2802
2803 /*
2804 * First, we need to ensure that the iommu is enabled. This is
2805 * controlled by a register in the northbridge
2806 */
2807
2808 /* Select Northbridge indirect register 0x75 and enable writing */
2809 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2810 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2811
2812 /* Enable the iommu */
2813 if (!(ioc_feature_control & 0x1))
2814 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2815
2816 /* Restore the iommu BAR */
2817 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2818 iommu->stored_addr_lo);
2819 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2820 iommu->stored_addr_hi);
2821
2822 /* Restore the l1 indirect regs for each of the 6 l1s */
2823 for (i = 0; i < 6; i++)
2824 for (j = 0; j < 0x12; j++)
2825 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2826
2827 /* Restore the l2 indirect regs */
2828 for (i = 0; i < 0x83; i++)
2829 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2830
2831 /* Lock PCI setup registers */
2832 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2833 iommu->stored_addr_lo | 1);
2834 }
2835
2836 static void iommu_enable_ga(struct amd_iommu *iommu)
2837 {
2838 #ifdef CONFIG_IRQ_REMAP
2839 switch (amd_iommu_guest_ir) {
2840 case AMD_IOMMU_GUEST_IR_VAPIC:
2841 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2842 iommu_feature_enable(iommu, CONTROL_GA_EN);
2843 iommu->irte_ops = &irte_128_ops;
2844 break;
2845 default:
2846 iommu->irte_ops = &irte_32_ops;
2847 break;
2848 }
2849 #endif
2850 }
2851
2852 static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2853 {
2854 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2855 }
2856
2857 static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2858 {
2859 u64 ctrl;
2860
2861 if (!amd_iommu_irtcachedis)
2862 return;
2863
2864 /*
2865 * Note:
2866 * Support for the IRTCacheDis feature is determined by
2867 * checking whether the bit is writable.
2868 */
2869 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2870 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2871 ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
2872 if (ctrl)
2873 iommu->irtcachedis_enabled = true;
2874 pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2875 iommu->index, iommu->devid,
2876 iommu->irtcachedis_enabled ? "disabled" : "enabled");
2877 }
2878
2879 static void iommu_enable_2k_int(struct amd_iommu *iommu)
2880 {
2881 if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2882 return;
2883
2884 iommu_feature_set(iommu,
2885 CONTROL_NUM_INT_REMAP_MODE_2K,
2886 CONTROL_NUM_INT_REMAP_MODE_MASK,
2887 CONTROL_NUM_INT_REMAP_MODE);
2888 }
2889
2890 static void early_enable_iommu(struct amd_iommu *iommu)
2891 {
2892 iommu_disable(iommu);
2893 iommu_init_flags(iommu);
2894 iommu_set_device_table(iommu);
2895 iommu_enable_command_buffer(iommu);
2896 iommu_enable_event_buffer(iommu);
2897 iommu_set_exclusion_range(iommu);
2898 iommu_enable_gt(iommu);
2899 iommu_enable_ga(iommu);
2900 iommu_enable_xt(iommu);
2901 iommu_enable_irtcachedis(iommu);
2902 iommu_enable_2k_int(iommu);
2903 iommu_enable(iommu);
2904 amd_iommu_flush_all_caches(iommu);
2905 }
2906
2907 /*
2908 * This function finally enables all IOMMUs found in the system after
2909 * they have been initialized.
2910 *
2911 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
2912 * reuse the old device table entries. If that is not the case, or if the
2913 * reuse fails, just continue as a normal kernel does.
2914 */
2915 static void early_enable_iommus(void)
2916 {
2917 struct amd_iommu *iommu;
2918 struct amd_iommu_pci_seg *pci_seg;
2919
2920 if (!reuse_device_table()) {
2921 /*
2922 * If we get here because reusing the device table from the old
2923 * kernel (with all IOMMUs enabled) failed, print an error message
2924 * and free the allocated old_dev_tbl_cpy.
2925 */
2926 if (amd_iommu_pre_enabled) {
2927 pr_err("Failed to reuse DEV table from previous kernel.\n");
2928 /*
2929 * Bail out early if unable to remap/reuse DEV table from
2930 * previous kernel if SNP enabled as IOMMU commands will
2931 * time out without DEV table and cause kdump boot panic.
2932 */
2933 BUG_ON(check_feature(FEATURE_SNP));
2934 }
2935
2936 for_each_pci_segment(pci_seg) {
2937 if (pci_seg->old_dev_tbl_cpy != NULL) {
2938 memunmap((void *)pci_seg->old_dev_tbl_cpy);
2939 pci_seg->old_dev_tbl_cpy = NULL;
2940 }
2941 }
2942
2943 for_each_iommu(iommu) {
2944 clear_translation_pre_enabled(iommu);
2945 early_enable_iommu(iommu);
2946 }
2947 } else {
2948 pr_info("Reused DEV table from previous kernel.\n");
2949
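		/*
		 * Hand the device table inherited from the previous kernel over
		 * to each PCI segment, freeing the table that was allocated
		 * earlier in this boot.
		 */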
2950 for_each_pci_segment(pci_seg) {
2951 iommu_free_pages(pci_seg->dev_table);
2952 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2953 }
2954
2955 for_each_iommu(iommu) {
2956 iommu_disable_command_buffer(iommu);
2957 iommu_disable_event_buffer(iommu);
2958 iommu_disable_irtcachedis(iommu);
2959 iommu_enable_command_buffer(iommu);
2960 iommu_enable_event_buffer(iommu);
2961 iommu_enable_ga(iommu);
2962 iommu_enable_xt(iommu);
2963 iommu_enable_irtcachedis(iommu);
2964 iommu_enable_2k_int(iommu);
2965 iommu_set_device_table(iommu);
2966 amd_iommu_flush_all_caches(iommu);
2967 }
2968 }
2969 }
2970
2971 static void enable_iommus_ppr(void)
2972 {
2973 struct amd_iommu *iommu;
2974
2975 if (!amd_iommu_gt_ppr_supported())
2976 return;
2977
2978 for_each_iommu(iommu)
2979 amd_iommu_enable_ppr_log(iommu);
2980 }
2981
2982 static void enable_iommus_vapic(void)
2983 {
2984 #ifdef CONFIG_IRQ_REMAP
2985 u32 status, i;
2986 struct amd_iommu *iommu;
2987
2988 for_each_iommu(iommu) {
2989 /*
2990 * Disable GALog if already running. It could have been enabled
2991 * in the previous boot before kdump.
2992 */
2993 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2994 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2995 continue;
2996
2997 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2998 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2999
3000 /*
3001 * Wait for the GALogRun bit to read back as zero before it is
3002 * safe to set up or modify the GA Log registers.
3003 */
3004 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
3005 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
3006 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
3007 break;
3008 udelay(10);
3009 }
3010
3011 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
3012 return;
3013 }
3014
3015 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
3016 !check_feature(FEATURE_GAM_VAPIC)) {
3017 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3018 return;
3019 }
3020
3021 if (amd_iommu_snp_en &&
3022 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
3023 pr_warn("Force to disable Virtual APIC due to SNP\n");
3024 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3025 return;
3026 }
3027
3028 /* Enabling GAM and SNPAVIC support */
3029 for_each_iommu(iommu) {
3030 if (iommu_init_ga_log(iommu) ||
3031 iommu_ga_log_enable(iommu))
3032 return;
3033
3034 iommu_feature_enable(iommu, CONTROL_GAM_EN);
3035 if (amd_iommu_snp_en)
3036 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
3037 }
3038
3039 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
3040 pr_info("Virtual APIC enabled\n");
3041 #endif
3042 }
3043
3044 static void disable_iommus(void)
3045 {
3046 struct amd_iommu *iommu;
3047
3048 for_each_iommu(iommu)
3049 iommu_disable(iommu);
3050
3051 #ifdef CONFIG_IRQ_REMAP
3052 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
3053 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
3054 #endif
3055 }
3056
3057 /*
3058 * Suspend/Resume support
3059 * disable suspend until real resume implemented
3060 */
3061
3062 static void amd_iommu_resume(void *data)
3063 {
3064 struct amd_iommu *iommu;
3065
3066 for_each_iommu(iommu)
3067 iommu_apply_resume_quirks(iommu);
3068
3069 /* re-load the hardware */
3070 for_each_iommu(iommu)
3071 early_enable_iommu(iommu);
3072
3073 amd_iommu_enable_interrupts();
3074 }
3075
3076 static int amd_iommu_suspend(void *data)
3077 {
3078 /* disable IOMMUs to go out of the way for BIOS */
3079 disable_iommus();
3080
3081 return 0;
3082 }
3083
3084 static const struct syscore_ops amd_iommu_syscore_ops = {
3085 .suspend = amd_iommu_suspend,
3086 .resume = amd_iommu_resume,
3087 };
3088
3089 static struct syscore amd_iommu_syscore = {
3090 .ops = &amd_iommu_syscore_ops,
3091 };
3092
3093 static void __init free_iommu_resources(void)
3094 {
3095 free_iommu_all();
3096 free_pci_segments();
3097 }
3098
3099 /* SB IOAPIC is always on this device in AMD systems */
3100 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
3101
3102 static bool __init check_ioapic_information(void)
3103 {
3104 const char *fw_bug = FW_BUG;
3105 bool ret, has_sb_ioapic;
3106 int idx;
3107
3108 has_sb_ioapic = false;
3109 ret = false;
3110
3111 /*
3112 * If we have map overrides on the kernel command line, the
3113 * messages in this function might not describe firmware bugs
3114 * anymore - so be careful
3115 */
3116 if (cmdline_maps)
3117 fw_bug = "";
3118
3119 for (idx = 0; idx < nr_ioapics; idx++) {
3120 int devid, id = mpc_ioapic_id(idx);
3121
3122 devid = get_ioapic_devid(id);
3123 if (devid < 0) {
3124 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
3125 fw_bug, id);
3126 ret = false;
3127 } else if (devid == IOAPIC_SB_DEVID) {
3128 has_sb_ioapic = true;
3129 ret = true;
3130 }
3131 }
3132
3133 if (!has_sb_ioapic) {
3134 /*
3135 * We expect the SB IOAPIC to be listed in the IVRS
3136 * table. The system timer is connected to the SB IOAPIC
3137 * and if we don't have it in the list the system will
3138 * panic at boot time. This situation usually happens
3139 * when the BIOS is buggy and provides us the wrong
3140 * device id for the IOAPIC in the system.
3141 */
3142 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
3143 }
3144
3145 if (!ret)
3146 pr_err("Disabling interrupt remapping\n");
3147
3148 return ret;
3149 }
3150
3151 static void __init free_dma_resources(void)
3152 {
3153 amd_iommu_pdom_id_destroy();
3154 free_unity_maps();
3155 }
3156
3157 static void __init ivinfo_init(void *ivrs)
3158 {
3159 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
3160 }
3161
3162 /*
3163 * This is the hardware init function for AMD IOMMU in the system.
3164 * This function is called either from amd_iommu_init or from the interrupt
3165 * remapping setup code.
3166 *
3167 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3168 * four times:
3169 *
3170 * 1 pass) Discover the most comprehensive IVHD type to use.
3171 *
3172 * 2 pass) Find the highest PCI device id the driver has to handle.
3173 * Upon this information the size of the data structures is
3174 * determined that needs to be allocated.
3175 *
3176 * 3 pass) Initialize the data structures just allocated with the
3177 * information in the ACPI table about available AMD IOMMUs
3178 * in the system. It also maps the PCI devices in the
3179 * system to specific IOMMUs
3180 *
3181 * 4 pass) After the basic data structures are allocated and
3182 * initialized we update them with information about memory
3183 * remapping requirements parsed out of the ACPI table in
3184 * this last pass.
3185 *
3186 * After everything is set up the IOMMUs are enabled and the necessary
3187 * hotplug and suspend notifiers are registered.
3188 */
3189 static int __init early_amd_iommu_init(void)
3190 {
3191 struct acpi_table_header *ivrs_base;
3192 int ret;
3193 acpi_status status;
3194 u8 efr_hats;
3195
3196 if (!amd_iommu_detected)
3197 return -ENODEV;
3198
3199 status = acpi_get_table("IVRS", 0, &ivrs_base);
3200 if (status == AE_NOT_FOUND)
3201 return -ENODEV;
3202 else if (ACPI_FAILURE(status)) {
3203 const char *err = acpi_format_exception(status);
3204 pr_err("IVRS table error: %s\n", err);
3205 return -EINVAL;
3206 }
3207
3208 if (!boot_cpu_has(X86_FEATURE_CX16)) {
3209 pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
3210 ret = -EINVAL;
3211 goto out;
3212 }
3213
3214 /*
3215 * Validate checksum here so we don't need to do it when
3216 * we actually parse the table
3217 */
3218 ret = check_ivrs_checksum(ivrs_base);
3219 if (ret)
3220 goto out;
3221
3222 ivinfo_init(ivrs_base);
3223
3224 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3225 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
3226
3227 /*
3228 * now the data structures are allocated and basically initialized
3229 * start the real acpi table scan
3230 */
3231 ret = init_iommu_all(ivrs_base);
3232 if (ret)
3233 goto out;
3234
3235 /* 5 level guest page table */
3236 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3237 FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
3238 amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
3239
3240 efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr);
3241 if (efr_hats != 0x3) {
3242 /*
3243 * The EFR[HATS] field encodes the maximum host translation level
3244 * supported relative to 4-level paging (an encoding of 0 means 4 levels).
3245 */
3246 amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL;
3247 } else {
3248 pr_warn_once(FW_BUG "Disable host address translation due to invalid translation level (%#x).\n",
3249 efr_hats);
3250 amd_iommu_hatdis = true;
3251 }
3252
3253 if (amd_iommu_pgtable == PD_MODE_V2) {
3254 if (!amd_iommu_v2_pgtbl_supported()) {
3255 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
3256 amd_iommu_pgtable = PD_MODE_V1;
3257 }
3258 }
3259
3260 if (amd_iommu_hatdis) {
3261 /*
3262 * Host (v1) page table is not available. Attempt to use
3263 * Guest (v2) page table.
3264 */
3265 if (amd_iommu_v2_pgtbl_supported())
3266 amd_iommu_pgtable = PD_MODE_V2;
3267 else
3268 amd_iommu_pgtable = PD_MODE_NONE;
3269 }
3270
3271 /* Disable any previously enabled IOMMUs */
3272 if (!is_kdump_kernel() || amd_iommu_disabled)
3273 disable_iommus();
3274
3275 if (amd_iommu_irq_remap)
3276 amd_iommu_irq_remap = check_ioapic_information();
3277
3278 if (amd_iommu_irq_remap) {
3279 struct amd_iommu_pci_seg *pci_seg;
3280 ret = -ENOMEM;
3281 for_each_pci_segment(pci_seg) {
3282 if (alloc_irq_lookup_table(pci_seg))
3283 goto out;
3284 }
3285 }
3286
3287 ret = init_memory_definitions(ivrs_base);
3288 if (ret)
3289 goto out;
3290
3291 /* init the device table */
3292 init_device_table();
3293
3294 out:
3295 /* Don't leak any ACPI memory */
3296 acpi_put_table(ivrs_base);
3297
3298 return ret;
3299 }
3300
3301 static int amd_iommu_enable_interrupts(void)
3302 {
3303 struct amd_iommu *iommu;
3304 int ret = 0;
3305
3306 for_each_iommu(iommu) {
3307 ret = iommu_init_irq(iommu);
3308 if (ret)
3309 goto out;
3310 }
3311
3312 /*
3313 * Interrupt handler is ready to process interrupts. Enable
3314 * PPR and GA log interrupt for all IOMMUs.
3315 */
3316 enable_iommus_vapic();
3317 enable_iommus_ppr();
3318
3319 out:
3320 return ret;
3321 }
3322
3323 static bool __init detect_ivrs(void)
3324 {
3325 struct acpi_table_header *ivrs_base;
3326 acpi_status status;
3327 int i;
3328
3329 status = acpi_get_table("IVRS", 0, &ivrs_base);
3330 if (status == AE_NOT_FOUND)
3331 return false;
3332 else if (ACPI_FAILURE(status)) {
3333 const char *err = acpi_format_exception(status);
3334 pr_err("IVRS table error: %s\n", err);
3335 return false;
3336 }
3337
3338 acpi_put_table(ivrs_base);
3339
3340 if (amd_iommu_force_enable)
3341 goto out;
3342
3343 /* Don't use IOMMU if there is Stoney Ridge graphics */
3344 for (i = 0; i < 32; i++) {
3345 u32 pci_id;
3346
3347 pci_id = read_pci_config(0, i, 0, 0);
3348 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
3349 pr_info("Disable IOMMU on Stoney Ridge\n");
3350 return false;
3351 }
3352 }
3353
3354 out:
3355 /* Make sure ACS will be enabled during PCI probe */
3356 pci_request_acs();
3357
3358 return true;
3359 }
3360
3361 static __init void iommu_snp_enable(void)
3362 {
3363 #ifdef CONFIG_KVM_AMD_SEV
3364 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3365 return;
3366 /*
3367 * The SNP support requires that IOMMU must be enabled, and is
3368 * configured with V1 page table (DTE[Mode] = 0 is not supported).
3369 */
3370 if (no_iommu || iommu_default_passthrough()) {
3371 pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
3372 goto disable_snp;
3373 }
3374
3375 if (amd_iommu_pgtable != PD_MODE_V1) {
3376 pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
3377 goto disable_snp;
3378 }
3379
3380 amd_iommu_snp_en = check_feature(FEATURE_SNP);
3381 if (!amd_iommu_snp_en) {
3382 pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
3383 goto disable_snp;
3384 }
3385
3386 /*
3387 * Enable host SNP support once SNP support is checked on IOMMU.
3388 */
3389 if (snp_rmptable_init()) {
3390 pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n");
3391 goto disable_snp;
3392 }
3393
3394 pr_info("IOMMU SNP support enabled.\n");
3395 return;
3396
3397 disable_snp:
3398 cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3399 #endif
3400 }
3401
3402 /****************************************************************************
3403 *
3404 * AMD IOMMU Initialization State Machine
3405 *
3406 ****************************************************************************/
3407
3408 static int __init state_next(void)
3409 {
3410 int ret = 0;
3411
3412 switch (init_state) {
3413 case IOMMU_START_STATE:
3414 if (!detect_ivrs()) {
3415 init_state = IOMMU_NOT_FOUND;
3416 ret = -ENODEV;
3417 } else {
3418 init_state = IOMMU_IVRS_DETECTED;
3419 }
3420 break;
3421 case IOMMU_IVRS_DETECTED:
3422 if (amd_iommu_disabled) {
3423 init_state = IOMMU_CMDLINE_DISABLED;
3424 ret = -EINVAL;
3425 } else {
3426 ret = early_amd_iommu_init();
3427 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
3428 }
3429 break;
3430 case IOMMU_ACPI_FINISHED:
3431 early_enable_iommus();
3432 x86_platform.iommu_shutdown = disable_iommus;
3433 init_state = IOMMU_ENABLED;
3434 break;
3435 case IOMMU_ENABLED:
3436 register_syscore(&amd_iommu_syscore);
3437 iommu_snp_enable();
3438 ret = amd_iommu_init_pci();
3439 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
3440 break;
3441 case IOMMU_PCI_INIT:
3442 ret = amd_iommu_enable_interrupts();
3443 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
3444 break;
3445 case IOMMU_INTERRUPTS_EN:
3446 init_state = IOMMU_INITIALIZED;
3447 break;
3448 case IOMMU_INITIALIZED:
3449 /* Nothing to do */
3450 break;
3451 case IOMMU_NOT_FOUND:
3452 case IOMMU_INIT_ERROR:
3453 case IOMMU_CMDLINE_DISABLED:
3454 /* Error states => do nothing */
3455 ret = -EINVAL;
3456 break;
3457 default:
3458 /* Unknown state */
3459 BUG();
3460 }
3461
3462 if (ret) {
3463 free_dma_resources();
3464 if (!irq_remapping_enabled) {
3465 disable_iommus();
3466 free_iommu_resources();
3467 } else {
3468 struct amd_iommu *iommu;
3469 struct amd_iommu_pci_seg *pci_seg;
3470
3471 for_each_pci_segment(pci_seg)
3472 uninit_device_table_dma(pci_seg);
3473
3474 for_each_iommu(iommu)
3475 amd_iommu_flush_all_caches(iommu);
3476 }
3477 }
3478 return ret;
3479 }
3480
3481 static int __init iommu_go_to_state(enum iommu_init_state state)
3482 {
3483 int ret = -EINVAL;
3484
3485 while (init_state != state) {
3486 if (init_state == IOMMU_NOT_FOUND ||
3487 init_state == IOMMU_INIT_ERROR ||
3488 init_state == IOMMU_CMDLINE_DISABLED)
3489 break;
3490 ret = state_next();
3491 }
3492
3493 /*
3494 * SNP platform initialization requires IOMMUs to be fully configured.
3495 * If the SNP support on IOMMUs has NOT been checked, simply mark SNP
3496 * as unsupported. If the SNP support on IOMMUs has been checked and
3497 * host SNP support enabled but RMP enforcement has not been enabled
3498 * in IOMMUs, then the system is in a half-baked state, but can limp
3499 * along as all memory should be Hypervisor-Owned in the RMP. WARN,
3500 * but leave SNP as "supported" to avoid confusing the kernel.
3501 */
3502 if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
3503 !WARN_ON_ONCE(amd_iommu_snp_en))
3504 cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3505
3506 return ret;
3507 }
3508
3509 #ifdef CONFIG_IRQ_REMAP
3510 int __init amd_iommu_prepare(void)
3511 {
3512 int ret;
3513
3514 amd_iommu_irq_remap = true;
3515
3516 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3517 if (ret) {
3518 amd_iommu_irq_remap = false;
3519 return ret;
3520 }
3521
3522 return amd_iommu_irq_remap ? 0 : -ENODEV;
3523 }
3524
3525 int __init amd_iommu_enable(void)
3526 {
3527 int ret;
3528
3529 ret = iommu_go_to_state(IOMMU_ENABLED);
3530 if (ret)
3531 return ret;
3532
3533 irq_remapping_enabled = 1;
3534 return amd_iommu_xt_mode;
3535 }
3536
3537 void amd_iommu_disable(void)
3538 {
3539 amd_iommu_suspend(NULL);
3540 }
3541
3542 int amd_iommu_reenable(int mode)
3543 {
3544 amd_iommu_resume(NULL);
3545
3546 return 0;
3547 }
3548
3549 int amd_iommu_enable_faulting(unsigned int cpu)
3550 {
3551 /* We enable MSI later when PCI is initialized */
3552 return 0;
3553 }
3554 #endif
3555
3556 /*
3557 * This is the core init function for AMD IOMMU hardware in the system.
3558 * This function is called from the generic x86 DMA layer initialization
3559 * code.
3560 */
3561 static int __init amd_iommu_init(void)
3562 {
3563 int ret;
3564
3565 ret = iommu_go_to_state(IOMMU_INITIALIZED);
3566 #ifdef CONFIG_GART_IOMMU
3567 if (ret && list_empty(&amd_iommu_list)) {
3568 /*
3569 * We failed to initialize the AMD IOMMU - try fallback
3570 * to GART if possible.
3571 */
3572 gart_iommu_init();
3573 }
3574 #endif
3575
3576 if (!ret)
3577 amd_iommu_debugfs_setup();
3578
3579 return ret;
3580 }
3581
3582 static bool amd_iommu_sme_check(void)
3583 {
3584 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3585 (boot_cpu_data.x86 != 0x17))
3586 return true;
3587
3588 /* For Fam17h, a specific level of support is required */
3589 if (boot_cpu_data.microcode >= 0x08001205)
3590 return true;
3591
3592 if ((boot_cpu_data.microcode >= 0x08001126) &&
3593 (boot_cpu_data.microcode <= 0x080011ff))
3594 return true;
3595
3596 pr_notice("IOMMU not currently supported when SME is active\n");
3597
3598 return false;
3599 }
3600
3601 /****************************************************************************
3602 *
3603 * Early detect code. This code runs at IOMMU detection time in the DMA
3604 * layer. It just looks if there is an IVRS ACPI table to detect AMD
3605 * IOMMUs
3606 *
3607 ****************************************************************************/
3608 void __init amd_iommu_detect(void)
3609 {
3610 int ret;
3611
3612 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3613 goto disable_snp;
3614
3615 if (!amd_iommu_sme_check())
3616 goto disable_snp;
3617
3618 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3619 if (ret)
3620 goto disable_snp;
3621
3622 amd_iommu_detected = true;
3623 iommu_detected = 1;
3624 x86_init.iommu.iommu_init = amd_iommu_init;
3625 return;
3626
3627 disable_snp:
3628 if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3629 cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3630 }
3631
3632 /****************************************************************************
3633 *
3634 * Parsing functions for the AMD IOMMU specific kernel command line
3635 * options.
3636 *
3637 ****************************************************************************/
3638
3639 static int __init parse_amd_iommu_dump(char *str)
3640 {
3641 amd_iommu_dump = true;
3642
3643 return 1;
3644 }
3645
3646 static int __init parse_amd_iommu_intr(char *str)
3647 {
3648 for (; *str; ++str) {
3649 if (strncmp(str, "legacy", 6) == 0) {
3650 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3651 break;
3652 }
3653 if (strncmp(str, "vapic", 5) == 0) {
3654 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3655 break;
3656 }
3657 }
3658 return 1;
3659 }
3660
3661 static int __init parse_amd_iommu_options(char *str)
3662 {
3663 if (!str)
3664 return -EINVAL;
3665
3666 while (*str) {
3667 if (strncmp(str, "fullflush", 9) == 0) {
3668 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3669 iommu_set_dma_strict();
3670 } else if (strncmp(str, "force_enable", 12) == 0) {
3671 amd_iommu_force_enable = true;
3672 } else if (strncmp(str, "off", 3) == 0) {
3673 amd_iommu_disabled = true;
3674 } else if (strncmp(str, "force_isolation", 15) == 0) {
3675 amd_iommu_force_isolation = true;
3676 } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
3677 amd_iommu_pgtable = PD_MODE_V1;
3678 } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
3679 amd_iommu_pgtable = PD_MODE_V2;
3680 } else if (strncmp(str, "irtcachedis", 11) == 0) {
3681 amd_iommu_irtcachedis = true;
3682 } else if (strncmp(str, "nohugepages", 11) == 0) {
3683 pr_info("Restricting V1 page-sizes to 4KiB");
3684 amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K;
3685 } else if (strncmp(str, "v2_pgsizes_only", 15) == 0) {
3686 pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB");
3687 amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
3688 } else {
3689 pr_notice("Unknown option - '%s'\n", str);
3690 }
3691
3692 str += strcspn(str, ",");
3693 while (*str == ',')
3694 str++;
3695 }
3696
3697 return 1;
3698 }
3699
3700 static int __init parse_ivrs_ioapic(char *str)
3701 {
3702 u32 seg = 0, bus, dev, fn;
3703 int id, i;
3704 u32 devid;
3705
3706 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3707 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3708 goto found;
3709
3710 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3711 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3712 pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
3713 str, id, seg, bus, dev, fn);
3714 goto found;
3715 }
3716
3717 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3718 return 1;
3719
3720 found:
3721 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3722 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3723 str);
3724 return 1;
3725 }
3726
3727 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3728
3729 cmdline_maps = true;
3730 i = early_ioapic_map_size++;
3731 early_ioapic_map[i].id = id;
3732 early_ioapic_map[i].devid = devid;
3733 early_ioapic_map[i].cmd_line = true;
3734
3735 return 1;
3736 }
3737
3738 static int __init parse_ivrs_hpet(char *str)
3739 {
3740 u32 seg = 0, bus, dev, fn;
3741 int id, i;
3742 u32 devid;
3743
3744 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3745 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3746 goto found;
3747
3748 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3749 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3750 pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
3751 str, id, seg, bus, dev, fn);
3752 goto found;
3753 }
3754
3755 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3756 return 1;
3757
3758 found:
3759 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3760 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3761 str);
3762 return 1;
3763 }
3764
3765 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3766
3767 cmdline_maps = true;
3768 i = early_hpet_map_size++;
3769 early_hpet_map[i].id = id;
3770 early_hpet_map[i].devid = devid;
3771 early_hpet_map[i].cmd_line = true;
3772
3773 return 1;
3774 }
3775
3776 #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
3777
3778 static int __init parse_ivrs_acpihid(char *str)
3779 {
3780 u32 seg = 0, bus, dev, fn;
3781 char *hid, *uid, *p, *addr;
3782 char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
3783 int i;
3784
3785 addr = strchr(str, '@');
3786 if (!addr) {
3787 addr = strchr(str, '=');
3788 if (!addr)
3789 goto not_found;
3790
3791 ++addr;
3792
3793 if (strlen(addr) > ACPIID_LEN)
3794 goto not_found;
3795
3796 if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
3797 sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
3798 pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
3799 str, acpiid, seg, bus, dev, fn);
3800 goto found;
3801 }
3802 goto not_found;
3803 }
3804
3805 /* We have the '@', make it the terminator to get just the acpiid */
3806 *addr++ = 0;
3807
3808 if (strlen(str) > ACPIID_LEN)
3809 goto not_found;
3810
3811 if (sscanf(str, "=%s", acpiid) != 1)
3812 goto not_found;
3813
3814 if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
3815 sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
3816 goto found;
3817
3818 not_found:
3819 pr_err("Invalid command line: ivrs_acpihid%s\n", str);
3820 return 1;
3821
3822 found:
3823 p = acpiid;
3824 hid = strsep(&p, ":");
3825 uid = p;
3826
3827 if (!hid || !(*hid) || !uid) {
3828 pr_err("Invalid command line: hid or uid\n");
3829 return 1;
3830 }
3831
3832 /*
3833 * Ignore leading zeroes after ':', so e.g., AMDI0095:00
3834 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
3835 */
3836 while (*uid == '0' && *(uid + 1))
3837 uid++;
3838
3839 if (strlen(hid) >= ACPIHID_HID_LEN) {
3840 pr_err("Invalid command line: hid is too long\n");
3841 return 1;
3842 } else if (strlen(uid) >= ACPIHID_UID_LEN) {
3843 pr_err("Invalid command line: uid is too long\n");
3844 return 1;
3845 }
3846
3847 i = early_acpihid_map_size++;
3848 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3849 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3850 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3851 early_acpihid_map[i].cmd_line = true;
3852
3853 return 1;
3854 }
3855
3856 __setup("amd_iommu_dump", parse_amd_iommu_dump);
3857 __setup("amd_iommu=", parse_amd_iommu_options);
3858 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
3859 __setup("ivrs_ioapic", parse_ivrs_ioapic);
3860 __setup("ivrs_hpet", parse_ivrs_hpet);
3861 __setup("ivrs_acpihid", parse_ivrs_acpihid);
3862
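/*
 * Report whether PASID-based features (e.g. SVA) can be used.  Both
 * requirements checked below follow from the hardware/feature setup
 * performed earlier in this file.
 */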
bool amd_iommu_pasid_supported(void)
{
	/* CPU page table size should match IOMMU guest page table size */
	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
		return false;

	/*
	 * Since DTE[Mode]=0 is prohibited on an SNP-enabled system
	 * (i.e. EFR[SNPSup]=1), an IOMMUv2 page table cannot be used
	 * without also setting up an IOMMUv1 page table.
	 */
	return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
}

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}

/****************************************************************************
 *
 * IOMMU EFR Performance Counter (PC) support. This code provides access
 * to the IOMMU PC registers.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}

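/*
 * Note on the MMIO layout assumed by the offset calculation below: the
 * performance-counter registers start at MMIO_CNTR_REG_OFFSET, with the
 * bank selected by the bits above bit 12, the counter by bits 8-11 and
 * the register ("fxn", 8-byte aligned, at most 0x28) by the low byte.
 * Counter values are 48 bits wide, hence the GENMASK_ULL(47, 0) masking
 * on both the read and write paths.
 */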
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}

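/*
 * Exported accessors used by the AMD IOMMU perf (PMU) code.  A minimal
 * usage sketch, with bank 0, counter 0 and register offset 0x00 (the raw
 * counter) chosen purely for illustration:
 *
 *	u64 count;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0x00, &count))
 *		pr_debug("bank0/cntr0 = 0x%llx\n", count);
 */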
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}

#ifdef CONFIG_KVM_AMD_SEV
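/*
 * Helpers for amd_iommu_snp_disable() below.  On SNP-enabled systems the
 * IOMMU log buffers and the completion-wait semaphore page end up with
 * non-shared RMP entries, so before the kernel may reuse or free them
 * each page has to be converted back to a shared 4K page; huge RMP
 * entries are split with psmash() first.
 */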
static int iommu_page_make_shared(void *page)
{
	unsigned long paddr, pfn;

	paddr = iommu_virt_to_phys(page);
	/* The C-bit may be set in the paddr */
	pfn = __sme_clr(paddr) >> PAGE_SHIFT;

	if (!(pfn % PTRS_PER_PMD)) {
		int ret, level;
		bool assigned;

		ret = snp_lookup_rmpentry(pfn, &assigned, &level);
		if (ret) {
			pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
			return ret;
		}

		if (!assigned) {
			pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
			return -EINVAL;
		}

		if (level > PG_LEVEL_4K) {
			ret = psmash(pfn);
			if (!ret)
				goto done;

			pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
				pfn, ret, level);
			return ret;
		}
	}

done:
	return rmp_make_shared(pfn, PG_LEVEL_4K);
}

static int iommu_make_shared(void *va, size_t size)
{
	void *page;
	int ret;

	if (!va)
		return 0;

	for (page = va; page < (va + size); page += PAGE_SIZE) {
		ret = iommu_page_make_shared(page);
		if (ret)
			return ret;
	}

	return 0;
}

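/*
 * Exported for the SEV/SNP side: walk all IOMMUs and convert their event
 * log, PPR log and completion-wait semaphore pages back to shared RMP
 * entries, so the rest of the kernel can treat that memory as ordinary
 * pages again once SNP is not (or no longer) in use.
 */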
int amd_iommu_snp_disable(void)
{
	struct amd_iommu *iommu;
	int ret;

	if (!amd_iommu_snp_en)
		return 0;

	for_each_iommu(iommu) {
		ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
		if (ret)
			return ret;

		ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
		if (ret)
			return ret;

		ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);

bool amd_iommu_sev_tio_supported(void)
{
	return check_feature2(FEATURE_SEVSNPIO_SUP);
}
EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported);
#endif