xref: /linux/drivers/iommu/intel/dmar.c (revision f1d26d72f01556c787b1291729aa7a2ce37656a8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2006, Intel Corporation.
4  *
5  * Copyright (C) 2006-2008 Intel Corporation
6  * Author: Ashok Raj <ashok.raj@intel.com>
7  * Author: Shaohua Li <shaohua.li@intel.com>
8  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
9  *
10  * This file implements early detection/parsing of Remapping Devices
11  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
12  * tables.
13  *
14  * These routines are used by both DMA-remapping and Interrupt-remapping
15  */
16 
17 #define pr_fmt(fmt)     "DMAR: " fmt
18 
19 #include <linux/pci.h>
20 #include <linux/dmar.h>
21 #include <linux/iova.h>
22 #include <linux/timer.h>
23 #include <linux/irq.h>
24 #include <linux/interrupt.h>
25 #include <linux/tboot.h>
26 #include <linux/dmi.h>
27 #include <linux/slab.h>
28 #include <linux/iommu.h>
29 #include <linux/numa.h>
30 #include <linux/limits.h>
31 #include <asm/irq_remapping.h>
32 
33 #include "iommu.h"
34 #include "../irq_remapping.h"
35 #include "../iommu-pages.h"
36 #include "perf.h"
37 #include "trace.h"
38 #include "perfmon.h"
39 
40 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
41 struct dmar_res_callback {
42 	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
43 	void			*arg[ACPI_DMAR_TYPE_RESERVED];
44 	bool			ignore_unhandled;
45 	bool			print_entry;
46 };
47 
48 /*
49  * Assumptions:
50  * 1) The hotplug framework guarantees that DMAR unit will be hot-added
51  *    before IO devices managed by that unit.
52  * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
53  *    after IO devices managed by that unit.
54  * 3) Hotplug events are rare.
55  *
56  * Locking rules for DMA and interrupt remapping related global data structures:
57  * 1) Use dmar_global_lock in process context
58  * 2) Use RCU in interrupt context
59  */
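/*
 * Illustrative sketch of the two rules above, based on callers later in this
 * file rather than any new API: dmar_pci_bus_notifier() runs in process
 * context and takes the rwsem for updates, while dmar_find_matched_drhd_unit()
 * walks the list under RCU:
 *
 *	down_write(&dmar_global_lock);
 *	dmar_pci_bus_add_dev(info);
 *	up_write(&dmar_global_lock);
 *
 *	rcu_read_lock();
 *	for_each_drhd_unit(dmaru)
 *		...;
 *	rcu_read_unlock();
 */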
60 DECLARE_RWSEM(dmar_global_lock);
61 LIST_HEAD(dmar_drhd_units);
62 
63 struct acpi_table_header * __initdata dmar_tbl;
64 static int dmar_dev_scope_status = 1;
65 static DEFINE_IDA(dmar_seq_ids);
66 
67 static int alloc_iommu(struct dmar_drhd_unit *drhd);
68 static void free_iommu(struct intel_iommu *iommu);
69 
70 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
71 {
72 	/*
73 	 * add INCLUDE_ALL at the tail, so a scan of the list will find it at
74 	 * the very end.
75 	 */
76 	if (drhd->include_all)
77 		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
78 	else
79 		list_add_rcu(&drhd->list, &dmar_drhd_units);
80 }
81 
82 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
83 {
84 	struct acpi_dmar_device_scope *scope;
85 
86 	*cnt = 0;
87 	while (start < end) {
88 		scope = start;
89 		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
90 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
91 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
92 			(*cnt)++;
93 		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
94 			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
95 			pr_warn("Unsupported device scope\n");
96 		}
97 		start += scope->length;
98 	}
99 	if (*cnt == 0)
100 		return NULL;
101 
102 	return kzalloc_objs(struct dmar_dev_scope, *cnt);
103 }
104 
105 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
106 {
107 	int i;
108 	struct device *tmp_dev;
109 
110 	if (*devices && *cnt) {
111 		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
112 			put_device(tmp_dev);
113 		kfree(*devices);
114 	}
115 
116 	*devices = NULL;
117 	*cnt = 0;
118 }
119 
120 /* Optimize out kzalloc()/kfree() for normal cases */
121 static char dmar_pci_notify_info_buf[64];
122 
123 static struct dmar_pci_notify_info *
124 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
125 {
126 	int level = 0;
127 	size_t size;
128 	struct pci_dev *tmp;
129 	struct dmar_pci_notify_info *info;
130 
131 	/*
132 	 * Ignore devices that have a domain number higher than what can
133 	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
134 	 */
135 	if (pci_domain_nr(dev->bus) > U16_MAX)
136 		return NULL;
137 
138 	/* Only generate path[] for device addition event */
139 	if (event == BUS_NOTIFY_ADD_DEVICE)
140 		for (tmp = dev; tmp; tmp = tmp->bus->self)
141 			level++;
142 
143 	size = struct_size(info, path, level);
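	/*
	 * For illustration: struct_size() evaluates to sizeof(*info) +
	 * level * sizeof(info->path[0]) with overflow checking, so a device
	 * sitting behind three bridges (level == 4) needs a four-entry
	 * path[] on top of the fixed header.
	 */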
144 	if (size <= sizeof(dmar_pci_notify_info_buf)) {
145 		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
146 	} else {
147 		info = kzalloc(size, GFP_KERNEL);
148 		if (!info) {
149 			if (dmar_dev_scope_status == 0)
150 				dmar_dev_scope_status = -ENOMEM;
151 			return NULL;
152 		}
153 	}
154 
155 	info->event = event;
156 	info->dev = dev;
157 	info->seg = pci_domain_nr(dev->bus);
158 	info->level = level;
159 	if (event == BUS_NOTIFY_ADD_DEVICE) {
160 		for (tmp = dev; tmp; tmp = tmp->bus->self) {
161 			level--;
162 			info->path[level].bus = tmp->bus->number;
163 			info->path[level].device = PCI_SLOT(tmp->devfn);
164 			info->path[level].function = PCI_FUNC(tmp->devfn);
165 			if (pci_is_root_bus(tmp->bus))
166 				info->bus = tmp->bus->number;
167 		}
168 	}
169 
170 	return info;
171 }
172 
173 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
174 {
175 	if ((void *)info != dmar_pci_notify_info_buf)
176 		kfree(info);
177 }
178 
179 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
180 				struct acpi_dmar_pci_path *path, int count)
181 {
182 	int i;
183 
184 	if (info->bus != bus)
185 		goto fallback;
186 	if (info->level != count)
187 		goto fallback;
188 
189 	for (i = 0; i < count; i++) {
190 		if (path[i].device != info->path[i].device ||
191 		    path[i].function != info->path[i].function)
192 			goto fallback;
193 	}
194 
195 	return true;
196 
197 fallback:
198 
199 	if (count != 1)
200 		return false;
201 
202 	i = info->level - 1;
203 	if (bus              == info->path[i].bus &&
204 	    path[0].device   == info->path[i].device &&
205 	    path[0].function == info->path[i].function) {
206 		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
207 			bus, path[0].device, path[0].function);
208 		return true;
209 	}
210 
211 	return false;
212 }
213 
214 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
215 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
216 			  void *start, void*end, u16 segment,
217 			  struct dmar_dev_scope *devices,
218 			  int devices_cnt)
219 {
220 	int i, level;
221 	struct device *tmp, *dev = &info->dev->dev;
222 	struct acpi_dmar_device_scope *scope;
223 	struct acpi_dmar_pci_path *path;
224 
225 	if (segment != info->seg)
226 		return 0;
227 
228 	for (; start < end; start += scope->length) {
229 		scope = start;
230 		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
231 		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
232 			continue;
233 
234 		path = (struct acpi_dmar_pci_path *)(scope + 1);
235 		level = (scope->length - sizeof(*scope)) / sizeof(*path);
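		/*
		 * Worked example, assuming the usual 6-byte device scope
		 * header and 2-byte path entries from the VT-d spec: a scope
		 * with length 8 carries one path entry (a device directly on
		 * scope->bus), length 10 carries two (one bridge deep), etc.
		 */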
236 		if (!dmar_match_pci_path(info, scope->bus, path, level))
237 			continue;
238 
239 		/*
240 		 * We expect devices with endpoint scope to have normal PCI
241 		 * headers, and devices with bridge scope to have bridge PCI
242 		 * headers.  However PCI NTB devices may be listed in the
243 		 * DMAR table with bridge scope, even though they have a
244 		 * normal PCI header.  NTB devices are identified by class
245 		 * "BRIDGE_OTHER" (0680h) - we don't declare a socpe mismatch
246 		 * for this special case.
247 		 */
248 		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
249 		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
250 		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
251 		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
252 		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
253 			pr_warn("Device scope type does not match for %s\n",
254 				pci_name(info->dev));
255 			return -EINVAL;
256 		}
257 
258 		for_each_dev_scope(devices, devices_cnt, i, tmp)
259 			if (tmp == NULL) {
260 				devices[i].bus = info->dev->bus->number;
261 				devices[i].devfn = info->dev->devfn;
262 				rcu_assign_pointer(devices[i].dev,
263 						   get_device(dev));
264 				return 1;
265 			}
266 		if (WARN_ON(i >= devices_cnt))
267 			return -EINVAL;
268 	}
269 
270 	return 0;
271 }
272 
273 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
274 			  struct dmar_dev_scope *devices, int count)
275 {
276 	int index;
277 	struct device *tmp;
278 
279 	if (info->seg != segment)
280 		return 0;
281 
282 	for_each_active_dev_scope(devices, count, index, tmp)
283 		if (tmp == &info->dev->dev) {
284 			RCU_INIT_POINTER(devices[index].dev, NULL);
285 			synchronize_rcu();
286 			put_device(tmp);
287 			return 1;
288 		}
289 
290 	return 0;
291 }
292 
293 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
294 {
295 	int ret = 0;
296 	struct dmar_drhd_unit *dmaru;
297 	struct acpi_dmar_hardware_unit *drhd;
298 
299 	for_each_drhd_unit(dmaru) {
300 		if (dmaru->include_all)
301 			continue;
302 
303 		drhd = container_of(dmaru->hdr,
304 				    struct acpi_dmar_hardware_unit, header);
305 		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
306 				((void *)drhd) + drhd->header.length,
307 				dmaru->segment,
308 				dmaru->devices, dmaru->devices_cnt);
309 		if (ret)
310 			break;
311 	}
312 	if (ret >= 0)
313 		ret = dmar_iommu_notify_scope_dev(info);
314 	if (ret < 0 && dmar_dev_scope_status == 0)
315 		dmar_dev_scope_status = ret;
316 
317 	if (ret >= 0)
318 		intel_irq_remap_add_device(info);
319 
320 	return ret;
321 }
322 
323 static void  dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
324 {
325 	struct dmar_drhd_unit *dmaru;
326 
327 	for_each_drhd_unit(dmaru)
328 		if (dmar_remove_dev_scope(info, dmaru->segment,
329 			dmaru->devices, dmaru->devices_cnt))
330 			break;
331 	dmar_iommu_notify_scope_dev(info);
332 }
333 
334 static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
335 {
336 	struct pci_dev *physfn = pci_physfn(pdev);
337 
338 	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
339 }
340 
341 static int dmar_pci_bus_notifier(struct notifier_block *nb,
342 				 unsigned long action, void *data)
343 {
344 	struct pci_dev *pdev = to_pci_dev(data);
345 	struct dmar_pci_notify_info *info;
346 
347 	/* Only care about add/remove events for physical functions.
348 	 * For VFs we actually do the lookup based on the corresponding
349 	 * PF in device_to_iommu() anyway. */
350 	if (pdev->is_virtfn) {
351 		/*
352 		 * Ensure that the VF device inherits the irq domain of the
353 		 * PF device. Ideally the device would inherit the domain
354 		 * from the bus, but DMAR can have multiple units per bus
355 		 * which makes this impossible. The VF 'bus' could inherit
356 		 * from the PF device, but that's yet another x86'sism to
357 		 * inflict on everybody else.
358 		 */
359 		if (action == BUS_NOTIFY_ADD_DEVICE)
360 			vf_inherit_msi_domain(pdev);
361 		return NOTIFY_DONE;
362 	}
363 
364 	if (action != BUS_NOTIFY_ADD_DEVICE &&
365 	    action != BUS_NOTIFY_REMOVED_DEVICE)
366 		return NOTIFY_DONE;
367 
368 	info = dmar_alloc_pci_notify_info(pdev, action);
369 	if (!info)
370 		return NOTIFY_DONE;
371 
372 	down_write(&dmar_global_lock);
373 	if (action == BUS_NOTIFY_ADD_DEVICE)
374 		dmar_pci_bus_add_dev(info);
375 	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
376 		dmar_pci_bus_del_dev(info);
377 	up_write(&dmar_global_lock);
378 
379 	dmar_free_pci_notify_info(info);
380 
381 	return NOTIFY_OK;
382 }
383 
384 static struct notifier_block dmar_pci_bus_nb = {
385 	.notifier_call = dmar_pci_bus_notifier,
386 	.priority = 1,
387 };
388 
389 static struct dmar_drhd_unit *
390 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
391 {
392 	struct dmar_drhd_unit *dmaru;
393 
394 	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
395 				dmar_rcu_check())
396 		if (dmaru->segment == drhd->segment &&
397 		    dmaru->reg_base_addr == drhd->address)
398 			return dmaru;
399 
400 	return NULL;
401 }
402 
403 /*
404  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
405  * structure which uniquely represents one DMA remapping hardware unit
406  * present in the platform
407  */
408 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
409 {
410 	struct acpi_dmar_hardware_unit *drhd;
411 	struct dmar_drhd_unit *dmaru;
412 	int ret;
413 
414 	drhd = (struct acpi_dmar_hardware_unit *)header;
415 	dmaru = dmar_find_dmaru(drhd);
416 	if (dmaru)
417 		goto out;
418 
419 	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
420 	if (!dmaru)
421 		return -ENOMEM;
422 
423 	/*
424 	 * If header is allocated from slab by ACPI _DSM method, we need to
425 	 * copy the content because the memory buffer will be freed on return.
426 	 */
427 	dmaru->hdr = (void *)(dmaru + 1);
428 	memcpy(dmaru->hdr, header, header->length);
429 	dmaru->reg_base_addr = drhd->address;
430 	dmaru->segment = drhd->segment;
431 	/* The size of the register set is 2 ^ N 4 KB pages. */
432 	dmaru->reg_size = 1UL << (drhd->size + 12);
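	/*
	 * For example, drhd->size == 0 gives a 4 KB register set and
	 * drhd->size == 2 gives 1UL << 14 == 16 KB.
	 */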
433 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
434 	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
435 					      ((void *)drhd) + drhd->header.length,
436 					      &dmaru->devices_cnt);
437 	if (dmaru->devices_cnt && dmaru->devices == NULL) {
438 		kfree(dmaru);
439 		return -ENOMEM;
440 	}
441 
442 	ret = alloc_iommu(dmaru);
443 	if (ret) {
444 		dmar_free_dev_scope(&dmaru->devices,
445 				    &dmaru->devices_cnt);
446 		kfree(dmaru);
447 		return ret;
448 	}
449 	dmar_register_drhd_unit(dmaru);
450 
451 out:
452 	if (arg)
453 		(*(int *)arg)++;
454 
455 	return 0;
456 }
457 
458 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
459 {
460 	if (dmaru->devices && dmaru->devices_cnt)
461 		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
462 	if (dmaru->iommu)
463 		free_iommu(dmaru->iommu);
464 	kfree(dmaru);
465 }
466 
467 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
468 				      void *arg)
469 {
470 	struct acpi_dmar_andd *andd = (void *)header;
471 
472 	/* Check for NUL termination within the designated length */
473 	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
474 		pr_warn(FW_BUG
475 			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
476 			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
477 			   dmi_get_system_info(DMI_BIOS_VENDOR),
478 			   dmi_get_system_info(DMI_BIOS_VERSION),
479 			   dmi_get_system_info(DMI_PRODUCT_VERSION));
480 		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
481 		return -EINVAL;
482 	}
483 	pr_info("ANDD device: %x name: %s\n", andd->device_number,
484 		andd->device_name);
485 
486 	return 0;
487 }
488 
489 #ifdef CONFIG_ACPI_NUMA
490 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
491 {
492 	struct acpi_dmar_rhsa *rhsa;
493 	struct dmar_drhd_unit *drhd;
494 
495 	rhsa = (struct acpi_dmar_rhsa *)header;
496 	for_each_drhd_unit(drhd) {
497 		if (drhd->reg_base_addr == rhsa->base_address) {
498 			int node = pxm_to_node(rhsa->proximity_domain);
499 
500 			if (node != NUMA_NO_NODE && !node_online(node))
501 				node = NUMA_NO_NODE;
502 			drhd->iommu->node = node;
503 			return 0;
504 		}
505 	}
506 	pr_warn(FW_BUG
507 		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
508 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
509 		rhsa->base_address,
510 		dmi_get_system_info(DMI_BIOS_VENDOR),
511 		dmi_get_system_info(DMI_BIOS_VERSION),
512 		dmi_get_system_info(DMI_PRODUCT_VERSION));
513 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
514 
515 	return 0;
516 }
517 #else
518 #define	dmar_parse_one_rhsa		dmar_res_noop
519 #endif
520 
521 static void
522 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
523 {
524 	struct acpi_dmar_hardware_unit *drhd;
525 	struct acpi_dmar_reserved_memory *rmrr;
526 	struct acpi_dmar_atsr *atsr;
527 	struct acpi_dmar_rhsa *rhsa;
528 	struct acpi_dmar_satc *satc;
529 
530 	switch (header->type) {
531 	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
532 		drhd = container_of(header, struct acpi_dmar_hardware_unit,
533 				    header);
534 		pr_info("DRHD base: %#016Lx flags: %#x\n",
535 			(unsigned long long)drhd->address, drhd->flags);
536 		break;
537 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
538 		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
539 				    header);
540 		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
541 			(unsigned long long)rmrr->base_address,
542 			(unsigned long long)rmrr->end_address);
543 		break;
544 	case ACPI_DMAR_TYPE_ROOT_ATS:
545 		atsr = container_of(header, struct acpi_dmar_atsr, header);
546 		pr_info("ATSR flags: %#x\n", atsr->flags);
547 		break;
548 	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
549 		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
550 		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
551 		       (unsigned long long)rhsa->base_address,
552 		       rhsa->proximity_domain);
553 		break;
554 	case ACPI_DMAR_TYPE_NAMESPACE:
555 		/* We don't print this here because we need to sanity-check
556 		   it first. So print it in dmar_parse_one_andd() instead. */
557 		break;
558 	case ACPI_DMAR_TYPE_SATC:
559 		satc = container_of(header, struct acpi_dmar_satc, header);
560 		pr_info("SATC flags: 0x%x\n", satc->flags);
561 		break;
562 	}
563 }
564 
565 /**
566  * dmar_table_detect - checks to see if the platform supports DMAR devices
567  */
568 static int __init dmar_table_detect(void)
569 {
570 	acpi_status status = AE_OK;
571 
572 	/* if we can find the DMAR table, then there are DMAR devices */
573 	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
574 
575 	if (ACPI_SUCCESS(status) && !dmar_tbl) {
576 		pr_warn("Unable to map DMAR\n");
577 		status = AE_NOT_FOUND;
578 	}
579 
580 	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
581 }
582 
583 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
584 				       size_t len, struct dmar_res_callback *cb)
585 {
586 	struct acpi_dmar_header *iter, *next;
587 	struct acpi_dmar_header *end = ((void *)start) + len;
588 
589 	for (iter = start; iter < end; iter = next) {
590 		next = (void *)iter + iter->length;
591 		if (iter->length == 0) {
592 			/* Avoid looping forever on bad ACPI tables */
593 			pr_debug(FW_BUG "Invalid 0-length structure\n");
594 			break;
595 		} else if (next > end) {
596 			/* Avoid passing table end */
597 			pr_warn(FW_BUG "Record passes table end\n");
598 			return -EINVAL;
599 		}
600 
601 		if (cb->print_entry)
602 			dmar_table_print_dmar_entry(iter);
603 
604 		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
605 			/* continue for forward compatibility */
606 			pr_debug("Unknown DMAR structure type %d\n",
607 				 iter->type);
608 		} else if (cb->cb[iter->type]) {
609 			int ret;
610 
611 			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
612 			if (ret)
613 				return ret;
614 		} else if (!cb->ignore_unhandled) {
615 			pr_warn("No handler for DMAR structure type %d\n",
616 				iter->type);
617 			return -EINVAL;
618 		}
619 	}
620 
621 	return 0;
622 }
623 
624 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
625 				       struct dmar_res_callback *cb)
626 {
627 	return dmar_walk_remapping_entries((void *)(dmar + 1),
628 			dmar->header.length - sizeof(*dmar), cb);
629 }
630 
631 /**
632  * parse_dmar_table - parses the DMA reporting table
633  */
634 static int __init
635 parse_dmar_table(void)
636 {
637 	struct acpi_table_dmar *dmar;
638 	int drhd_count = 0;
639 	int ret;
640 	struct dmar_res_callback cb = {
641 		.print_entry = true,
642 		.ignore_unhandled = true,
643 		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
644 		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
645 		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
646 		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
647 		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
648 		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
649 		.cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
650 	};
651 
652 	/*
653 	 * Do it again; the earlier dmar_tbl mapping could have been done with
654 	 * the fixed map.
655 	 */
656 	dmar_table_detect();
657 
658 	/*
659 	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
660 	 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
661 	 */
662 	dmar_tbl = tboot_get_dmar_table(dmar_tbl);
663 
664 	dmar = (struct acpi_table_dmar *)dmar_tbl;
665 	if (!dmar)
666 		return -ENODEV;
667 
668 	if (dmar->width < PAGE_SHIFT - 1) {
669 		pr_warn("Invalid DMAR haw\n");
670 		return -EINVAL;
671 	}
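	/*
	 * The DMAR table stores the host address width minus one, so e.g. a
	 * reported value of 38 means 39-bit DMA addressability; anything
	 * below PAGE_SHIFT - 1 could not even cover a single page, hence the
	 * check above.
	 */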
672 
673 	pr_info("Host address width %d\n", dmar->width + 1);
674 	ret = dmar_walk_dmar_table(dmar, &cb);
675 	if (ret == 0 && drhd_count == 0)
676 		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
677 
678 	return ret;
679 }
680 
681 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
682 				 int cnt, struct pci_dev *dev)
683 {
684 	int index;
685 	struct device *tmp;
686 
687 	while (dev) {
688 		for_each_active_dev_scope(devices, cnt, index, tmp)
689 			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
690 				return 1;
691 
692 		/* Check our parent */
693 		dev = dev->bus->self;
694 	}
695 
696 	return 0;
697 }
698 
699 struct dmar_drhd_unit *
700 dmar_find_matched_drhd_unit(struct pci_dev *dev)
701 {
702 	struct dmar_drhd_unit *dmaru;
703 	struct acpi_dmar_hardware_unit *drhd;
704 
705 	dev = pci_physfn(dev);
706 
707 	rcu_read_lock();
708 	for_each_drhd_unit(dmaru) {
709 		drhd = container_of(dmaru->hdr,
710 				    struct acpi_dmar_hardware_unit,
711 				    header);
712 
713 		if (dmaru->include_all &&
714 		    drhd->segment == pci_domain_nr(dev->bus))
715 			goto out;
716 
717 		if (dmar_pci_device_match(dmaru->devices,
718 					  dmaru->devices_cnt, dev))
719 			goto out;
720 	}
721 	dmaru = NULL;
722 out:
723 	rcu_read_unlock();
724 
725 	return dmaru;
726 }
727 
728 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
729 					      struct acpi_device *adev)
730 {
731 	struct dmar_drhd_unit *dmaru;
732 	struct acpi_dmar_hardware_unit *drhd;
733 	struct acpi_dmar_device_scope *scope;
734 	struct device *tmp;
735 	int i;
736 	struct acpi_dmar_pci_path *path;
737 
738 	for_each_drhd_unit(dmaru) {
739 		drhd = container_of(dmaru->hdr,
740 				    struct acpi_dmar_hardware_unit,
741 				    header);
742 
743 		for (scope = (void *)(drhd + 1);
744 		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
745 		     scope = ((void *)scope) + scope->length) {
746 			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
747 				continue;
748 			if (scope->enumeration_id != device_number)
749 				continue;
750 
751 			path = (void *)(scope + 1);
752 			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
753 				dev_name(&adev->dev), dmaru->reg_base_addr,
754 				scope->bus, path->device, path->function);
755 			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
756 				if (tmp == NULL) {
757 					dmaru->devices[i].bus = scope->bus;
758 					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
759 									    path->function);
760 					rcu_assign_pointer(dmaru->devices[i].dev,
761 							   get_device(&adev->dev));
762 					return;
763 				}
764 			BUG_ON(i >= dmaru->devices_cnt);
765 		}
766 	}
767 	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
768 		device_number, dev_name(&adev->dev));
769 }
770 
771 static int __init dmar_acpi_dev_scope_init(void)
772 {
773 	struct acpi_dmar_andd *andd;
774 
775 	if (dmar_tbl == NULL)
776 		return -ENODEV;
777 
778 	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
779 	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
780 	     andd = ((void *)andd) + andd->header.length) {
781 		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
782 			acpi_handle h;
783 			struct acpi_device *adev;
784 
785 			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
786 							  andd->device_name,
787 							  &h))) {
788 				pr_err("Failed to find handle for ACPI object %s\n",
789 				       andd->device_name);
790 				continue;
791 			}
792 			adev = acpi_fetch_acpi_dev(h);
793 			if (!adev) {
794 				pr_err("Failed to get device for ACPI object %s\n",
795 				       andd->device_name);
796 				continue;
797 			}
798 			dmar_acpi_insert_dev_scope(andd->device_number, adev);
799 		}
800 	}
801 	return 0;
802 }
803 
804 int __init dmar_dev_scope_init(void)
805 {
806 	struct pci_dev *dev = NULL;
807 	struct dmar_pci_notify_info *info;
808 
809 	if (dmar_dev_scope_status != 1)
810 		return dmar_dev_scope_status;
811 
812 	if (list_empty(&dmar_drhd_units)) {
813 		dmar_dev_scope_status = -ENODEV;
814 	} else {
815 		dmar_dev_scope_status = 0;
816 
817 		dmar_acpi_dev_scope_init();
818 
819 		for_each_pci_dev(dev) {
820 			if (dev->is_virtfn)
821 				continue;
822 
823 			info = dmar_alloc_pci_notify_info(dev,
824 					BUS_NOTIFY_ADD_DEVICE);
825 			if (!info) {
826 				pci_dev_put(dev);
827 				return dmar_dev_scope_status;
828 			} else {
829 				dmar_pci_bus_add_dev(info);
830 				dmar_free_pci_notify_info(info);
831 			}
832 		}
833 	}
834 
835 	return dmar_dev_scope_status;
836 }
837 
838 void __init dmar_register_bus_notifier(void)
839 {
840 	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
841 }
842 
843 
844 int __init dmar_table_init(void)
845 {
846 	static int dmar_table_initialized;
847 	int ret;
848 
849 	if (dmar_table_initialized == 0) {
850 		ret = parse_dmar_table();
851 		if (ret < 0) {
852 			if (ret != -ENODEV)
853 				pr_info("Parse DMAR table failure.\n");
854 		} else  if (list_empty(&dmar_drhd_units)) {
855 			pr_info("No DMAR devices found\n");
856 			ret = -ENODEV;
857 		}
858 
859 		if (ret < 0)
860 			dmar_table_initialized = ret;
861 		else
862 			dmar_table_initialized = 1;
863 	}
864 
865 	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
866 }
867 
868 static void warn_invalid_dmar(u64 addr, const char *message)
869 {
870 	pr_warn_once(FW_BUG
871 		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
872 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
873 		addr, message,
874 		dmi_get_system_info(DMI_BIOS_VENDOR),
875 		dmi_get_system_info(DMI_BIOS_VERSION),
876 		dmi_get_system_info(DMI_PRODUCT_VERSION));
877 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
878 }
879 
880 static int __ref
881 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
882 {
883 	struct acpi_dmar_hardware_unit *drhd;
884 	void __iomem *addr;
885 	u64 cap, ecap;
886 
887 	drhd = (void *)entry;
888 	if (!drhd->address) {
889 		warn_invalid_dmar(0, "");
890 		return -EINVAL;
891 	}
892 
893 	if (arg)
894 		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
895 	else
896 		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
897 	if (!addr) {
898 		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
899 		return -EINVAL;
900 	}
901 
902 	cap = readq(addr + DMAR_CAP_REG);
903 	ecap = readq(addr + DMAR_ECAP_REG);
904 
905 	if (arg)
906 		iounmap(addr);
907 	else
908 		early_iounmap(addr, VTD_PAGE_SIZE);
909 
910 	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
911 		warn_invalid_dmar(drhd->address, " returns all ones");
912 		return -EINVAL;
913 	}
914 
915 	return 0;
916 }
917 
918 void __init detect_intel_iommu(void)
919 {
920 	int ret;
921 	struct dmar_res_callback validate_drhd_cb = {
922 		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
923 		.ignore_unhandled = true,
924 	};
925 
926 	down_write(&dmar_global_lock);
927 	ret = dmar_table_detect();
928 	if (!ret)
929 		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
930 					   &validate_drhd_cb);
931 	if (!ret && !no_iommu && !iommu_detected &&
932 	    (!dmar_disabled || dmar_platform_optin())) {
933 		iommu_detected = 1;
934 		/* Make sure ACS will be enabled */
935 		pci_request_acs();
936 	}
937 
938 	if (!ret) {
939 		x86_init.iommu.iommu_init = intel_iommu_init;
940 		x86_platform.iommu_shutdown = intel_iommu_shutdown;
941 	}
942 
943 	if (dmar_tbl) {
944 		acpi_put_table(dmar_tbl);
945 		dmar_tbl = NULL;
946 	}
947 	up_write(&dmar_global_lock);
948 }
949 
950 static void unmap_iommu(struct intel_iommu *iommu)
951 {
952 	iounmap(iommu->reg);
953 	release_mem_region(iommu->reg_phys, iommu->reg_size);
954 }
955 
956 /**
957  * map_iommu: map the iommu's registers
958  * @iommu: the iommu to map
959  * @drhd: DMA remapping hardware definition structure
960  *
961  * Memory map the iommu's registers.  Start w/ a single page, and
962  * possibly expand if that turns out to be insufficient.
963  */
964 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
965 {
966 	u64 phys_addr = drhd->reg_base_addr;
967 	int map_size, err=0;
968 
969 	iommu->reg_phys = phys_addr;
970 	iommu->reg_size = drhd->reg_size;
971 
972 	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
973 		pr_err("Can't reserve memory\n");
974 		err = -EBUSY;
975 		goto out;
976 	}
977 
978 	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
979 	if (!iommu->reg) {
980 		pr_err("Can't map the region\n");
981 		err = -ENOMEM;
982 		goto release;
983 	}
984 
985 	iommu->cap = readq(iommu->reg + DMAR_CAP_REG);
986 	iommu->ecap = readq(iommu->reg + DMAR_ECAP_REG);
987 
988 	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
989 		err = -EINVAL;
990 		warn_invalid_dmar(phys_addr, " returns all ones");
991 		goto unmap;
992 	}
993 
994 	/* the registers might be more than one page */
995 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
996 			 cap_max_fault_reg_offset(iommu->cap));
997 	map_size = VTD_PAGE_ALIGN(map_size);
998 	if (map_size > iommu->reg_size) {
999 		iounmap(iommu->reg);
1000 		release_mem_region(iommu->reg_phys, iommu->reg_size);
1001 		iommu->reg_size = map_size;
1002 		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1003 					iommu->name)) {
1004 			pr_err("Can't reserve memory\n");
1005 			err = -EBUSY;
1006 			goto out;
1007 		}
1008 		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1009 		if (!iommu->reg) {
1010 			pr_err("Can't map the region\n");
1011 			err = -ENOMEM;
1012 			goto release;
1013 		}
1014 	}
1015 
1016 	if (cap_ecmds(iommu->cap)) {
1017 		int i;
1018 
1019 		for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
1020 			iommu->ecmdcap[i] = readq(iommu->reg + DMAR_ECCAP_REG +
1021 						  i * DMA_ECMD_REG_STEP);
1022 		}
1023 	}
1024 
1025 	err = 0;
1026 	goto out;
1027 
1028 unmap:
1029 	iounmap(iommu->reg);
1030 release:
1031 	release_mem_region(iommu->reg_phys, iommu->reg_size);
1032 out:
1033 	return err;
1034 }
1035 
1036 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1037 {
1038 	struct intel_iommu *iommu;
1039 	u32 ver, sts;
1040 	int agaw = -1;
1041 	int msagaw = -1;
1042 	int err;
1043 
1044 	if (!drhd->reg_base_addr) {
1045 		warn_invalid_dmar(0, "");
1046 		return -EINVAL;
1047 	}
1048 
1049 	iommu = kzalloc_obj(*iommu);
1050 	if (!iommu)
1051 		return -ENOMEM;
1052 
1053 	iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1054 					DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
1055 	if (iommu->seq_id < 0) {
1056 		pr_err("Failed to allocate seq_id\n");
1057 		err = iommu->seq_id;
1058 		goto error;
1059 	}
1060 	snprintf(iommu->name, sizeof(iommu->name), "dmar%d", iommu->seq_id);
1061 
1062 	err = map_iommu(iommu, drhd);
1063 	if (err) {
1064 		pr_err("Failed to map %s\n", iommu->name);
1065 		goto error_free_seq_id;
1066 	}
1067 
1068 	if (!cap_sagaw(iommu->cap) &&
1069 	    (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1070 		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1071 			iommu->name);
1072 		drhd->ignored = 1;
1073 	}
1074 
1075 	if (!drhd->ignored) {
1076 		agaw = iommu_calculate_agaw(iommu);
1077 		if (agaw < 0) {
1078 			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1079 			       iommu->seq_id);
1080 			drhd->ignored = 1;
1081 		}
1082 	}
1083 	if (!drhd->ignored) {
1084 		msagaw = iommu_calculate_max_sagaw(iommu);
1085 		if (msagaw < 0) {
1086 			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1087 			       iommu->seq_id);
1088 			drhd->ignored = 1;
1089 			agaw = -1;
1090 		}
1091 	}
1092 	iommu->agaw = agaw;
1093 	iommu->msagaw = msagaw;
1094 	iommu->segment = drhd->segment;
1095 	iommu->device_rbtree = RB_ROOT;
1096 	spin_lock_init(&iommu->device_rbtree_lock);
1097 	mutex_init(&iommu->iopf_lock);
1098 	iommu->node = NUMA_NO_NODE;
1099 	spin_lock_init(&iommu->lock);
1100 	ida_init(&iommu->domain_ida);
1101 	mutex_init(&iommu->did_lock);
1102 
1103 	ver = readl(iommu->reg + DMAR_VER_REG);
1104 	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1105 		iommu->name,
1106 		(unsigned long long)drhd->reg_base_addr,
1107 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1108 		(unsigned long long)iommu->cap,
1109 		(unsigned long long)iommu->ecap);
1110 
1111 	/* Reflect status in gcmd */
1112 	sts = readl(iommu->reg + DMAR_GSTS_REG);
1113 	if (sts & DMA_GSTS_IRES)
1114 		iommu->gcmd |= DMA_GCMD_IRE;
1115 	if (sts & DMA_GSTS_TES)
1116 		iommu->gcmd |= DMA_GCMD_TE;
1117 	if (sts & DMA_GSTS_QIES)
1118 		iommu->gcmd |= DMA_GCMD_QIE;
1119 
1120 	if (alloc_iommu_pmu(iommu))
1121 		pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1122 
1123 	raw_spin_lock_init(&iommu->register_lock);
1124 
1125 	/*
1126 	 * A value of N in the PSS field of the eCap register indicates that the
1127 	 * hardware supports a PASID field of N+1 bits.
1128 	 */
1129 	if (pasid_supported(iommu))
1130 		iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
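	/*
	 * Worked example: a PSS value of 19 means 20-bit PASIDs, so
	 * max_pasids = 2UL << 19 = 1 << 20 = 1048576.
	 */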
1131 
1132 	/*
1133 	 * This is only for hotplug; at boot time intel_iommu_enabled won't
1134 	 * be set yet. When intel_iommu_init() runs, it registers the units
1135 	 * present at boot time, then sets intel_iommu_enabled.
1136 	 */
1137 	if (intel_iommu_enabled && !drhd->ignored) {
1138 		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1139 					     intel_iommu_groups,
1140 					     "%s", iommu->name);
1141 		if (err)
1142 			goto err_unmap;
1143 
1144 		err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1145 		if (err)
1146 			goto err_sysfs;
1147 
1148 		iommu_pmu_register(iommu);
1149 	}
1150 
1151 	drhd->iommu = iommu;
1152 	iommu->drhd = drhd;
1153 
1154 	return 0;
1155 
1156 err_sysfs:
1157 	iommu_device_sysfs_remove(&iommu->iommu);
1158 err_unmap:
1159 	free_iommu_pmu(iommu);
1160 	unmap_iommu(iommu);
1161 error_free_seq_id:
1162 	ida_free(&dmar_seq_ids, iommu->seq_id);
1163 error:
1164 	kfree(iommu);
1165 	return err;
1166 }
1167 
1168 static void free_iommu(struct intel_iommu *iommu)
1169 {
1170 	if (intel_iommu_enabled && !iommu->drhd->ignored) {
1171 		iommu_pmu_unregister(iommu);
1172 		iommu_device_unregister(&iommu->iommu);
1173 		iommu_device_sysfs_remove(&iommu->iommu);
1174 	}
1175 
1176 	free_iommu_pmu(iommu);
1177 
1178 	if (iommu->irq) {
1179 		if (iommu->pr_irq) {
1180 			free_irq(iommu->pr_irq, iommu);
1181 			dmar_free_hwirq(iommu->pr_irq);
1182 			iommu->pr_irq = 0;
1183 		}
1184 		free_irq(iommu->irq, iommu);
1185 		dmar_free_hwirq(iommu->irq);
1186 		iommu->irq = 0;
1187 	}
1188 
1189 	if (iommu->qi) {
1190 		iommu_free_pages(iommu->qi->desc);
1191 		kfree(iommu->qi->desc_status);
1192 		kfree(iommu->qi);
1193 	}
1194 
1195 	if (iommu->reg)
1196 		unmap_iommu(iommu);
1197 
1198 	ida_destroy(&iommu->domain_ida);
1199 	ida_free(&dmar_seq_ids, iommu->seq_id);
1200 	kfree(iommu);
1201 }
1202 
1203 /*
1204  * Reclaim all the submitted descriptors which have completed their work.
1205  */
1206 static inline void reclaim_free_desc(struct q_inval *qi)
1207 {
1208 	while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
1209 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1210 		qi->free_cnt++;
1211 	}
1212 }
1213 
1214 static const char *qi_type_string(u8 type)
1215 {
1216 	switch (type) {
1217 	case QI_CC_TYPE:
1218 		return "Context-cache Invalidation";
1219 	case QI_IOTLB_TYPE:
1220 		return "IOTLB Invalidation";
1221 	case QI_DIOTLB_TYPE:
1222 		return "Device-TLB Invalidation";
1223 	case QI_IEC_TYPE:
1224 		return "Interrupt Entry Cache Invalidation";
1225 	case QI_IWD_TYPE:
1226 		return "Invalidation Wait";
1227 	case QI_EIOTLB_TYPE:
1228 		return "PASID-based IOTLB Invalidation";
1229 	case QI_PC_TYPE:
1230 		return "PASID-cache Invalidation";
1231 	case QI_DEIOTLB_TYPE:
1232 		return "PASID-based Device-TLB Invalidation";
1233 	case QI_PGRP_RESP_TYPE:
1234 		return "Page Group Response";
1235 	default:
1236 		return "UNKNOWN";
1237 	}
1238 }
1239 
1240 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1241 {
1242 	unsigned int head = readl(iommu->reg + DMAR_IQH_REG);
1243 	u64 iqe_err = readq(iommu->reg + DMAR_IQER_REG);
1244 	struct qi_desc *desc = iommu->qi->desc + head;
1245 
1246 	if (fault & DMA_FSTS_IQE)
1247 		pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
1248 		       DMAR_IQER_REG_IQEI(iqe_err));
1249 	if (fault & DMA_FSTS_ITE)
1250 		pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
1251 		       DMAR_IQER_REG_ITESID(iqe_err));
1252 	if (fault & DMA_FSTS_ICE)
1253 		pr_err("VT-d detected Invalidation Completion Error: SID %llx",
1254 		       DMAR_IQER_REG_ICESID(iqe_err));
1255 
1256 	pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1257 	       qi_type_string(desc->qw0 & 0xf),
1258 	       (unsigned long long)desc->qw0,
1259 	       (unsigned long long)desc->qw1);
1260 
1261 	head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1262 	head <<= qi_shift(iommu);
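	/*
	 * For illustration: with QI_LENGTH == 256, the arithmetic above steps
	 * back one entry with wrap-around (index 0 becomes 255) and then
	 * converts the index back into a byte offset into the queue.
	 */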
1263 	desc = iommu->qi->desc + head;
1264 
1265 	pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1266 	       qi_type_string(desc->qw0 & 0xf),
1267 	       (unsigned long long)desc->qw0,
1268 	       (unsigned long long)desc->qw1);
1269 }
1270 
1271 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1272 {
1273 	u32 fault;
1274 	int head, tail;
1275 	struct device *dev;
1276 	u64 iqe_err, ite_sid;
1277 	struct q_inval *qi = iommu->qi;
1278 	int shift = qi_shift(iommu);
1279 
1280 	if (qi->desc_status[wait_index] == QI_ABORT)
1281 		return -EAGAIN;
1282 
1283 	fault = readl(iommu->reg + DMAR_FSTS_REG);
1284 	if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
1285 		qi_dump_fault(iommu, fault);
1286 
1287 	/*
1288 	 * If IQE happens, the head points to the descriptor associated
1289 	 * with the error. No new descriptors are fetched until the IQE
1290 	 * is cleared.
1291 	 */
1292 	if (fault & DMA_FSTS_IQE) {
1293 		head = readl(iommu->reg + DMAR_IQH_REG);
1294 		if ((head >> shift) == index) {
1295 			struct qi_desc *desc = qi->desc + head;
1296 
1297 			/*
1298 			 * desc->qw2 and desc->qw3 are either reserved or
1299 			 * used by software as private data. We won't print
1300 			 * out these two qw's for security consideration.
1301 			 */
1302 			memcpy(desc, qi->desc + (wait_index << shift),
1303 			       1 << shift);
1304 			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1305 			pr_info("Invalidation Queue Error (IQE) cleared\n");
1306 			return -EINVAL;
1307 		}
1308 	}
1309 
1310 	/*
1311 	 * If ITE happens, all pending wait_desc commands are aborted.
1312 	 * No new descriptors are fetched until the ITE is cleared.
1313 	 */
1314 	if (fault & DMA_FSTS_ITE) {
1315 		head = readl(iommu->reg + DMAR_IQH_REG);
1316 		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1317 		tail = readl(iommu->reg + DMAR_IQT_REG);
1318 		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1319 
1320 		/*
1321 		 * SID field is valid only when the ITE field is Set in FSTS_REG
1322 		 * see Intel VT-d spec r4.1, section 11.4.9.9
1323 		 */
1324 		iqe_err = readq(iommu->reg + DMAR_IQER_REG);
1325 		ite_sid = DMAR_IQER_REG_ITESID(iqe_err);
1326 
1327 		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1328 		pr_info("Invalidation Time-out Error (ITE) cleared\n");
1329 
1330 		do {
1331 			if (qi->desc_status[head] == QI_IN_USE)
1332 				qi->desc_status[head] = QI_ABORT;
1333 			head = (head - 1 + QI_LENGTH) % QI_LENGTH;
1334 		} while (head != tail);
1335 
1336 		/*
1337 		 * If device was released or isn't present, no need to retry
1338 		 * the ATS invalidate request anymore.
1339 		 *
1340 		 * 0 value of ite_sid means old VT-d device, no ite_sid value.
1341 		 * see Intel VT-d spec r4.1, section 11.4.9.9
1342 		 */
1343 		if (ite_sid) {
1344 			dev = device_rbtree_find(iommu, ite_sid);
1345 			if (!dev || !dev_is_pci(dev) ||
1346 			    !pci_device_is_present(to_pci_dev(dev)))
1347 				return -ETIMEDOUT;
1348 		}
1349 		if (qi->desc_status[wait_index] == QI_ABORT)
1350 			return -EAGAIN;
1351 	}
1352 
1353 	if (fault & DMA_FSTS_ICE) {
1354 		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1355 		pr_info("Invalidation Completion Error (ICE) cleared\n");
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 /*
1362  * Function to submit invalidation descriptors of all types to the queued
1363  * invalidation interface (QI). Multiple descriptors can be submitted at a
1364  * time; a wait descriptor is appended to each submission to ensure that
1365  * hardware has completed the invalidation before returning. Wait descriptors
1366  * can be part of the submission, but they will not be polled for completion.
1367  */
1368 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1369 		   unsigned int count, unsigned long options)
1370 {
1371 	struct q_inval *qi = iommu->qi;
1372 	s64 devtlb_start_ktime = 0;
1373 	s64 iotlb_start_ktime = 0;
1374 	s64 iec_start_ktime = 0;
1375 	struct qi_desc wait_desc;
1376 	int wait_index, index;
1377 	unsigned long flags;
1378 	int offset, shift;
1379 	int rc, i;
1380 	u64 type;
1381 
1382 	if (!qi)
1383 		return 0;
1384 
1385 	type = desc->qw0 & GENMASK_ULL(3, 0);
1386 
1387 	if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
1388 	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1389 		iotlb_start_ktime = ktime_to_ns(ktime_get());
1390 
1391 	if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
1392 	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1393 		devtlb_start_ktime = ktime_to_ns(ktime_get());
1394 
1395 	if (type == QI_IEC_TYPE &&
1396 	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1397 		iec_start_ktime = ktime_to_ns(ktime_get());
1398 
1399 restart:
1400 	rc = 0;
1401 
1402 	raw_spin_lock_irqsave(&qi->q_lock, flags);
1403 	/*
1404 	 * Check if we have enough empty slots in the queue to submit,
1405 	 * the calculation is based on:
1406 	 * # of desc + 1 wait desc + 1 space between head and tail
1407 	 */
1408 	while (qi->free_cnt < count + 2) {
1409 		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1410 		cpu_relax();
1411 		raw_spin_lock_irqsave(&qi->q_lock, flags);
1412 	}
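	/*
	 * Worked example of the rule above: submitting a single descriptor
	 * (count == 1) needs free_cnt >= 3: the descriptor itself, the
	 * appended wait descriptor, and one empty slot so that head and tail
	 * never meet on a full queue.
	 */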
1413 
1414 	index = qi->free_head;
1415 	wait_index = (index + count) % QI_LENGTH;
1416 	shift = qi_shift(iommu);
1417 
1418 	for (i = 0; i < count; i++) {
1419 		offset = ((index + i) % QI_LENGTH) << shift;
1420 		memcpy(qi->desc + offset, &desc[i], 1 << shift);
1421 		qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1422 		trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1423 				desc[i].qw2, desc[i].qw3);
1424 	}
1425 	qi->desc_status[wait_index] = QI_IN_USE;
1426 
1427 	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1428 			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1429 	if (options & QI_OPT_WAIT_DRAIN)
1430 		wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1431 	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1432 	wait_desc.qw2 = 0;
1433 	wait_desc.qw3 = 0;
1434 
1435 	offset = wait_index << shift;
1436 	memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1437 
1438 	qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1439 	qi->free_cnt -= count + 1;
1440 
1441 	/*
1442 	 * update the HW tail register indicating the presence of
1443 	 * new descriptors.
1444 	 */
1445 	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1446 
1447 	while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
1448 		/*
1449 		 * We will leave the interrupts disabled, to prevent interrupt
1450 		 * context from queueing another cmd while a cmd is already submitted
1451 		 * and waiting for completion on this CPU. This is to avoid
1452 		 * a deadlock where the interrupt context can wait indefinitely
1453 		 * for free slots in the queue.
1454 		 */
1455 		rc = qi_check_fault(iommu, index, wait_index);
1456 		if (rc)
1457 			break;
1458 
1459 		raw_spin_unlock(&qi->q_lock);
1460 		cpu_relax();
1461 		raw_spin_lock(&qi->q_lock);
1462 	}
1463 
1464 	/*
1465 	 * The reclaim code can free descriptors from multiple submissions
1466 	 * starting from the tail of the queue. When count == 0, the
1467 	 * status of the standalone wait descriptor at the tail of the queue
1468 	 * must be set to QI_FREE to allow the reclaim code to proceed.
1469 	 * It is also possible that descriptors from one of the previous
1470 	 * submissions has to be reclaimed by a subsequent submission.
1471 	 */
1472 	for (i = 0; i <= count; i++)
1473 		qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE;
1474 
1475 	reclaim_free_desc(qi);
1476 	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1477 
1478 	if (rc == -EAGAIN)
1479 		goto restart;
1480 
1481 	if (iotlb_start_ktime)
1482 		dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1483 				ktime_to_ns(ktime_get()) - iotlb_start_ktime);
1484 
1485 	if (devtlb_start_ktime)
1486 		dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1487 				ktime_to_ns(ktime_get()) - devtlb_start_ktime);
1488 
1489 	if (iec_start_ktime)
1490 		dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
1491 				ktime_to_ns(ktime_get()) - iec_start_ktime);
1492 
1493 	return rc;
1494 }
1495 
1496 /*
1497  * Flush the global interrupt entry cache.
1498  */
1499 void qi_global_iec(struct intel_iommu *iommu)
1500 {
1501 	struct qi_desc desc;
1502 
1503 	desc.qw0 = QI_IEC_TYPE;
1504 	desc.qw1 = 0;
1505 	desc.qw2 = 0;
1506 	desc.qw3 = 0;
1507 
1508 	/* should never fail */
1509 	qi_submit_sync(iommu, &desc, 1, 0);
1510 }
1511 
1512 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1513 		      u64 type)
1514 {
1515 	struct qi_desc desc;
1516 
1517 	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1518 			| QI_CC_GRAN(type) | QI_CC_TYPE;
1519 	desc.qw1 = 0;
1520 	desc.qw2 = 0;
1521 	desc.qw3 = 0;
1522 
1523 	qi_submit_sync(iommu, &desc, 1, 0);
1524 }
1525 
1526 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1527 		    unsigned int size_order, u64 type)
1528 {
1529 	struct qi_desc desc;
1530 
1531 	qi_desc_iotlb(iommu, did, addr, size_order, type, &desc);
1532 	qi_submit_sync(iommu, &desc, 1, 0);
1533 }
1534 
1535 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1536 			u16 qdep, u64 addr, unsigned mask)
1537 {
1538 	struct qi_desc desc;
1539 
1540 	/*
1541 	 * VT-d spec, section 4.3:
1542 	 *
1543 	 * Software is recommended to not submit any Device-TLB invalidation
1544 	 * requests while address remapping hardware is disabled.
1545 	 */
1546 	if (!(iommu->gcmd & DMA_GCMD_TE))
1547 		return;
1548 
1549 	qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &desc);
1550 	qi_submit_sync(iommu, &desc, 1, 0);
1551 }
1552 
1553 /* PASID-selective IOTLB invalidation */
1554 void qi_flush_piotlb_all(struct intel_iommu *iommu, u16 did, u32 pasid)
1555 {
1556 	struct qi_desc desc = {};
1557 
1558 	qi_desc_piotlb_all(did, pasid, &desc);
1559 	qi_submit_sync(iommu, &desc, 1, 0);
1560 }
1561 
1562 /* PASID-based device IOTLB Invalidate */
1563 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1564 			      u32 pasid,  u16 qdep, u64 addr, unsigned int size_order)
1565 {
1566 	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1567 
1568 	/*
1569 	 * VT-d spec, section 4.3:
1570 	 *
1571 	 * Software is recommended to not submit any Device-TLB invalidation
1572 	 * requests while address remapping hardware is disabled.
1573 	 */
1574 	if (!(iommu->gcmd & DMA_GCMD_TE))
1575 		return;
1576 
1577 	qi_desc_dev_iotlb_pasid(sid, pfsid, pasid,
1578 				qdep, addr, size_order,
1579 				&desc);
1580 	qi_submit_sync(iommu, &desc, 1, 0);
1581 }
1582 
1583 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1584 			  u64 granu, u32 pasid)
1585 {
1586 	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1587 
1588 	desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1589 			QI_PC_GRAN(granu) | QI_PC_TYPE;
1590 	qi_submit_sync(iommu, &desc, 1, 0);
1591 }
1592 
1593 /*
1594  * Disable Queued Invalidation interface.
1595  */
1596 void dmar_disable_qi(struct intel_iommu *iommu)
1597 {
1598 	unsigned long flags;
1599 	u32 sts;
1600 	cycles_t start_time = get_cycles();
1601 
1602 	if (!ecap_qis(iommu->ecap))
1603 		return;
1604 
1605 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1606 
1607 	sts =  readl(iommu->reg + DMAR_GSTS_REG);
1608 	if (!(sts & DMA_GSTS_QIES))
1609 		goto end;
1610 
1611 	/*
1612 	 * Give a chance to HW to complete the pending invalidation requests.
1613 	 */
1614 	while ((readl(iommu->reg + DMAR_IQT_REG) !=
1615 		readl(iommu->reg + DMAR_IQH_REG)) &&
1616 		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1617 		cpu_relax();
1618 
1619 	iommu->gcmd &= ~DMA_GCMD_QIE;
1620 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1621 
1622 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1623 		      !(sts & DMA_GSTS_QIES), sts);
1624 end:
1625 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1626 }
1627 
1628 /*
1629  * Enable queued invalidation.
1630  */
1631 static void __dmar_enable_qi(struct intel_iommu *iommu)
1632 {
1633 	u32 sts;
1634 	unsigned long flags;
1635 	struct q_inval *qi = iommu->qi;
1636 	u64 val = virt_to_phys(qi->desc);
1637 
1638 	qi->free_head = qi->free_tail = 0;
1639 	qi->free_cnt = QI_LENGTH;
1640 
1641 	/*
1642 	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
1643 	 * is present.
1644 	 */
1645 	if (ecap_smts(iommu->ecap))
1646 		val |= BIT_ULL(11) | BIT_ULL(0);
1647 
1648 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1649 
1650 	/* write zero to the tail reg */
1651 	writel(0, iommu->reg + DMAR_IQT_REG);
1652 
1653 	writeq(val, iommu->reg + DMAR_IQA_REG);
1654 
1655 	iommu->gcmd |= DMA_GCMD_QIE;
1656 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1657 
1658 	/* Make sure hardware complete it */
1659 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1660 
1661 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1662 }
1663 
1664 /*
1665  * Enable Queued Invalidation interface. This is a must to support
1666  * interrupt-remapping. Also used by DMA-remapping, which replaces
1667  * register based IOTLB invalidation.
1668  */
1669 int dmar_enable_qi(struct intel_iommu *iommu)
1670 {
1671 	struct q_inval *qi;
1672 	void *desc;
1673 
1674 	if (!ecap_qis(iommu->ecap))
1675 		return -ENOENT;
1676 
1677 	/*
1678 	 * queued invalidation is already setup and enabled.
1679 	 */
1680 	if (iommu->qi)
1681 		return 0;
1682 
1683 	iommu->qi = kmalloc_obj(*qi, GFP_ATOMIC);
1684 	if (!iommu->qi)
1685 		return -ENOMEM;
1686 
1687 	qi = iommu->qi;
1688 
1689 	/*
1690 	 * Need two pages to accommodate 256 descriptors of 256 bits each
1691 	 * if the remapping hardware supports scalable mode translation.
1692 	 */
1693 	desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
1694 					 ecap_smts(iommu->ecap) ? SZ_8K :
1695 								  SZ_4K);
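	/*
	 * Sizing sketch, assuming QI_LENGTH of 256: scalable-mode descriptors
	 * are 256 bits (32 bytes), so 256 * 32 = 8 KB; legacy 128-bit
	 * descriptors fit in 256 * 16 = 4 KB.
	 */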
1696 	if (!desc) {
1697 		kfree(qi);
1698 		iommu->qi = NULL;
1699 		return -ENOMEM;
1700 	}
1701 
1702 	qi->desc = desc;
1703 
1704 	qi->desc_status = kzalloc_objs(int, QI_LENGTH, GFP_ATOMIC);
1705 	if (!qi->desc_status) {
1706 		iommu_free_pages(qi->desc);
1707 		kfree(qi);
1708 		iommu->qi = NULL;
1709 		return -ENOMEM;
1710 	}
1711 
1712 	raw_spin_lock_init(&qi->q_lock);
1713 
1714 	__dmar_enable_qi(iommu);
1715 
1716 	return 0;
1717 }
1718 
1719 /* iommu interrupt handling. Most of it is MSI-like. */
1720 
1721 enum faulttype {
1722 	DMA_REMAP,
1723 	INTR_REMAP,
1724 	UNKNOWN,
1725 };
1726 
1727 static const char *dma_remap_fault_reasons[] =
1728 {
1729 	"Software",
1730 	"Present bit in root entry is clear",
1731 	"Present bit in context entry is clear",
1732 	"Invalid context entry",
1733 	"Access beyond MGAW",
1734 	"PTE Write access is not set",
1735 	"PTE Read access is not set",
1736 	"Next page table ptr is invalid",
1737 	"Root table address invalid",
1738 	"Context table ptr is invalid",
1739 	"non-zero reserved fields in RTP",
1740 	"non-zero reserved fields in CTP",
1741 	"non-zero reserved fields in PTE",
1742 	"PCE for translation request specifies blocking",
1743 };
1744 
1745 static const char * const dma_remap_sm_fault_reasons[] = {
1746 	"SM: Invalid Root Table Address",
1747 	"SM: TTM 0 for request with PASID",
1748 	"SM: TTM 0 for page group request",
1749 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1750 	"SM: Error attempting to access Root Entry",
1751 	"SM: Present bit in Root Entry is clear",
1752 	"SM: Non-zero reserved field set in Root Entry",
1753 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1754 	"SM: Error attempting to access Context Entry",
1755 	"SM: Present bit in Context Entry is clear",
1756 	"SM: Non-zero reserved field set in the Context Entry",
1757 	"SM: Invalid Context Entry",
1758 	"SM: DTE field in Context Entry is clear",
1759 	"SM: PASID Enable field in Context Entry is clear",
1760 	"SM: PASID is larger than the max in Context Entry",
1761 	"SM: PRE field in Context-Entry is clear",
1762 	"SM: RID_PASID field error in Context-Entry",
1763 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1764 	"SM: Error attempting to access the PASID Directory Entry",
1765 	"SM: Present bit in Directory Entry is clear",
1766 	"SM: Non-zero reserved field set in PASID Directory Entry",
1767 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1768 	"SM: Error attempting to access PASID Table Entry",
1769 	"SM: Present bit in PASID Table Entry is clear",
1770 	"SM: Non-zero reserved field set in PASID Table Entry",
1771 	"SM: Invalid Scalable-Mode PASID Table Entry",
1772 	"SM: ERE field is clear in PASID Table Entry",
1773 	"SM: SRE field is clear in PASID Table Entry",
1774 	"Unknown", "Unknown",/* 0x5E-0x5F */
1775 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1776 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1777 	"SM: Error attempting to access first-level paging entry",
1778 	"SM: Present bit in first-level paging entry is clear",
1779 	"SM: Non-zero reserved field set in first-level paging entry",
1780 	"SM: Error attempting to access FL-PML4 entry",
1781 	"SM: First-level entry address beyond MGAW in Nested translation",
1782 	"SM: Read permission error in FL-PML4 entry in Nested translation",
1783 	"SM: Read permission error in first-level paging entry in Nested translation",
1784 	"SM: Write permission error in first-level paging entry in Nested translation",
1785 	"SM: Error attempting to access second-level paging entry",
1786 	"SM: Read/Write permission error in second-level paging entry",
1787 	"SM: Non-zero reserved field set in second-level paging entry",
1788 	"SM: Invalid second-level page table pointer",
1789 	"SM: A/D bit update needed in second-level entry when set up in no snoop",
1790 	"Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1791 	"SM: Address in first-level translation is not canonical",
1792 	"SM: U/S set 0 for first-level translation with user privilege",
1793 	"SM: No execute permission for request with PASID and ER=1",
1794 	"SM: Address beyond the DMA hardware max",
1795 	"SM: Second-level entry address beyond the max",
1796 	"SM: No write permission for Write/AtomicOp request",
1797 	"SM: No read permission for Read/AtomicOp request",
1798 	"SM: Invalid address-interrupt address",
1799 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1800 	"SM: A/D bit update needed in first-level entry when set up in no snoop",
1801 };
1802 
1803 static const char *irq_remap_fault_reasons[] =
1804 {
1805 	"Detected reserved fields in the decoded interrupt-remapped request",
1806 	"Interrupt index exceeded the interrupt-remapping table size",
1807 	"Present field in the IRTE entry is clear",
1808 	"Error accessing interrupt-remapping table pointed by IRTA_REG",
1809 	"Detected reserved fields in the IRTE entry",
1810 	"Blocked a compatibility format interrupt request",
1811 	"Blocked an interrupt request due to source-id verification failure",
1812 };
1813 
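/*
 * Summary of the fault-reason encoding handled by dmar_get_fault_reason()
 * below (ranges derived from the table sizes in this file):
 *
 *	0x00+	legacy DMA remapping faults (dma_remap_fault_reasons)
 *	0x20+	interrupt remapping faults  (irq_remap_fault_reasons)
 *	0x30+	scalable-mode faults        (dma_remap_sm_fault_reasons)
 *
 * Anything outside these ranges is reported as "Unknown".
 */
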
1814 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1815 {
1816 	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1817 					ARRAY_SIZE(irq_remap_fault_reasons))) {
1818 		*fault_type = INTR_REMAP;
1819 		return irq_remap_fault_reasons[fault_reason - 0x20];
1820 	} else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1821 			ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1822 		*fault_type = DMA_REMAP;
1823 		return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1824 	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1825 		*fault_type = DMA_REMAP;
1826 		return dma_remap_fault_reasons[fault_reason];
1827 	} else {
1828 		*fault_type = UNKNOWN;
1829 		return "Unknown";
1830 	}
1831 }
1832 
1833 
1834 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1835 {
1836 	if (iommu->irq == irq)
1837 		return DMAR_FECTL_REG;
1838 	else if (iommu->pr_irq == irq)
1839 		return DMAR_PECTL_REG;
1840 	else if (iommu->perf_irq == irq)
1841 		return DMAR_PERFINTRCTL_REG;
1842 	else
1843 		BUG();
1844 }
1845 
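/*
 * The helpers below treat each event-interrupt register block as a small
 * MSI-like bank relative to the control register returned by dmar_msi_reg():
 *
 *	+0	control (interrupt mask bit)
 *	+4	data
 *	+8	address (low 32 bits)
 *	+12	address (high 32 bits)
 *
 * The offsets are inferred from dmar_msi_write() below; check the VT-d
 * register map (FECTL/FEDATA/FEADDR/FEUADDR and friends) before reusing them.
 */
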
1846 void dmar_msi_unmask(struct irq_data *data)
1847 {
1848 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1849 	int reg = dmar_msi_reg(iommu, data->irq);
1850 	unsigned long flag;
1851 
1852 	/* unmask it */
1853 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1854 	writel(0, iommu->reg + reg);
1855 	/* Read back the register to flush the posted write */
1856 	readl(iommu->reg + reg);
1857 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1858 }
1859 
1860 void dmar_msi_mask(struct irq_data *data)
1861 {
1862 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1863 	int reg = dmar_msi_reg(iommu, data->irq);
1864 	unsigned long flag;
1865 
1866 	/* mask it */
1867 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1868 	writel(DMA_FECTL_IM, iommu->reg + reg);
1869 	/* Read back the register to flush the posted write */
1870 	readl(iommu->reg + reg);
1871 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1872 }
1873 
1874 void dmar_msi_write(int irq, struct msi_msg *msg)
1875 {
1876 	struct intel_iommu *iommu = irq_get_handler_data(irq);
1877 	int reg = dmar_msi_reg(iommu, irq);
1878 	unsigned long flag;
1879 
1880 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1881 	writel(msg->data, iommu->reg + reg + 4);
1882 	writel(msg->address_lo, iommu->reg + reg + 8);
1883 	writel(msg->address_hi, iommu->reg + reg + 12);
1884 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1885 }
1886 
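/*
 * Illustrative sketch only: the mask/unmask callbacks above have the shape an
 * MSI-style irq_chip expects, so a (hypothetical) consumer could wire them up
 * roughly as below. The real irq_chip for these vectors lives in arch code
 * and is not reproduced here.
 *
 *	static struct irq_chip dmar_msi_chip_example = {
 *		.name		= "DMAR-MSI",
 *		.irq_unmask	= dmar_msi_unmask,
 *		.irq_mask	= dmar_msi_mask,
 *	};
 */
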
1887 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1888 		u8 fault_reason, u32 pasid, u16 source_id,
1889 		unsigned long long addr)
1890 {
1891 	const char *reason;
1892 	int fault_type;
1893 
1894 	reason = dmar_get_fault_reason(fault_reason, &fault_type);
1895 
1896 	if (fault_type == INTR_REMAP) {
1897 		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
1898 		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
1899 		       PCI_FUNC(source_id & 0xFF), addr >> 48,
1900 		       fault_reason, reason);
1901 
1902 		return 0;
1903 	}
1904 
1905 	if (pasid == IOMMU_PASID_INVALID)
1906 		pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1907 		       type ? "DMA Read" : "DMA Write",
1908 		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
1909 		       PCI_FUNC(source_id & 0xFF), addr,
1910 		       fault_reason, reason);
1911 	else
1912 		pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1913 		       type ? "DMA Read" : "DMA Write", pasid,
1914 		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
1915 		       PCI_FUNC(source_id & 0xFF), addr,
1916 		       fault_reason, reason);
1917 
1918 	dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
1919 
1920 	return 0;
1921 }
1922 
1923 #define PRIMARY_FAULT_REG_LEN (16)
1924 irqreturn_t dmar_fault(int irq, void *dev_id)
1925 {
1926 	struct intel_iommu *iommu = dev_id;
1927 	int reg, fault_index;
1928 	u32 fault_status;
1929 	unsigned long flag;
1930 	static DEFINE_RATELIMIT_STATE(rs,
1931 				      DEFAULT_RATELIMIT_INTERVAL,
1932 				      DEFAULT_RATELIMIT_BURST);
1933 
1934 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1935 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1936 	if (fault_status && __ratelimit(&rs))
1937 		pr_err("DRHD: handling fault status reg %x\n", fault_status);
1938 
1939 	/* TBD: the advanced fault log is currently ignored */
1940 	if (!(fault_status & DMA_FSTS_PPF))
1941 		goto unlock_exit;
1942 
1943 	fault_index = dma_fsts_fault_record_index(fault_status);
1944 	reg = cap_fault_reg_offset(iommu->cap);
1945 	while (1) {
1946 		/* Disable printing, simply clear the fault when ratelimited */
1947 		bool ratelimited = !__ratelimit(&rs);
1948 		u8 fault_reason;
1949 		u16 source_id;
1950 		u64 guest_addr;
1951 		u32 pasid;
1952 		int type;
1953 		u32 data;
1954 		bool pasid_present;
1955 
1956 		/* Highest 32 bits of the fault recording register */
1957 		data = readl(iommu->reg + reg +
1958 				fault_index * PRIMARY_FAULT_REG_LEN + 12);
1959 		if (!(data & DMA_FRCD_F))
1960 			break;
1961 
1962 		if (!ratelimited) {
1963 			fault_reason = dma_frcd_fault_reason(data);
1964 			type = dma_frcd_type(data);
1965 
1966 			pasid = dma_frcd_pasid_value(data);
1967 			data = readl(iommu->reg + reg +
1968 				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
1969 			source_id = dma_frcd_source_id(data);
1970 
1971 			pasid_present = dma_frcd_pasid_present(data);
1972 			guest_addr = readq(iommu->reg + reg +
1973 					   fault_index * PRIMARY_FAULT_REG_LEN);
1974 			guest_addr = dma_frcd_page_addr(guest_addr);
1975 		}
1976 
1977 		/* clear the fault */
1978 		writel(DMA_FRCD_F, iommu->reg + reg +
1979 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
1980 
1981 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1982 
1983 		if (!ratelimited)
1984 			/* Pass IOMMU_PASID_INVALID (-1) when no PASID is present */
1985 			dmar_fault_do_one(iommu, type, fault_reason,
1986 					  pasid_present ? pasid : IOMMU_PASID_INVALID,
1987 					  source_id, guest_addr);
1988 
1989 		fault_index++;
1990 		if (fault_index >= cap_num_fault_regs(iommu->cap))
1991 			fault_index = 0;
1992 		raw_spin_lock_irqsave(&iommu->register_lock, flag);
1993 	}
1994 
1995 	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
1996 	       iommu->reg + DMAR_FSTS_REG);
1997 
1998 unlock_exit:
1999 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2000 	return IRQ_HANDLED;
2001 }
2002 
2003 int dmar_set_interrupt(struct intel_iommu *iommu)
2004 {
2005 	int irq, ret;
2006 
2007 	/*
2008 	 * Check if the fault interrupt is already initialized.
2009 	 */
2010 	if (iommu->irq)
2011 		return 0;
2012 
2013 	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2014 	if (irq > 0) {
2015 		iommu->irq = irq;
2016 	} else {
2017 		pr_err("No free IRQ vectors\n");
2018 		return -EINVAL;
2019 	}
2020 
2021 	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2022 	if (ret)
2023 		pr_err("Can't request irq\n");
2024 	return ret;
2025 }
2026 
2027 int enable_drhd_fault_handling(unsigned int cpu)
2028 {
2029 	struct dmar_drhd_unit *drhd;
2030 	struct intel_iommu *iommu;
2031 
2032 	/*
2033 	 * Enable fault control interrupt.
2034 	 */
2035 	guard(rwsem_read)(&dmar_global_lock);
2036 	for_each_iommu(iommu, drhd) {
2037 		u32 fault_status;
2038 		int ret;
2039 
2040 		if (iommu->irq || iommu->node != cpu_to_node(cpu))
2041 			continue;
2042 
2043 		ret = dmar_set_interrupt(iommu);
2044 
2045 		if (ret) {
2046 			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
2047 			       (unsigned long long)drhd->reg_base_addr, ret);
2048 			return -1;
2049 		}
2050 
2051 		/*
2052 		 * Clear any previous faults.
2053 		 */
2054 		dmar_fault(iommu->irq, iommu);
2055 		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2056 		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2057 	}
2058 
2059 	return 0;
2060 }
2061 
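/*
 * Illustrative sketch only: enable_drhd_fault_handling() has the
 * int (*)(unsigned int cpu) signature used by CPU hotplug callbacks, so it
 * could (hypothetically) be registered along the lines below. The actual
 * registration site is outside this file.
 *
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "iommu/intel_dmar:online",
 *			  enable_drhd_fault_handling, NULL);
 */
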
2062 /*
2063  * Re-enable Queued Invalidation interface.
2064  */
2065 int dmar_reenable_qi(struct intel_iommu *iommu)
2066 {
2067 	if (!ecap_qis(iommu->ecap))
2068 		return -ENOENT;
2069 
2070 	if (!iommu->qi)
2071 		return -ENOENT;
2072 
2073 	/*
2074 	 * First disable queued invalidation.
2075 	 */
2076 	dmar_disable_qi(iommu);
2077 	/*
2078 	 * Then enable queued invalidation again. Since there are no pending
2079 	 * invalidation requests now, it's safe to re-enable queued
2080 	 * invalidation.
2081 	 */
2082 	__dmar_enable_qi(iommu);
2083 
2084 	return 0;
2085 }
2086 
2087 /*
2088  * Check whether the DMAR table advertises interrupt remapping support.
2089  */
2090 int __init dmar_ir_support(void)
2091 {
2092 	struct acpi_table_dmar *dmar;
2093 	dmar = (struct acpi_table_dmar *)dmar_tbl;
2094 	if (!dmar)
2095 		return 0;
2096 	return dmar->flags & 0x1;	/* bit 0: interrupt remapping supported */
2097 }
2098 
2099 /* Check whether DMAR units are in use */
2100 static inline bool dmar_in_use(void)
2101 {
2102 	return irq_remapping_enabled || intel_iommu_enabled;
2103 }
2104 
2105 static int __init dmar_free_unused_resources(void)
2106 {
2107 	struct dmar_drhd_unit *dmaru, *dmaru_n;
2108 
2109 	if (dmar_in_use())
2110 		return 0;
2111 
2112 	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
2113 		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
2114 
2115 	down_write(&dmar_global_lock);
2116 	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
2117 		list_del(&dmaru->list);
2118 		dmar_free_drhd(dmaru);
2119 	}
2120 	up_write(&dmar_global_lock);
2121 
2122 	return 0;
2123 }
2124 
2125 late_initcall(dmar_free_unused_resources);
2126 
2127 /*
2128  * DMAR Hotplug Support
2129  * For more details, please refer to Intel(R) Virtualization Technology
2130  * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
2131  * "Remapping Hardware Unit Hot Plug".
2132  */
2133 static guid_t dmar_hp_guid =
2134 	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
2135 		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
2136 
2137 /*
2138  * Currently there is only one revision, and the BIOS does not check the revision ID,
2139  * so use 0 for safety.
2140  */
2141 #define	DMAR_DSM_REV_ID			0
2142 #define	DMAR_DSM_FUNC_DRHD		1
2143 #define	DMAR_DSM_FUNC_ATSR		2
2144 #define	DMAR_DSM_FUNC_RHSA		3
2145 #define	DMAR_DSM_FUNC_SATC		4
2146 
2147 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
2148 {
2149 	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
2150 }
2151 
2152 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
2153 				  dmar_res_handler_t handler, void *arg)
2154 {
2155 	int ret = -ENODEV;
2156 	union acpi_object *obj;
2157 	struct acpi_dmar_header *start;
2158 	struct dmar_res_callback callback;
2159 	static int res_type[] = {
2160 		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
2161 		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
2162 		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
2163 		[DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
2164 	};
2165 
2166 	if (!dmar_detect_dsm(handle, func))
2167 		return 0;
2168 
2169 	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
2170 				      func, NULL, ACPI_TYPE_BUFFER);
2171 	if (!obj)
2172 		return -ENODEV;
2173 
2174 	memset(&callback, 0, sizeof(callback));
2175 	callback.cb[res_type[func]] = handler;
2176 	callback.arg[res_type[func]] = arg;
2177 	start = (struct acpi_dmar_header *)obj->buffer.pointer;
2178 	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
2179 
2180 	ACPI_FREE(obj);
2181 
2182 	return ret;
2183 }
2184 
2185 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
2186 {
2187 	int ret;
2188 	struct dmar_drhd_unit *dmaru;
2189 
2190 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2191 	if (!dmaru)
2192 		return -ENODEV;
2193 
2194 	ret = dmar_ir_hotplug(dmaru, true);
2195 	if (ret == 0)
2196 		ret = dmar_iommu_hotplug(dmaru, true);
2197 
2198 	return ret;
2199 }
2200 
2201 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2202 {
2203 	int i, ret;
2204 	struct device *dev;
2205 	struct dmar_drhd_unit *dmaru;
2206 
2207 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2208 	if (!dmaru)
2209 		return 0;
2210 
2211 	/*
2212 	 * All PCI devices managed by this unit should have been destroyed.
2213 	 */
2214 	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
2215 		for_each_active_dev_scope(dmaru->devices,
2216 					  dmaru->devices_cnt, i, dev)
2217 			return -EBUSY;
2218 	}
2219 
2220 	ret = dmar_ir_hotplug(dmaru, false);
2221 	if (ret == 0)
2222 		ret = dmar_iommu_hotplug(dmaru, false);
2223 
2224 	return ret;
2225 }
2226 
2227 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2228 {
2229 	struct dmar_drhd_unit *dmaru;
2230 
2231 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2232 	if (dmaru) {
2233 		list_del_rcu(&dmaru->list);
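		/* Wait for RCU readers still walking the DRHD list to finish. */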
2234 		synchronize_rcu();
2235 		dmar_free_drhd(dmaru);
2236 	}
2237 
2238 	return 0;
2239 }
2240 
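/*
 * dmar_hotplug_insert() below processes the _DSM-provided DMAR structures in
 * stages: validate the DRHDs, parse the DRHD/RHSA/ATSR entries, then activate
 * each unit via dmar_hp_add_drhd(). A failure unwinds whatever stages already
 * completed (remove activated DRHDs, release ATSRs, release DRHDs).
 */
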
2241 static int dmar_hotplug_insert(acpi_handle handle)
2242 {
2243 	int ret;
2244 	int drhd_count = 0;
2245 
2246 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2247 				     &dmar_validate_one_drhd, (void *)1);
2248 	if (ret)
2249 		goto out;
2250 
2251 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2252 				     &dmar_parse_one_drhd, (void *)&drhd_count);
2253 	if (ret == 0 && drhd_count == 0) {
2254 		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2255 		goto out;
2256 	} else if (ret) {
2257 		goto release_drhd;
2258 	}
2259 
2260 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2261 				     &dmar_parse_one_rhsa, NULL);
2262 	if (ret)
2263 		goto release_drhd;
2264 
2265 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2266 				     &dmar_parse_one_atsr, NULL);
2267 	if (ret)
2268 		goto release_atsr;
2269 
2270 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2271 				     &dmar_hp_add_drhd, NULL);
2272 	if (!ret)
2273 		return 0;
2274 
2275 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2276 			       &dmar_hp_remove_drhd, NULL);
2277 release_atsr:
2278 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2279 			       &dmar_release_one_atsr, NULL);
2280 release_drhd:
2281 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2282 			       &dmar_hp_release_drhd, NULL);
2283 out:
2284 	return ret;
2285 }
2286 
2287 static int dmar_hotplug_remove(acpi_handle handle)
2288 {
2289 	int ret;
2290 
2291 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2292 				     &dmar_check_one_atsr, NULL);
2293 	if (ret)
2294 		return ret;
2295 
2296 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2297 				     &dmar_hp_remove_drhd, NULL);
2298 	if (ret == 0) {
2299 		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2300 					       &dmar_release_one_atsr, NULL));
2301 		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2302 					       &dmar_hp_release_drhd, NULL));
2303 	} else {
2304 		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2305 				       &dmar_hp_add_drhd, NULL);
2306 	}
2307 
2308 	return ret;
2309 }
2310 
2311 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2312 				       void *context, void **retval)
2313 {
2314 	acpi_handle *phdl = retval;
2315 
2316 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2317 		*phdl = handle;
2318 		return AE_CTRL_TERMINATE;
2319 	}
2320 
2321 	return AE_OK;
2322 }
2323 
2324 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2325 {
2326 	int ret;
2327 	acpi_handle tmp = NULL;
2328 	acpi_status status;
2329 
2330 	if (!dmar_in_use())
2331 		return 0;
2332 
2333 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2334 		tmp = handle;
2335 	} else {
2336 		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2337 					     ACPI_UINT32_MAX,
2338 					     dmar_get_dsm_handle,
2339 					     NULL, NULL, &tmp);
2340 		if (ACPI_FAILURE(status)) {
2341 			pr_warn("Failed to locate _DSM method.\n");
2342 			return -ENXIO;
2343 		}
2344 	}
2345 	if (tmp == NULL)
2346 		return 0;
2347 
2348 	down_write(&dmar_global_lock);
2349 	if (insert)
2350 		ret = dmar_hotplug_insert(tmp);
2351 	else
2352 		ret = dmar_hotplug_remove(tmp);
2353 	up_write(&dmar_global_lock);
2354 
2355 	return ret;
2356 }
2357 
2358 int dmar_device_add(acpi_handle handle)
2359 {
2360 	return dmar_device_hotplug(handle, true);
2361 }
2362 
2363 int dmar_device_remove(acpi_handle handle)
2364 {
2365 	return dmar_device_hotplug(handle, false);
2366 }
2367 
2368 /*
2369  * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in the DMAR table?
2370  *
2371  * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2372  * the ACPI DMAR table. This means that the platform boot firmware has made
2373  * sure no device can issue DMA outside of RMRR regions.
2374  */
2375 bool dmar_platform_optin(void)
2376 {
2377 	struct acpi_table_dmar *dmar;
2378 	acpi_status status;
2379 	bool ret;
2380 
2381 	status = acpi_get_table(ACPI_SIG_DMAR, 0,
2382 				(struct acpi_table_header **)&dmar);
2383 	if (ACPI_FAILURE(status))
2384 		return false;
2385 
2386 	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2387 	acpi_put_table((struct acpi_table_header *)dmar);
2388 
2389 	return ret;
2390 }
2391 EXPORT_SYMBOL_GPL(dmar_platform_optin);
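
/*
 * Illustrative sketch only (hypothetical caller, not from this file): the
 * exported helper lets IOMMU setup code bias its defaults on opted-in
 * platforms, e.g.:
 *
 *	if (dmar_platform_optin())
 *		pr_info("Platform opted in to DMA protection\n");
 */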
2392