/*
 * Intel IOMMU APIs
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * Authors:
 *   Peter Xu <peterx@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or
 * later.
 */

#include "intel-iommu.h"
#include "libcflat.h"

/*
 * VT-d in QEMU currently only supports a 39-bit address width,
 * which corresponds to 3-level page table translation.
 */
#define VTD_PAGE_LEVEL      3
#define VTD_CE_AW_39BIT     0x1

typedef uint64_t vtd_pte_t;

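/*
 * Root entry: one per PCI bus number.  The root table is indexed by bus
 * number and each present entry points to the context table for that bus.
 */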
struct vtd_root_entry {
	/* Quad 1 */
	uint64_t present:1;
	uint64_t __reserved:11;
	uint64_t context_table_p:52;
	/* Quad 2 */
	uint64_t __reserved_2;
} __attribute__ ((packed));
typedef struct vtd_root_entry vtd_re_t;

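/*
 * Context entry: one per device function (devfn) under a bus.  A present
 * entry carries the domain ID, the address width and the second level
 * page table pointer (slptptr) used for DMA remapping.
 */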
struct vtd_context_entry {
	/* Quad 1 */
	uint64_t present:1;
	uint64_t disable_fault_report:1;
	uint64_t trans_type:2;
	uint64_t __reserved:8;
	uint64_t slptptr:52;
	/* Quad 2 */
	uint64_t addr_width:3;
	uint64_t __ignore:4;
	uint64_t __reserved_2:1;
	uint64_t domain_id:16;
	uint64_t __reserved_3:40;
} __attribute__ ((packed));
typedef struct vtd_context_entry vtd_ce_t;

#define VTD_RTA_MASK  (PAGE_MASK)
#define VTD_IRTA_MASK (PAGE_MASK)

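/* MMIO base of the VT-d registers, mapped by vtd_init() */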
void *vtd_reg_base;

static uint64_t vtd_root_table(void)
{
	/* No extended root table support yet */
	return vtd_readq(DMAR_RTADDR_REG) & VTD_RTA_MASK;
}

static uint64_t vtd_ir_table(void)
{
	return vtd_readq(DMAR_IRTA_REG) & VTD_IRTA_MASK;
}

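/*
 * Set one bit in the global command register and, for non-one-shot
 * commands, wait until the global status register reflects it.
 */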
static void vtd_gcmd_or(uint32_t cmd)
{
	uint32_t status;

	/* We only allow setting one bit at a time */
	assert(is_power_of_2(cmd));

	status = vtd_readl(DMAR_GSTS_REG);
	vtd_writel(DMAR_GCMD_REG, status | cmd);

	if (cmd & VTD_GCMD_ONE_SHOT_BITS) {
		/* One-shot bits take effect immediately */
		return;
	}

	/* Make sure the IOMMU handled our command request */
	while (!(vtd_readl(DMAR_GSTS_REG) & cmd))
		cpu_relax();
}

static void vtd_dump_init_info(void)
{
	uint32_t version;

	version = vtd_readl(DMAR_VER_REG);

	/* Major version (bits 7:4 of VER_REG) must be >= 1 */
	assert(((version >> 4) & 0xf) >= 1);

	printf("VT-d version:   0x%x\n", version);
	printf("     cap:       0x%016lx\n", vtd_readq(DMAR_CAP_REG));
	printf("     ecap:      0x%016lx\n", vtd_readq(DMAR_ECAP_REG));
}

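/*
 * The root table is a single zeroed 4KiB page: 256 root entries of
 * 16 bytes each, one per PCI bus.
 */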
static void vtd_setup_root_table(void)
{
	void *root = alloc_page();

	memset(root, 0, PAGE_SIZE);
	vtd_writeq(DMAR_RTADDR_REG, virt_to_phys(root));
	vtd_gcmd_or(VTD_GCMD_ROOT);
	printf("DMAR table address: 0x%016lx\n", vtd_root_table());
}

static void vtd_setup_ir_table(void)
{
	void *root = alloc_page();

	memset(root, 0, PAGE_SIZE);
	/* 0xf encodes the table size: 2^(0xf+1) == 65536 entries */
	vtd_writeq(DMAR_IRTA_REG, virt_to_phys(root) | 0xf);
	vtd_gcmd_or(VTD_GCMD_IR_TABLE);
	printf("IR table address: 0x%016lx\n", vtd_ir_table());
}

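/*
 * Install a PTE for @iova -> @pa into the page table rooted at @root.
 * Walk from the top level (VTD_PAGE_LEVEL) down to @level_target,
 * allocating intermediate page table pages on demand, then write the
 * leaf entry.  Leaves above level 1 are marked as huge pages.
 */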
static void vtd_install_pte(vtd_pte_t *root, iova_t iova,
			    phys_addr_t pa, int level_target)
{
	int level;
	unsigned int offset;
	void *page;

	for (level = VTD_PAGE_LEVEL; level > level_target; level--) {
		offset = PGDIR_OFFSET(iova, level);
		if (!(root[offset] & VTD_PTE_RW)) {
			page = alloc_page();
			memset(page, 0, PAGE_SIZE);
			root[offset] = virt_to_phys(page) | VTD_PTE_RW;
		}
		root = (uint64_t *)(phys_to_virt(root[offset] &
						 VTD_PTE_ADDR));
	}

	offset = PGDIR_OFFSET(iova, level);
	root[offset] = pa | VTD_PTE_RW;
	if (level != 1) {
		/* This is a huge page */
		root[offset] |= VTD_PTE_HUGE;
	}
}

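/*
 * Root and context entries store 4KiB-aligned pointers as page frame
 * numbers; convert such a PFN back into a usable virtual address.
 */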
#define  VTD_PHYS_TO_VIRT(x) \
	(phys_to_virt(((uint64_t)(x)) << VTD_PAGE_SHIFT))

/**
 * vtd_map_range: set up IO address mapping for a specific memory range
 *
 * @sid: source ID (bus:devfn) of the device to set up
 * @iova: start IO virtual address
 * @pa: start physical address
 * @size: size of the mapping area
 */
void vtd_map_range(uint16_t sid, iova_t iova, phys_addr_t pa, size_t size)
{
	uint8_t bus_n, devfn;
	void *slptptr;
	vtd_ce_t *ce;
	vtd_re_t *re = phys_to_virt(vtd_root_table());

	assert(IS_ALIGNED(iova, SZ_4K));
	assert(IS_ALIGNED(pa, SZ_4K));
	assert(IS_ALIGNED(size, SZ_4K));

	bus_n = PCI_BDF_GET_BUS(sid);
	devfn = PCI_BDF_GET_DEVFN(sid);

	/* Point to the correct root entry */
	re += bus_n;

	if (!re->present) {
		ce = alloc_page();
		memset(ce, 0, PAGE_SIZE);
		memset(re, 0, sizeof(*re));
		re->context_table_p = virt_to_phys(ce) >> VTD_PAGE_SHIFT;
		re->present = 1;
		printf("allocated vt-d root entry for PCI bus %d\n",
		       bus_n);
	} else
		ce = VTD_PHYS_TO_VIRT(re->context_table_p);

	/* Point to the correct context entry */
	ce += devfn;

	if (!ce->present) {
		slptptr = alloc_page();
		memset(slptptr, 0, PAGE_SIZE);
		memset(ce, 0, sizeof(*ce));
		/* To keep it simple, the domain ID equals the SID */
		ce->domain_id = sid;
		/* We only test the 39-bit width case (3-level paging) */
		ce->addr_width = VTD_CE_AW_39BIT;
		ce->slptptr = virt_to_phys(slptptr) >> VTD_PAGE_SHIFT;
		ce->trans_type = VTD_CONTEXT_TT_MULTI_LEVEL;
		ce->present = 1;
		/* No error reporting yet */
		ce->disable_fault_report = 1;
		printf("allocated vt-d context entry for devfn 0x%x\n",
		       devfn);
	} else
		slptptr = VTD_PHYS_TO_VIRT(ce->slptptr);

	while (size) {
		/* TODO: currently we only map 4K pages (level = 1) */
		printf("map 4K page IOVA 0x%lx to 0x%lx (sid=0x%04x)\n",
		       iova, pa, sid);
		vtd_install_pte(slptptr, iova, pa, 1);
		size -= VTD_PAGE_SIZE;
		iova += VTD_PAGE_SIZE;
		pa += VTD_PAGE_SIZE;
	}
}
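
/*
 * Typical usage of vtd_map_range(), as a sketch (dev_bdf here is assumed
 * to be the bus:devfn source ID of the device under test):
 *
 *	void *buf = alloc_page();
 *
 *	memset(buf, 0, PAGE_SIZE);
 *	vtd_map_range(dev_bdf, 0x10000, virt_to_phys(buf), PAGE_SIZE);
 *
 * Device DMA to IOVA 0x10000 then lands in 'buf'.
 */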

void vtd_init(void)
{
	setup_vm();
	smp_init();

	vtd_reg_base = ioremap(Q35_HOST_BRIDGE_IOMMU_ADDR, PAGE_SIZE);

	vtd_dump_init_info();
	vtd_gcmd_or(VTD_GCMD_QI); /* Enable QI */
	vtd_setup_root_table();
	vtd_setup_ir_table();
	vtd_gcmd_or(VTD_GCMD_DMAR); /* Enable DMAR */
	vtd_gcmd_or(VTD_GCMD_IR);   /* Enable IR */
}