xref: /kvmtool/pci.c (revision ff01b5dbbd040d39619f0f033da6eac5dbed3b2b)
1 #include "kvm/devices.h"
2 #include "kvm/pci.h"
3 #include "kvm/ioport.h"
4 #include "kvm/irq.h"
5 #include "kvm/util.h"
6 #include "kvm/kvm.h"
7 
8 #include <linux/err.h>
9 #include <assert.h>
10 
/*
 * Latched value of the CF8h CONFIG_ADDRESS register (config mechanism #1).
 * Written via the PCI_CONFIG_ADDRESS ioport handlers, decoded by the
 * PCI_CONFIG_DATA handlers to select bus/device/function/register.
 */
static u32 pci_config_address_bits;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*, is used to assign BARs etc.!
 * (That's why it can still 32bit even with 64bit guests-- 64bit
 * PCI isn't currently supported.)
 */
static u32 io_space_blocks		= KVM_PCI_MMIO_AREA;
19 
20 /*
21  * BARs must be naturally aligned, so enforce this in the allocator.
22  */
23 u32 pci_get_io_space_block(u32 size)
24 {
25 	u32 block = ALIGN(io_space_blocks, size);
26 	io_space_blocks = block + size;
27 	return block;
28 }
29 
30 void pci__assign_irq(struct device_header *dev_hdr)
31 {
32 	struct pci_device_header *pci_hdr = dev_hdr->data;
33 
34 	/*
35 	 * PCI supports only INTA#,B#,C#,D# per device.
36 	 *
37 	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
38 	 * with A# for our single function devices.
39 	 */
40 	pci_hdr->irq_pin	= 1;
41 	pci_hdr->irq_line	= irq__alloc_line();
42 
43 	if (!pci_hdr->irq_type)
44 		pci_hdr->irq_type = IRQ_TYPE_EDGE_RISING;
45 }
46 
47 static void *pci_config_address_ptr(u16 port)
48 {
49 	unsigned long offset;
50 	void *base;
51 
52 	offset	= port - PCI_CONFIG_ADDRESS;
53 	base	= &pci_config_address_bits;
54 
55 	return base + offset;
56 }
57 
58 static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
59 {
60 	void *p = pci_config_address_ptr(port);
61 
62 	memcpy(p, data, size);
63 
64 	return true;
65 }
66 
67 static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
68 {
69 	void *p = pci_config_address_ptr(port);
70 
71 	memcpy(data, p, size);
72 
73 	return true;
74 }
75 
/* ioport handlers for the CONFIG_ADDRESS register (CF8h..CFBh). */
static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};
80 
81 static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
82 {
83 	union pci_config_address pci_config_address;
84 
85 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
86 
87 	if (pci_config_address.bus_number != bus_number)
88 		return false;
89 
90 	if (pci_config_address.function_number != function_number)
91 		return false;
92 
93 	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
94 }
95 
96 static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
97 {
98 	union pci_config_address pci_config_address;
99 
100 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
101 	/*
102 	 * If someone accesses PCI configuration space offsets that are not
103 	 * aligned to 4 bytes, it uses ioports to signify that.
104 	 */
105 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
106 
107 	pci__config_wr(vcpu->kvm, pci_config_address, data, size);
108 
109 	return true;
110 }
111 
112 static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
113 {
114 	union pci_config_address pci_config_address;
115 
116 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
117 	/*
118 	 * If someone accesses PCI configuration space offsets that are not
119 	 * aligned to 4 bytes, it uses ioports to signify that.
120 	 */
121 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
122 
123 	pci__config_rd(vcpu->kvm, pci_config_address, data, size);
124 
125 	return true;
126 }
127 
/* ioport handlers for the CONFIG_DATA register (CFCh..CFFh). */
static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};
132 
/*
 * Handle a guest write to PCI configuration space.
 *
 * Function 0 only is supported. The write is first offered to the device's
 * cfg_ops.write callback (if any), then applied to the shadow config header,
 * with special handling for the BAR sizing protocol.
 */
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	void *base;
	u8 bar, offset;
	struct pci_device_header *pci_hdr;
	u8 dev_num = addr.device_number;

	/* Silently drop writes to non-existent devices/functions. */
	if (!pci_device_exists(addr.bus_number, dev_num, 0))
		return;

	/* Offset within the device's config header (masked to header size). */
	offset = addr.w & PCI_DEV_CFG_MASK;
	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

	/* Give the device model first crack at the write. */
	if (pci_hdr->cfg_ops.write)
		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);

	/*
	 * legacy hack: ignore writes to uninitialized regions (e.g. ROM BAR).
	 * Not very nice but has been working so far.
	 */
	if (*(u32 *)(base + offset) == 0)
		return;

	/*
	 * BAR index, assuming the offset lands in the BAR area. For offsets
	 * outside BAR0..BAR5 the u8 arithmetic wraps, the bar < 6 test below
	 * fails, and we fall through to a plain copy — relied upon for all
	 * non-BAR registers.
	 */
	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);

	/*
	 * If the kernel masks the BAR it would expect to find the size of the
	 * BAR there next time it reads from it. When the kernel got the size it
	 * would write the address back.
	 */
	if (bar < 6 && ioport__read32(data) == 0xFFFFFFFF) {
		u32 sz = pci_hdr->bar_size[bar];
		memcpy(base + offset, &sz, sizeof(sz));
	} else {
		memcpy(base + offset, data, size);
	}
}
170 
171 void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
172 {
173 	u8 offset;
174 	struct pci_device_header *pci_hdr;
175 	u8 dev_num = addr.device_number;
176 
177 	if (pci_device_exists(addr.bus_number, dev_num, 0)) {
178 		pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
179 		offset = addr.w & PCI_DEV_CFG_MASK;
180 
181 		if (pci_hdr->cfg_ops.read)
182 			pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);
183 
184 		memcpy(data, (void *)pci_hdr + offset, size);
185 	} else {
186 		memset(data, 0xff, size);
187 	}
188 }
189 
190 static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
191 				   u32 len, u8 is_write, void *kvm)
192 {
193 	union pci_config_address cfg_addr;
194 
195 	addr			-= KVM_PCI_CFG_AREA;
196 	cfg_addr.w		= (u32)addr;
197 	cfg_addr.enable_bit	= 1;
198 
199 	if (is_write)
200 		pci__config_wr(kvm, cfg_addr, data, len);
201 	else
202 		pci__config_rd(kvm, cfg_addr, data, len);
203 }
204 
205 struct pci_device_header *pci__find_dev(u8 dev_num)
206 {
207 	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
208 
209 	if (IS_ERR_OR_NULL(hdr))
210 		return NULL;
211 
212 	return hdr->data;
213 }
214 
215 int pci__init(struct kvm *kvm)
216 {
217 	int r;
218 
219 	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
220 	if (r < 0)
221 		return r;
222 
223 	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
224 	if (r < 0)
225 		goto err_unregister_data;
226 
227 	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
228 			       pci_config_mmio_access, kvm);
229 	if (r < 0)
230 		goto err_unregister_addr;
231 
232 	return 0;
233 
234 err_unregister_addr:
235 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
236 err_unregister_data:
237 	ioport__unregister(kvm, PCI_CONFIG_DATA);
238 	return r;
239 }
240 dev_base_init(pci__init);
241 
/* Tear down the config ioports registered by pci__init(). Always succeeds. */
int pci__exit(struct kvm *kvm)
{
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);
250