xref: /kvmtool/pci.c (revision 023fdaae48b45aec087551804616e0836a74d675)
1 #include "kvm/devices.h"
2 #include "kvm/pci.h"
3 #include "kvm/ioport.h"
4 #include "kvm/irq.h"
5 #include "kvm/util.h"
6 #include "kvm/kvm.h"
7 
8 #include <linux/err.h>
9 #include <assert.h>
10 
/* Last value the guest latched into the CONFIG_ADDRESS (0xCF8) register. */
static u32 pci_config_address_bits;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*, is used to assign BARs etc.!
 * (That's why it can still 32bit even with 64bit guests-- 64bit
 * PCI isn't currently supported.)
 */
static u32 io_space_blocks		= KVM_PCI_MMIO_AREA;
19 
20 /*
21  * BARs must be naturally aligned, so enforce this in the allocator.
22  */
23 u32 pci_get_io_space_block(u32 size)
24 {
25 	u32 block = ALIGN(io_space_blocks, size);
26 	io_space_blocks = block + size;
27 	return block;
28 }
29 
30 void pci__assign_irq(struct device_header *dev_hdr)
31 {
32 	struct pci_device_header *pci_hdr = dev_hdr->data;
33 
34 	/*
35 	 * PCI supports only INTA#,B#,C#,D# per device.
36 	 *
37 	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
38 	 * with A# for our single function devices.
39 	 */
40 	pci_hdr->irq_pin	= 1;
41 	pci_hdr->irq_line	= irq__alloc_line();
42 }
43 
44 static void *pci_config_address_ptr(u16 port)
45 {
46 	unsigned long offset;
47 	void *base;
48 
49 	offset	= port - PCI_CONFIG_ADDRESS;
50 	base	= &pci_config_address_bits;
51 
52 	return base + offset;
53 }
54 
55 static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
56 {
57 	void *p = pci_config_address_ptr(port);
58 
59 	memcpy(p, data, size);
60 
61 	return true;
62 }
63 
64 static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
65 {
66 	void *p = pci_config_address_ptr(port);
67 
68 	memcpy(data, p, size);
69 
70 	return true;
71 }
72 
/* Handlers for the 4-byte CONFIG_ADDRESS register at 0xCF8. */
static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};
77 
78 static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
79 {
80 	union pci_config_address pci_config_address;
81 
82 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
83 
84 	if (pci_config_address.bus_number != bus_number)
85 		return false;
86 
87 	if (pci_config_address.function_number != function_number)
88 		return false;
89 
90 	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
91 }
92 
93 static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
94 {
95 	union pci_config_address pci_config_address;
96 
97 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
98 	/*
99 	 * If someone accesses PCI configuration space offsets that are not
100 	 * aligned to 4 bytes, it uses ioports to signify that.
101 	 */
102 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
103 
104 	pci__config_wr(vcpu->kvm, pci_config_address, data, size);
105 
106 	return true;
107 }
108 
109 static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
110 {
111 	union pci_config_address pci_config_address;
112 
113 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
114 	/*
115 	 * If someone accesses PCI configuration space offsets that are not
116 	 * aligned to 4 bytes, it uses ioports to signify that.
117 	 */
118 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
119 
120 	pci__config_rd(vcpu->kvm, pci_config_address, data, size);
121 
122 	return true;
123 }
124 
/* Handlers for the 4-byte CONFIG_DATA window at 0xCFC. */
static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};
129 
130 void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
131 {
132 	void *base;
133 	u8 bar, offset;
134 	struct pci_device_header *pci_hdr;
135 	u8 dev_num = addr.device_number;
136 
137 	if (!pci_device_exists(addr.bus_number, dev_num, 0))
138 		return;
139 
140 	offset = addr.w & PCI_DEV_CFG_MASK;
141 	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
142 
143 	if (pci_hdr->cfg_ops.write)
144 		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);
145 
146 	/*
147 	 * legacy hack: ignore writes to uninitialized regions (e.g. ROM BAR).
148 	 * Not very nice but has been working so far.
149 	 */
150 	if (*(u32 *)(base + offset) == 0)
151 		return;
152 
153 	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);
154 
155 	/*
156 	 * If the kernel masks the BAR it would expect to find the size of the
157 	 * BAR there next time it reads from it. When the kernel got the size it
158 	 * would write the address back.
159 	 */
160 	if (bar < 6 && ioport__read32(data) == 0xFFFFFFFF) {
161 		u32 sz = pci_hdr->bar_size[bar];
162 		memcpy(base + offset, &sz, sizeof(sz));
163 	} else {
164 		memcpy(base + offset, data, size);
165 	}
166 }
167 
168 void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
169 {
170 	u8 offset;
171 	struct pci_device_header *pci_hdr;
172 	u8 dev_num = addr.device_number;
173 
174 	if (pci_device_exists(addr.bus_number, dev_num, 0)) {
175 		pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
176 		offset = addr.w & PCI_DEV_CFG_MASK;
177 
178 		if (pci_hdr->cfg_ops.read)
179 			pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);
180 
181 		memcpy(data, (void *)pci_hdr + offset, size);
182 	} else {
183 		memset(data, 0xff, size);
184 	}
185 }
186 
187 static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
188 				   u32 len, u8 is_write, void *kvm)
189 {
190 	union pci_config_address cfg_addr;
191 
192 	addr			-= KVM_PCI_CFG_AREA;
193 	cfg_addr.w		= (u32)addr;
194 	cfg_addr.enable_bit	= 1;
195 
196 	if (is_write)
197 		pci__config_wr(kvm, cfg_addr, data, len);
198 	else
199 		pci__config_rd(kvm, cfg_addr, data, len);
200 }
201 
202 struct pci_device_header *pci__find_dev(u8 dev_num)
203 {
204 	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
205 
206 	if (IS_ERR_OR_NULL(hdr))
207 		return NULL;
208 
209 	return hdr->data;
210 }
211 
212 int pci__init(struct kvm *kvm)
213 {
214 	int r;
215 
216 	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
217 	if (r < 0)
218 		return r;
219 
220 	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
221 	if (r < 0)
222 		goto err_unregister_data;
223 
224 	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
225 			       pci_config_mmio_access, kvm);
226 	if (r < 0)
227 		goto err_unregister_addr;
228 
229 	return 0;
230 
231 err_unregister_addr:
232 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
233 err_unregister_data:
234 	ioport__unregister(kvm, PCI_CONFIG_DATA);
235 	return r;
236 }
237 dev_base_init(pci__init);
238 
239 int pci__exit(struct kvm *kvm)
240 {
241 	ioport__unregister(kvm, PCI_CONFIG_DATA);
242 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
243 
244 	return 0;
245 }
246 dev_base_exit(pci__exit);
247