xref: /kvmtool/pci.c (revision a0a7d66f12349e828110a3ebe3e8f6e491a4f6dc)
1 #include "kvm/devices.h"
2 #include "kvm/pci.h"
3 #include "kvm/ioport.h"
4 #include "kvm/irq.h"
5 #include "kvm/util.h"
6 #include "kvm/kvm.h"
7 
8 #include <linux/err.h>
9 #include <assert.h>
10 
11 #define PCI_BAR_OFFSET(b)		(offsetof(struct pci_device_header, bar[b]))
12 
13 static u32 pci_config_address_bits;
14 
15 /* This is within our PCI gap - in an unused area.
16  * Note this is a PCI *bus address*, is used to assign BARs etc.!
17  * (That's why it can still 32bit even with 64bit guests-- 64bit
18  * PCI isn't currently supported.)
19  */
20 static u32 io_space_blocks		= KVM_PCI_MMIO_AREA;
21 
22 /*
23  * BARs must be naturally aligned, so enforce this in the allocator.
24  */
25 u32 pci_get_io_space_block(u32 size)
26 {
27 	u32 block = ALIGN(io_space_blocks, size);
28 	io_space_blocks = block + size;
29 	return block;
30 }
31 
32 void pci__assign_irq(struct device_header *dev_hdr)
33 {
34 	struct pci_device_header *pci_hdr = dev_hdr->data;
35 
36 	/*
37 	 * PCI supports only INTA#,B#,C#,D# per device.
38 	 *
39 	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
40 	 * with A# for our single function devices.
41 	 */
42 	pci_hdr->irq_pin	= 1;
43 	pci_hdr->irq_line	= irq__alloc_line();
44 }
45 
46 static void *pci_config_address_ptr(u16 port)
47 {
48 	unsigned long offset;
49 	void *base;
50 
51 	offset	= port - PCI_CONFIG_ADDRESS;
52 	base	= &pci_config_address_bits;
53 
54 	return base + offset;
55 }
56 
57 static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
58 {
59 	void *p = pci_config_address_ptr(port);
60 
61 	memcpy(p, data, size);
62 
63 	return true;
64 }
65 
66 static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
67 {
68 	void *p = pci_config_address_ptr(port);
69 
70 	memcpy(data, p, size);
71 
72 	return true;
73 }
74 
/* Port I/O handlers for the CONFIG_ADDRESS register (PCI_CONFIG_ADDRESS). */
static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};
79 
80 static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
81 {
82 	union pci_config_address pci_config_address;
83 
84 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
85 
86 	if (pci_config_address.bus_number != bus_number)
87 		return false;
88 
89 	if (pci_config_address.function_number != function_number)
90 		return false;
91 
92 	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
93 }
94 
95 static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
96 {
97 	union pci_config_address pci_config_address;
98 
99 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
100 	/*
101 	 * If someone accesses PCI configuration space offsets that are not
102 	 * aligned to 4 bytes, it uses ioports to signify that.
103 	 */
104 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
105 
106 	pci__config_wr(vcpu->kvm, pci_config_address, data, size);
107 
108 	return true;
109 }
110 
111 static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
112 {
113 	union pci_config_address pci_config_address;
114 
115 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
116 	/*
117 	 * If someone accesses PCI configuration space offsets that are not
118 	 * aligned to 4 bytes, it uses ioports to signify that.
119 	 */
120 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
121 
122 	pci__config_rd(vcpu->kvm, pci_config_address, data, size);
123 
124 	return true;
125 }
126 
/* Port I/O handlers for the CONFIG_DATA window (PCI_CONFIG_DATA + 0..3). */
static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};
131 
132 void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
133 {
134 	u8 dev_num;
135 
136 	dev_num	= addr.device_number;
137 
138 	if (pci_device_exists(0, dev_num, 0)) {
139 		unsigned long offset;
140 
141 		offset = addr.w & 0xff;
142 		if (offset < sizeof(struct pci_device_header)) {
143 			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
144 			struct pci_device_header *hdr = p;
145 			u8 bar = (offset - PCI_BAR_OFFSET(0)) / (sizeof(u32));
146 			u32 sz = cpu_to_le32(PCI_IO_SIZE);
147 
148 			if (bar < 6 && hdr->bar_size[bar])
149 				sz = hdr->bar_size[bar];
150 
151 			/*
152 			 * If the kernel masks the BAR it would expect to find the
153 			 * size of the BAR there next time it reads from it.
154 			 * When the kernel got the size it would write the address
155 			 * back.
156 			 */
157 			if (*(u32 *)(p + offset)) {
158 				/* See if kernel tries to mask one of the BARs */
159 				if ((offset >= PCI_BAR_OFFSET(0)) &&
160 				    (offset <= PCI_BAR_OFFSET(6)) &&
161 				    (ioport__read32(data)  == 0xFFFFFFFF))
162 					memcpy(p + offset, &sz, sizeof(sz));
163 				    else
164 					memcpy(p + offset, data, size);
165 			}
166 		}
167 	}
168 }
169 
170 void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
171 {
172 	u8 dev_num;
173 
174 	dev_num	= addr.device_number;
175 
176 	if (pci_device_exists(0, dev_num, 0)) {
177 		unsigned long offset;
178 
179 		offset = addr.w & 0xff;
180 		if (offset < sizeof(struct pci_device_header)) {
181 			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
182 
183 			memcpy(data, p + offset, size);
184 		} else {
185 			memset(data, 0x00, size);
186 		}
187 	} else {
188 		memset(data, 0xff, size);
189 	}
190 }
191 
192 static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
193 				   u32 len, u8 is_write, void *kvm)
194 {
195 	union pci_config_address cfg_addr;
196 
197 	addr			-= KVM_PCI_CFG_AREA;
198 	cfg_addr.w		= (u32)addr;
199 	cfg_addr.enable_bit	= 1;
200 
201 	if (is_write)
202 		pci__config_wr(kvm, cfg_addr, data, len);
203 	else
204 		pci__config_rd(kvm, cfg_addr, data, len);
205 }
206 
207 struct pci_device_header *pci__find_dev(u8 dev_num)
208 {
209 	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
210 
211 	if (IS_ERR_OR_NULL(hdr))
212 		return NULL;
213 
214 	return hdr->data;
215 }
216 
217 int pci__init(struct kvm *kvm)
218 {
219 	int r;
220 
221 	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
222 	if (r < 0)
223 		return r;
224 
225 	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
226 	if (r < 0)
227 		goto err_unregister_data;
228 
229 	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
230 			       pci_config_mmio_access, kvm);
231 	if (r < 0)
232 		goto err_unregister_addr;
233 
234 	return 0;
235 
236 err_unregister_addr:
237 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
238 err_unregister_data:
239 	ioport__unregister(kvm, PCI_CONFIG_DATA);
240 	return r;
241 }
242 dev_base_init(pci__init);
243 
244 int pci__exit(struct kvm *kvm)
245 {
246 	ioport__unregister(kvm, PCI_CONFIG_DATA);
247 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
248 
249 	return 0;
250 }
251 dev_base_exit(pci__exit);
252