xref: /kvmtool/pci.c (revision bca12bf63703c74f96942b12736e4337ef2c7d4d)
#include "kvm/pci.h"
#include "kvm/ioport.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <linux/err.h>
#include <assert.h>

#define PCI_BAR_OFFSET(b)		(offsetof(struct pci_device_header, bar[b]))

static struct pci_device_header		*pci_devices[PCI_MAX_DEVICES];

static union pci_config_address		pci_config_address;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address* and is used to assign BARs etc.!
 * (That's why it can still be 32-bit even with 64-bit guests -- 64-bit
 * PCI isn't currently supported.)
 */
static u32 io_space_blocks		= KVM_PCI_MMIO_AREA;

u32 pci_get_io_space_block(u32 size)
{
	u32 block = io_space_blocks;
	io_space_blocks += size;

	return block;
}
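
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * device model that wants a memory BAR carves a chunk out of the PCI MMIO
 * gap and advertises it in its header before calling pci__register(). The
 * BAR index and size below are made up for the example.
 *
 *	struct pci_device_header *hdr = ...;
 *	u32 base = pci_get_io_space_block(PCI_IO_SIZE);
 *
 *	hdr->bar[1]	 = base | PCI_BASE_ADDRESS_SPACE_MEMORY;
 *	hdr->bar_size[1] = PCI_IO_SIZE;
 */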

static void *pci_config_address_ptr(u16 port)
{
	unsigned long offset;
	void *base;

	offset	= port - PCI_CONFIG_ADDRESS;
	base	= &pci_config_address;

	return base + offset;
}

static bool pci_config_address_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(p, data, size);

	return true;
}

static bool pci_config_address_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(data, p, size);

	return true;
}

static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};
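
/*
 * Editor's note (illustrative, not part of the original file): these handlers
 * are the guest-facing half of PCI Configuration Mechanism #1. The guest
 * latches a bus/device/function/register address into CONFIG_ADDRESS
 * (port 0xCF8) and then reads or writes the selected dword through
 * CONFIG_DATA (port 0xCFC). A guest-side config read looks roughly like
 * this (sketch; the port constants come from kvm/pci.h):
 *
 *	outl(0x80000000			// enable bit
 *	     | (bus << 16)
 *	     | (dev << 11)
 *	     | (fn  <<  8)
 *	     | (reg & 0xfc), PCI_CONFIG_ADDRESS);
 *	val = inl(PCI_CONFIG_DATA);
 *
 * The union pci_config_address bitfields mirror exactly this layout, which
 * is why the handlers can simply memcpy into and out of it.
 */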

static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
{
	struct pci_device_header *dev;

	if (pci_config_address.bus_number != bus_number)
		return false;

	if (pci_config_address.function_number != function_number)
		return false;

	if (device_number >= PCI_MAX_DEVICES)
		return false;

	dev = pci_devices[device_number];

	return dev != NULL;
}

static bool pci_config_data_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	/*
	 * Accesses to PCI configuration space offsets that are not aligned
	 * to 4 bytes arrive on the ports just above PCI_CONFIG_DATA; fold
	 * the port offset back into the register offset.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_wr(kvm, pci_config_address, data, size);

	return true;
}

static bool pci_config_data_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	/*
	 * Accesses to PCI configuration space offsets that are not aligned
	 * to 4 bytes arrive on the ports just above PCI_CONFIG_DATA; fold
	 * the port offset back into the register offset.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_rd(kvm, pci_config_address, data, size);

	return true;
}

static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};
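
/*
 * Example of the unaligned case (editor's illustration, not part of the
 * original file): to read the single byte at config offset 0x0e
 * (PCI_HEADER_TYPE), a guest programs CONFIG_ADDRESS with the dword-aligned
 * offset 0x0c and then issues
 *
 *	u8 type = inb(PCI_CONFIG_DATA + 2);
 *
 * The handlers above turn that port offset of 2 into reg_offset = 2, so
 * pci__config_rd() sees the full byte offset 0x0c + 2 = 0x0e.
 */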

void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num	= addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = pci_devices[dev_num];
			u8 bar = (offset - PCI_BAR_OFFSET(0)) / (sizeof(u32));
			u32 sz = PCI_IO_SIZE;

			if (bar < 6 && pci_devices[dev_num]->bar_size[bar])
				sz = pci_devices[dev_num]->bar_size[bar];

			/*
			 * If the kernel masks a BAR (writes all ones to it), it
			 * expects to find the size of the BAR there on the next
			 * read. Once the kernel has the size, it writes the real
			 * address back.
			 */
			if (*(u32 *)(p + offset)) {
				/* See if the kernel tries to mask one of the BARs */
				if ((offset >= PCI_BAR_OFFSET(0)) &&
				    (offset <= PCI_BAR_OFFSET(6)) &&
				    (ioport__read32(data) == 0xFFFFFFFF))
					memcpy(p + offset, &sz, sizeof(sz));
				else
					memcpy(p + offset, data, size);
			}
		}
	}
}
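
/*
 * BAR sizing handshake from the guest's point of view (editor's sketch, not
 * part of the original file): the sequence the code above caters to is
 * roughly
 *
 *	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &old);
 *	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0xFFFFFFFF);
 *	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &sz);	// bar_size[1]
 *	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, old);	// restore address
 *
 * A real guest masks off the low flag bits and takes ~sz + 1 to compute the
 * region size; this device model simply hands back bar_size[] verbatim.
 */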

void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num	= addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = pci_devices[dev_num];

			memcpy(data, p + offset, size);
		} else {
			memset(data, 0x00, size);
		}
	} else {
		memset(data, 0xff, size);
	}
}

int pci__register(struct pci_device_header *dev, u8 dev_num)
{
	if (dev_num >= PCI_MAX_DEVICES)
		return -ENOSPC;

	pci_devices[dev_num] = dev;

	return 0;
}

struct pci_device_header *pci__find_dev(u8 dev_num)
{
	if (dev_num >= PCI_MAX_DEVICES)
		return ERR_PTR(-EOVERFLOW);

	return pci_devices[dev_num];
}
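
/*
 * Registration sketch (editor's illustration, not part of the original
 * file): a device model fills in a pci_device_header and claims a device
 * number of its choosing; the IDs and slot number below are made up for
 * the example.
 *
 *	static struct pci_device_header my_pci_device = {
 *		.vendor_id	= 0x1234,
 *		.device_id	= 0x5678,
 *		.header_type	= PCI_HEADER_TYPE_NORMAL,
 *	};
 *
 *	r = pci__register(&my_pci_device, 3);
 *	if (r < 0)
 *		return r;
 *
 * Callers that only have a device number can get the header back with
 * pci__find_dev() and must check the result with IS_ERR().
 */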

int pci__init(struct kvm *kvm)
{
	int r;

	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
	if (r < 0)
		return r;

	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
	if (r < 0) {
		ioport__unregister(kvm, PCI_CONFIG_DATA);
		return r;
	}

	return 0;
}
dev_base_init(pci__init);

int pci__exit(struct kvm *kvm)
{
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);