#ifndef KVM__PCI_H
#define KVM__PCI_H

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/pci_regs.h>
#include <linux/virtio_pci.h>
#include <endian.h>
#include <stdbool.h>

#include "kvm/devices.h"
#include "kvm/msi.h"
#include "kvm/fdt.h"
#include "kvm/kvm-arch.h"

#define pci_dev_err(pci_hdr, fmt, ...) \
	pr_err("[%04x:%04x] " fmt, (pci_hdr)->vendor_id, (pci_hdr)->device_id, ##__VA_ARGS__)
#define pci_dev_warn(pci_hdr, fmt, ...) \
	pr_warning("[%04x:%04x] " fmt, (pci_hdr)->vendor_id, (pci_hdr)->device_id, ##__VA_ARGS__)
#define pci_dev_info(pci_hdr, fmt, ...) \
	pr_info("[%04x:%04x] " fmt, (pci_hdr)->vendor_id, (pci_hdr)->device_id, ##__VA_ARGS__)
#define pci_dev_dbg(pci_hdr, fmt, ...) \
	pr_debug("[%04x:%04x] " fmt, (pci_hdr)->vendor_id, (pci_hdr)->device_id, ##__VA_ARGS__)
#define pci_dev_die(pci_hdr, fmt, ...) \
	die("[%04x:%04x] " fmt, (pci_hdr)->vendor_id, (pci_hdr)->device_id, ##__VA_ARGS__)

/*
 * PCI Configuration Mechanism #1 I/O ports. See Section 3.7.4.1.
 * ("Configuration Mechanism #1") of the PCI Local Bus Specification 2.1 for
 * details.
 */
#define PCI_CONFIG_ADDRESS	0xcf8
#define PCI_CONFIG_DATA		0xcfc
#define PCI_CONFIG_BUS_FORWARD	0xcfa
#define PCI_IO_SIZE		0x100
#define PCI_IOPORT_START	0x6200
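
/*
 * Illustrative only: a Mechanism #1 config access from the guest writes the
 * classic address encoding (enable bit, bus, device, function, register
 * offset) to 0xcf8 and then reads or writes the data at 0xcfc. For example,
 * reading BAR0 (offset 0x10) of bus 0, device 3, function 0 looks roughly
 * like:
 *
 *	outl(0x80000000 | (0 << 16) | (3 << 11) | (0 << 8) | 0x10,
 *	     PCI_CONFIG_ADDRESS);
 *	bar0 = inl(PCI_CONFIG_DATA);
 */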

struct kvm;

/*
 * On some distributions, pci_regs.h doesn't define PCI_CFG_SPACE_SIZE and
 * PCI_CFG_SPACE_EXP_SIZE, so we define our own.
 */
#define PCI_CFG_SIZE_LEGACY		(1ULL << 24)
#define PCI_DEV_CFG_SIZE_LEGACY		256
#define PCI_CFG_SIZE_EXTENDED		(1ULL << 28)
#define PCI_DEV_CFG_SIZE_EXTENDED	4096

#ifdef ARCH_HAS_PCI_EXP
#define arch_has_pci_exp()	(true)

#define PCI_CFG_SIZE		PCI_CFG_SIZE_EXTENDED
#define PCI_DEV_CFG_SIZE	PCI_DEV_CFG_SIZE_EXTENDED

union pci_config_address {
	struct {
#if __BYTE_ORDER == __LITTLE_ENDIAN
		unsigned reg_offset : 2;	/* 1 .. 0 */
		unsigned register_number : 10;	/* 11 .. 2 */
		unsigned function_number : 3;	/* 14 .. 12 */
		unsigned device_number : 5;	/* 19 .. 15 */
		unsigned bus_number : 8;	/* 27 .. 20 */
		unsigned reserved : 3;		/* 30 .. 28 */
		unsigned enable_bit : 1;	/* 31 */
#else
		unsigned enable_bit : 1;	/* 31 */
		unsigned reserved : 3;		/* 30 .. 28 */
		unsigned bus_number : 8;	/* 27 .. 20 */
		unsigned device_number : 5;	/* 19 .. 15 */
		unsigned function_number : 3;	/* 14 .. 12 */
		unsigned register_number : 10;	/* 11 .. 2 */
		unsigned reg_offset : 2;	/* 1 .. 0 */
#endif
	};
	u32 w;
};

#else
#define arch_has_pci_exp()	(false)

#define PCI_CFG_SIZE		PCI_CFG_SIZE_LEGACY
#define PCI_DEV_CFG_SIZE	PCI_DEV_CFG_SIZE_LEGACY

union pci_config_address {
	struct {
#if __BYTE_ORDER == __LITTLE_ENDIAN
		unsigned reg_offset : 2;	/* 1 .. 0 */
		unsigned register_number : 6;	/* 7 .. 2 */
		unsigned function_number : 3;	/* 10 .. 8 */
		unsigned device_number : 5;	/* 15 .. 11 */
		unsigned bus_number : 8;	/* 23 .. 16 */
		unsigned reserved : 7;		/* 30 .. 24 */
		unsigned enable_bit : 1;	/* 31 */
#else
		unsigned enable_bit : 1;	/* 31 */
		unsigned reserved : 7;		/* 30 .. 24 */
		unsigned bus_number : 8;	/* 23 .. 16 */
		unsigned device_number : 5;	/* 15 .. 11 */
		unsigned function_number : 3;	/* 10 .. 8 */
		unsigned register_number : 6;	/* 7 .. 2 */
		unsigned reg_offset : 2;	/* 1 .. 0 */
#endif
	};
	u32 w;
};
#endif /* ARCH_HAS_PCI_EXP */

#define PCI_DEV_CFG_MASK	(PCI_DEV_CFG_SIZE - 1)

struct msix_table {
	struct msi_msg msg;
	u32 ctrl;
};

struct msix_cap {
	u8 cap;
	u8 next;
	u16 ctrl;
	u32 table_offset;
	u32 pba_offset;
};

struct msi_cap_64 {
	u8 cap;
	u8 next;
	u16 ctrl;
	u32 address_lo;
	u32 address_hi;
	u16 data;
	u16 _align;
	u32 mask_bits;
	u32 pend_bits;
};

struct msi_cap_32 {
	u8 cap;
	u8 next;
	u16 ctrl;
	u32 address_lo;
	u16 data;
	u16 _align;
	u32 mask_bits;
	u32 pend_bits;
};

struct virtio_caps {
	struct virtio_pci_cap common;
	struct virtio_pci_notify_cap notify;
	struct virtio_pci_cap isr;
	struct virtio_pci_cap device;
	struct virtio_pci_cfg_cap pci;
};

struct pci_cap_hdr {
	u8 type;
	u8 next;
};

struct pci_exp_cap {
	u8 cap;
	u8 next;
	u16 cap_reg;
	u32 dev_cap;
	u16 dev_ctrl;
	u16 dev_status;
	u32 link_cap;
	u16 link_ctrl;
	u16 link_status;
	u32 slot_cap;
	u16 slot_ctrl;
	u16 slot_status;
	u16 root_ctrl;
	u16 root_cap;
	u32 root_status;
};

struct pci_device_header;

typedef int (*bar_activate_fn_t)(struct kvm *kvm,
				 struct pci_device_header *pci_hdr,
				 int bar_num, void *data);
typedef int (*bar_deactivate_fn_t)(struct kvm *kvm,
				   struct pci_device_header *pci_hdr,
				   int bar_num, void *data);

#define PCI_BAR_OFFSET(b)	(offsetof(struct pci_device_header, bar[b]))

struct pci_config_operations {
	void (*write)(struct kvm *kvm, struct pci_device_header *pci_hdr,
		      u16 offset, void *data, int sz);
	void (*read)(struct kvm *kvm, struct pci_device_header *pci_hdr,
		     u16 offset, void *data, int sz);
};
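
/*
 * Sketch, not part of the ABI: a device that wants to intercept config space
 * writes could install a callback along these lines (the callback name and
 * device are hypothetical) before registering its pci_device_header.
 *
 *	static void my_dev_cfg_write(struct kvm *kvm,
 *				     struct pci_device_header *pci_hdr,
 *				     u16 offset, void *data, int sz)
 *	{
 *		pci_dev_dbg(pci_hdr, "config write at 0x%x, %d bytes", offset, sz);
 *	}
 *
 *	pci_hdr.cfg_ops = (struct pci_config_operations) {
 *		.write = my_dev_cfg_write,
 *	};
 */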

struct pci_device_header {
	/* Configuration space, as seen by the guest */
	union {
		struct {
			u16 vendor_id;
			u16 device_id;
			u16 command;
			u16 status;
			u8 revision_id;
			u8 class[3];
			u8 cacheline_size;
			u8 latency_timer;
			u8 header_type;
			u8 bist;
			u32 bar[6];
			u32 card_bus;
			u16 subsys_vendor_id;
			u16 subsys_id;
			u32 exp_rom_bar;
			u8 capabilities;
			u8 reserved1[3];
			u32 reserved2;
			u8 irq_line;
			u8 irq_pin;
			u8 min_gnt;
			u8 max_lat;
			struct msix_cap msix;
			/* Used only by architectures which support PCIE */
			struct pci_exp_cap pci_exp;
			struct virtio_caps virtio;
		} __attribute__((packed));
		/* Pad to PCI config space size */
		u8 __pad[PCI_DEV_CFG_SIZE];
	};

	/* Private to lkvm */
	u32 bar_size[6];
	bool bar_active[6];
	bar_activate_fn_t bar_activate_fn;
	bar_deactivate_fn_t bar_deactivate_fn;
	void *data;
	struct pci_config_operations cfg_ops;
	/*
	 * PCI INTx# interrupts are level-triggered, but virtual devices often
	 * feature edge-triggered INTx# for convenience.
	 */
	enum irq_type irq_type;
};

#define PCI_CAP(pci_hdr, pos) ((void *)(pci_hdr) + (pos))
#define PCI_CAP_OFF(pci_hdr, cap) ((void *)&(pci_hdr)->cap - (void *)(pci_hdr))

#define pci_for_each_cap(pos, cap, hdr)				\
	for ((pos) = (hdr)->capabilities & ~3;			\
	     (cap) = PCI_CAP(hdr, pos), (pos) != 0;		\
	     (pos) = ((struct pci_cap_hdr *)(cap))->next & ~3)
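
/*
 * Example use (a sketch, assuming a populated capability list): walk the
 * capability chain starting at capabilities until the next pointer is zero,
 * e.g. to dump every capability ID and offset.
 *
 *	u8 pos;
 *	struct pci_cap_hdr *cap;
 *
 *	pci_for_each_cap(pos, cap, pci_hdr)
 *		pci_dev_dbg(pci_hdr, "capability 0x%x at offset 0x%x",
 *			    cap->type, pos);
 */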

int pci__init(struct kvm *kvm);
int pci__exit(struct kvm *kvm);
struct pci_device_header *pci__find_dev(u8 dev_num);
u32 pci_get_mmio_block(u32 size);
u16 pci_get_io_port_block(u32 size);
int pci__assign_irq(struct pci_device_header *pci_hdr);
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size);
void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size);

void *pci_find_cap(struct pci_device_header *hdr, u8 cap_type);

int pci__register_bar_regions(struct kvm *kvm, struct pci_device_header *pci_hdr,
			      bar_activate_fn_t bar_activate_fn,
			      bar_deactivate_fn_t bar_deactivate_fn, void *data);
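
/*
 * A minimal sketch (callback names are hypothetical): a device registers its
 * BAR callbacks once, and they are expected to be invoked when the guest
 * toggles PCI_COMMAND_MEMORY/PCI_COMMAND_IO or reprograms a BAR.
 *
 *	static int my_bar_activate(struct kvm *kvm,
 *				   struct pci_device_header *pci_hdr,
 *				   int bar_num, void *data)
 *	{
 *		// map or register the emulation backing bar_num here
 *		return 0;
 *	}
 *
 *	r = pci__register_bar_regions(kvm, &pci_hdr, my_bar_activate,
 *				      my_bar_deactivate, dev);
 */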

static inline bool __pci__memory_space_enabled(u16 command)
{
	return command & PCI_COMMAND_MEMORY;
}

static inline bool pci__memory_space_enabled(struct pci_device_header *pci_hdr)
{
	return __pci__memory_space_enabled(pci_hdr->command);
}

static inline bool __pci__io_space_enabled(u16 command)
{
	return command & PCI_COMMAND_IO;
}

static inline bool pci__io_space_enabled(struct pci_device_header *pci_hdr)
{
	return __pci__io_space_enabled(pci_hdr->command);
}

static inline bool __pci__bar_is_io(u32 bar)
{
	return bar & PCI_BASE_ADDRESS_SPACE_IO;
}

static inline bool pci__bar_is_io(struct pci_device_header *pci_hdr, int bar_num)
{
	return __pci__bar_is_io(pci_hdr->bar[bar_num]);
}

static inline bool pci__bar_is_memory(struct pci_device_header *pci_hdr, int bar_num)
{
	return !pci__bar_is_io(pci_hdr, bar_num);
}

static inline u32 __pci__bar_address(u32 bar)
{
	if (__pci__bar_is_io(bar))
		return bar & PCI_BASE_ADDRESS_IO_MASK;
	return bar & PCI_BASE_ADDRESS_MEM_MASK;
}

static inline u32 pci__bar_address(struct pci_device_header *pci_hdr, int bar_num)
{
	return __pci__bar_address(pci_hdr->bar[bar_num]);
}

static inline u32 pci__bar_size(struct pci_device_header *pci_hdr, int bar_num)
{
	return pci_hdr->bar_size[bar_num];
}

#endif /* KVM__PCI_H */