1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) Microsoft Corporation.
4  *
5  * Author:
6  *   Jake Oshins <jakeo@microsoft.com>
7  *
8  * This driver acts as a paravirtual front-end for PCI Express root buses.
9  * When a PCI Express function (either an entire device or an SR-IOV
10  * Virtual Function) is being passed through to the VM, this driver exposes
11  * a new bus to the guest VM.  This is modeled as a root PCI bus because
12  * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
13  * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
14  * until a device has been exposed using this driver.
15  *
16  * Each root PCI bus has its own PCI domain, which is called "Segment" in
17  * the PCI Firmware Specifications.  Thus while each device passed through
18  * to the VM using this front-end will appear at "device 0", the domain will
19  * be unique.  Typically, each bus will have one PCI function on it, though
20  * this driver does support more than one.
21  *
22  * In order to map the interrupts from the device through to the guest VM,
23  * this driver also implements an IRQ Domain, which handles interrupts (either
24  * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
25  * set up, torn down, or reaffined, this driver communicates with the
26  * underlying hypervisor to adjust the mappings in the I/O MMU so that each
27  * interrupt will be delivered to the correct virtual processor at the right
28  * vector.  This driver does not support level-triggered (line-based)
29  * interrupts, and will report that the Interrupt Line register in the
30  * function's configuration space is zero.
31  *
32  * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
33  * facilities.  For instance, the configuration space of a function exposed
34  * by Hyper-V is mapped into a single page of memory space, and the
35  * read and write handlers for config space must be aware of this mechanism.
36  * Similarly, device setup and teardown involves messages sent to and from
37  * the PCI back-end driver in Hyper-V.
38  */
39 
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/pci-ecam.h>
44 #include <linux/delay.h>
45 #include <linux/semaphore.h>
46 #include <linux/irq.h>
47 #include <linux/irqchip/irq-msi-lib.h>
48 #include <linux/msi.h>
49 #include <linux/hyperv.h>
50 #include <linux/refcount.h>
51 #include <linux/irqdomain.h>
52 #include <linux/acpi.h>
53 #include <linux/sizes.h>
54 #include <linux/of_irq.h>
55 #include <asm/mshyperv.h>
56 
57 /*
58  * Protocol versions. The low word is the minor version, the high word the
59  * major version.
60  */
61 
62 #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
63 #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
64 #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
65 
66 enum pci_protocol_version_t {
67 	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
68 	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
69 	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
70 	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
71 };
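/*
 * Illustrative example (editor's note, not part of the driver): with this
 * encoding, PCI_MAKE_VERSION(1, 4) == 0x00010004, and decoding gives
 * PCI_MAJOR_VERSION(0x00010004) == 1 and PCI_MINOR_VERSION(0x00010004) == 4.
 * Note that PCI_MINOR_VERSION() masks only the low byte, which is sufficient
 * for all minor versions defined so far.
 */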
72 
73 #define CPU_AFFINITY_ALL	-1ULL
74 
75 /*
76  * Supported protocol versions in the order of probing - highest go
77  * first.
78  */
79 static enum pci_protocol_version_t pci_protocol_versions[] = {
80 	PCI_PROTOCOL_VERSION_1_4,
81 	PCI_PROTOCOL_VERSION_1_3,
82 	PCI_PROTOCOL_VERSION_1_2,
83 	PCI_PROTOCOL_VERSION_1_1,
84 };
85 
86 #define PCI_CONFIG_MMIO_LENGTH	0x2000
87 #define CFG_PAGE_OFFSET 0x1000
88 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
89 
90 #define MAX_SUPPORTED_MSI_MESSAGES 0x400
91 
92 #define STATUS_REVISION_MISMATCH 0xC0000059
93 
94 /* space for 32-bit serial number as string (10 decimal digits + NUL) */
95 #define SLOT_NAME_SIZE 11
96 
97 /*
98  * Size of requestor for VMbus; the value is based on the observation
99  * that having more than one request outstanding is 'rare', and so 64
100  * should be generous in ensuring that we don't ever run out.
101  */
102 #define HV_PCI_RQSTOR_SIZE 64
103 
104 /*
105  * Message Types
106  */
107 
108 enum pci_message_type {
109 	/*
110 	 * Version 1.1
111 	 */
112 	PCI_MESSAGE_BASE                = 0x42490000,
113 	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
114 	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
115 	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
116 	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
117 	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
118 	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
119 	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
120 	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
121 	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
122 	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
123 	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
124 	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
125 	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
126 	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
127 	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
128 	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
129 	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
130 	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
131 	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
132 	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
133 	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
134 	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
135 	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
136 	PCI_BUS_RELATIONS2		= PCI_MESSAGE_BASE + 0x19,
137 	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
138 	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
139 	PCI_MESSAGE_MAXIMUM
140 };
141 
142 /*
143  * Structures defining the virtual PCI Express protocol.
144  */
145 
146 union pci_version {
147 	struct {
148 		u16 minor_version;
149 		u16 major_version;
150 	} parts;
151 	u32 version;
152 } __packed;
153 
154 /*
155  * Function numbers are 8-bits wide on Express, as interpreted through ARI,
156  * which is all this driver supports.  This representation is the one used in
157  * Windows, which is what is expected when sending this back and forth with
158  * the Hyper-V parent partition.
159  */
160 union win_slot_encoding {
161 	struct {
162 		u32	dev:5;
163 		u32	func:3;
164 		u32	reserved:24;
165 	} bits;
166 	u32 slot;
167 } __packed;
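/*
 * Illustrative example (editor's note), assuming the little-endian bitfield
 * layout this protocol uses: for a Linux devfn of PCI_DEVFN(2, 3),
 * devfn_to_wslot() below packs dev = 2 into bits 0-4 and func = 3 into
 * bits 5-7, yielding wslot.slot == 0x62; wslot_to_devfn() reverses the
 * packing.
 */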
168 
169 /*
170  * Pretty much as defined in the PCI Specifications.
171  */
172 struct pci_function_description {
173 	u16	v_id;	/* vendor ID */
174 	u16	d_id;	/* device ID */
175 	u8	rev;
176 	u8	prog_intf;
177 	u8	subclass;
178 	u8	base_class;
179 	u32	subsystem_id;
180 	union win_slot_encoding win_slot;
181 	u32	ser;	/* serial number */
182 } __packed;
183 
184 enum pci_device_description_flags {
185 	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
186 	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
187 };
188 
189 struct pci_function_description2 {
190 	u16	v_id;	/* vendor ID */
191 	u16	d_id;	/* device ID */
192 	u8	rev;
193 	u8	prog_intf;
194 	u8	subclass;
195 	u8	base_class;
196 	u32	subsystem_id;
197 	union	win_slot_encoding win_slot;
198 	u32	ser;	/* serial number */
199 	u32	flags;
200 	u16	virtual_numa_node;
201 	u16	reserved;
202 } __packed;
203 
204 /**
205  * struct hv_msi_desc
206  * @vector:		IDT entry
207  * @delivery_mode:	As defined in Intel's Programmer's
208  *			Reference Manual, Volume 3, Chapter 8.
209  * @vector_count:	Number of contiguous entries in the
210  *			Interrupt Descriptor Table that are
211  *			occupied by this Message-Signaled
212  *			Interrupt. For "MSI", as first defined
213  *			in PCI 2.2, this can be between 1 and
214  *			32. For "MSI-X," as first defined in PCI
215  *			3.0, this must be 1, as each MSI-X table
216  *			entry would have its own descriptor.
217  * @reserved:		Empty space
218  * @cpu_mask:		All the target virtual processors.
219  */
220 struct hv_msi_desc {
221 	u8	vector;
222 	u8	delivery_mode;
223 	u16	vector_count;
224 	u32	reserved;
225 	u64	cpu_mask;
226 } __packed;
227 
228 /**
229  * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
230  * @vector:		IDT entry
231  * @delivery_mode:	As defined in Intel's Programmer's
232  *			Reference Manual, Volume 3, Chapter 8.
233  * @vector_count:	Number of contiguous entries in the
234  *			Interrupt Descriptor Table that are
235  *			occupied by this Message-Signaled
236  *			Interrupt. For "MSI", as first defined
237  *			in PCI 2.2, this can be between 1 and
238  *			32. For "MSI-X," as first defined in PCI
239  *			3.0, this must be 1, as each MSI-X table
240  *			entry would have its own descriptor.
241  * @processor_count:	number of bits enabled in array.
242  * @processor_array:	All the target virtual processors.
243  */
244 struct hv_msi_desc2 {
245 	u8	vector;
246 	u8	delivery_mode;
247 	u16	vector_count;
248 	u16	processor_count;
249 	u16	processor_array[32];
250 } __packed;
251 
252 /*
253  * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
254  *	Everything is the same as in 'hv_msi_desc2' except that the size of the
255  *	'vector' field is larger to support bigger vector values, e.g. LPI
256  *	vectors on ARM.
257  */
258 struct hv_msi_desc3 {
259 	u32	vector;
260 	u8	delivery_mode;
261 	u8	reserved;
262 	u16	vector_count;
263 	u16	processor_count;
264 	u16	processor_array[32];
265 } __packed;
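/*
 * Example of why the wider field matters (editor's note): GIC LPI INTIDs
 * start at 8192, which cannot be represented in the u8 'vector' field of
 * hv_msi_desc/hv_msi_desc2, hence the u32 'vector' field here.
 */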
266 
267 /**
268  * struct tran_int_desc
269  * @reserved:		unused, padding
270  * @vector_count:	same as in hv_msi_desc
271  * @data:		This is the "data payload" value that is
272  *			written by the device when it generates
273  *			a message-signaled interrupt, either MSI
274  *			or MSI-X.
275  * @address:		This is the address to which the data
276  *			payload is written on interrupt
277  *			generation.
278  */
279 struct tran_int_desc {
280 	u16	reserved;
281 	u16	vector_count;
282 	u32	data;
283 	u64	address;
284 } __packed;
285 
286 /*
287  * A generic message format for virtual PCI.
288  * Specific message formats are defined later in the file.
289  */
290 
291 struct pci_message {
292 	u32 type;
293 } __packed;
294 
295 struct pci_child_message {
296 	struct pci_message message_type;
297 	union win_slot_encoding wslot;
298 } __packed;
299 
300 struct pci_incoming_message {
301 	struct vmpacket_descriptor hdr;
302 	struct pci_message message_type;
303 } __packed;
304 
305 struct pci_response {
306 	struct vmpacket_descriptor hdr;
307 	s32 status;			/* negative values are failures */
308 } __packed;
309 
310 struct pci_packet {
311 	void (*completion_func)(void *context, struct pci_response *resp,
312 				int resp_packet_size);
313 	void *compl_ctxt;
314 };
315 
316 /*
317  * Specific message types supporting the PCI protocol.
318  */
319 
320 /*
321  * Version negotiation message. Sent from the guest to the host.
322  * The guest is free to try different versions until the host
323  * accepts the version.
324  *
325  * protocol_version: The protocol version requested.
328  */
329 
330 struct pci_version_request {
331 	struct pci_message message_type;
332 	u32 protocol_version;
333 } __packed;
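/*
 * A minimal sketch of how negotiation proceeds (editor's note; the real
 * implementation is hv_pci_protocol_negotiation(), later in this file): the
 * guest walks pci_protocol_versions[] from newest to oldest, sending one
 * pci_version_request per entry, until the host's pci_response does not
 * report STATUS_REVISION_MISMATCH:
 *
 *	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
 *		version_req.protocol_version = pci_protocol_versions[i];
 *		// send version_req on the channel, wait for completion ...
 *		if (comp_pkt.completion_status >= 0) {
 *			hbus->protocol_version = pci_protocol_versions[i];
 *			break;		// host accepted this version
 *		}
 *		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH)
 *			break;		// unexpected failure
 *	}
 */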
334 
335 /*
336  * Bus D0 Entry.  This is sent from the guest to the host when the virtual
337  * bus (PCI Express port) is ready for action.
338  */
339 
340 struct pci_bus_d0_entry {
341 	struct pci_message message_type;
342 	u32 reserved;
343 	u64 mmio_base;
344 } __packed;
345 
346 struct pci_bus_relations {
347 	struct pci_incoming_message incoming;
348 	u32 device_count;
349 	struct pci_function_description func[];
350 } __packed;
351 
352 struct pci_bus_relations2 {
353 	struct pci_incoming_message incoming;
354 	u32 device_count;
355 	struct pci_function_description2 func[];
356 } __packed;
357 
358 struct pci_q_res_req_response {
359 	struct vmpacket_descriptor hdr;
360 	s32 status;			/* negative values are failures */
361 	u32 probed_bar[PCI_STD_NUM_BARS];
362 } __packed;
363 
364 struct pci_set_power {
365 	struct pci_message message_type;
366 	union win_slot_encoding wslot;
367 	u32 power_state;		/* In Windows terms */
368 	u32 reserved;
369 } __packed;
370 
371 struct pci_set_power_response {
372 	struct vmpacket_descriptor hdr;
373 	s32 status;			/* negative values are failures */
374 	union win_slot_encoding wslot;
375 	u32 resultant_state;		/* In Windows terms */
376 	u32 reserved;
377 } __packed;
378 
379 struct pci_resources_assigned {
380 	struct pci_message message_type;
381 	union win_slot_encoding wslot;
382 	u8 memory_range[0x14][6];	/* not used here */
383 	u32 msi_descriptors;
384 	u32 reserved[4];
385 } __packed;
386 
387 struct pci_resources_assigned2 {
388 	struct pci_message message_type;
389 	union win_slot_encoding wslot;
390 	u8 memory_range[0x14][6];	/* not used here */
391 	u32 msi_descriptor_count;
392 	u8 reserved[70];
393 } __packed;
394 
395 struct pci_create_interrupt {
396 	struct pci_message message_type;
397 	union win_slot_encoding wslot;
398 	struct hv_msi_desc int_desc;
399 } __packed;
400 
401 struct pci_create_int_response {
402 	struct pci_response response;
403 	u32 reserved;
404 	struct tran_int_desc int_desc;
405 } __packed;
406 
407 struct pci_create_interrupt2 {
408 	struct pci_message message_type;
409 	union win_slot_encoding wslot;
410 	struct hv_msi_desc2 int_desc;
411 } __packed;
412 
413 struct pci_create_interrupt3 {
414 	struct pci_message message_type;
415 	union win_slot_encoding wslot;
416 	struct hv_msi_desc3 int_desc;
417 } __packed;
418 
419 struct pci_delete_interrupt {
420 	struct pci_message message_type;
421 	union win_slot_encoding wslot;
422 	struct tran_int_desc int_desc;
423 } __packed;
424 
425 /*
426  * Note: the VM must pass a valid block id, wslot and bytes_requested.
427  */
428 struct pci_read_block {
429 	struct pci_message message_type;
430 	u32 block_id;
431 	union win_slot_encoding wslot;
432 	u32 bytes_requested;
433 } __packed;
434 
435 struct pci_read_block_response {
436 	struct vmpacket_descriptor hdr;
437 	u32 status;
438 	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
439 } __packed;
440 
441 /*
442  * Note: the VM must pass a valid block id, wslot and byte_count.
443  */
444 struct pci_write_block {
445 	struct pci_message message_type;
446 	u32 block_id;
447 	union win_slot_encoding wslot;
448 	u32 byte_count;
449 	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
450 } __packed;
451 
452 struct pci_dev_inval_block {
453 	struct pci_incoming_message incoming;
454 	union win_slot_encoding wslot;
455 	u64 block_mask;
456 } __packed;
457 
458 struct pci_dev_incoming {
459 	struct pci_incoming_message incoming;
460 	union win_slot_encoding wslot;
461 } __packed;
462 
463 struct pci_eject_response {
464 	struct pci_message message_type;
465 	union win_slot_encoding wslot;
466 	u32 status;
467 } __packed;
468 
469 static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
470 
471 /*
472  * Driver specific state.
473  */
474 
475 enum hv_pcibus_state {
476 	hv_pcibus_init = 0,
477 	hv_pcibus_probed,
478 	hv_pcibus_installed,
479 	hv_pcibus_removing,
480 	hv_pcibus_maximum
481 };
482 
483 struct hv_pcibus_device {
484 #ifdef CONFIG_X86
485 	struct pci_sysdata sysdata;
486 #elif defined(CONFIG_ARM64)
487 	struct pci_config_window sysdata;
488 #endif
489 	struct pci_host_bridge *bridge;
490 	struct fwnode_handle *fwnode;
491 	/* Protocol version negotiated with the host */
492 	enum pci_protocol_version_t protocol_version;
493 
494 	struct mutex state_lock;
495 	enum hv_pcibus_state state;
496 
497 	struct hv_device *hdev;
498 	resource_size_t low_mmio_space;
499 	resource_size_t high_mmio_space;
500 	struct resource *mem_config;
501 	struct resource *low_mmio_res;
502 	struct resource *high_mmio_res;
503 	struct completion *survey_event;
504 	spinlock_t config_lock;	/* Avoid two threads writing index page */
505 	spinlock_t device_list_lock;	/* Protect lists below */
506 	void __iomem *cfg_addr;
507 
508 	struct list_head children;
509 	struct list_head dr_list;
510 
511 	struct irq_domain *irq_domain;
512 
513 	struct workqueue_struct *wq;
514 
515 	/* Highest slot of child device with resources allocated */
516 	int wslot_res_allocated;
517 	bool use_calls; /* Use hypercalls to access mmio cfg space */
518 };
519 
520 /*
521  * Tracks "Device Relations" messages from the host, which must be both
522  * processed in order and deferred so that they don't run in the context
523  * of the incoming packet callback.
524  */
525 struct hv_dr_work {
526 	struct work_struct wrk;
527 	struct hv_pcibus_device *bus;
528 };
529 
530 struct hv_pcidev_description {
531 	u16	v_id;	/* vendor ID */
532 	u16	d_id;	/* device ID */
533 	u8	rev;
534 	u8	prog_intf;
535 	u8	subclass;
536 	u8	base_class;
537 	u32	subsystem_id;
538 	union	win_slot_encoding win_slot;
539 	u32	ser;	/* serial number */
540 	u32	flags;
541 	u16	virtual_numa_node;
542 };
543 
544 struct hv_dr_state {
545 	struct list_head list_entry;
546 	u32 device_count;
547 	struct hv_pcidev_description func[] __counted_by(device_count);
548 };
549 
550 struct hv_pci_dev {
551 	/* List protected by pci_rescan_remove_lock */
552 	struct list_head list_entry;
553 	refcount_t refs;
554 	struct pci_slot *pci_slot;
555 	struct hv_pcidev_description desc;
556 	bool reported_missing;
557 	struct hv_pcibus_device *hbus;
558 	struct work_struct wrk;
559 
560 	void (*block_invalidate)(void *context, u64 block_mask);
561 	void *invalidate_context;
562 
563 	/*
564 	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
565 	 * read it back, for each of the BAR offsets within config space.
566 	 */
567 	u32 probed_bar[PCI_STD_NUM_BARS];
568 };
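/*
 * Illustrative example (editor's note): probed_bar[] holds what a standard
 * BAR sizing probe would observe. E.g. a probed_bar value of 0xFFFFF000 for
 * a 32-bit memory BAR decodes to a 4 KiB BAR, since the size is
 * ~(val & PCI_BASE_ADDRESS_MEM_MASK) + 1 = ~0xFFFFF000 + 1 = 0x1000.
 */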
569 
570 struct hv_pci_compl {
571 	struct completion host_event;
572 	s32 completion_status;
573 };
574 
575 static void hv_pci_onchannelcallback(void *context);
576 
577 #ifdef CONFIG_X86
578 #define DELIVERY_MODE		APIC_DELIVERY_MODE_FIXED
579 #define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_ACK
580 
581 static int hv_pci_irqchip_init(void)
582 {
583 	return 0;
584 }
585 
586 static struct irq_domain *hv_pci_get_root_domain(void)
587 {
588 	return x86_vector_domain;
589 }
590 
591 static unsigned int hv_msi_get_int_vector(struct irq_data *data)
592 {
593 	struct irq_cfg *cfg = irqd_cfg(data);
594 
595 	return cfg->vector;
596 }
597 
598 #define hv_msi_prepare		pci_msi_prepare
599 
600 /**
601  * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
602  * affinity.
603  * @data:	Describes the IRQ
604  *
605  * Build a new destination for the MSI and make a hypercall to
606  * update the Interrupt Redirection Table. "Device Logical ID"
607  * is built out of this PCI bus's instance GUID and the function
608  * number of the device.
609  */
610 static void hv_irq_retarget_interrupt(struct irq_data *data)
611 {
612 	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
613 	struct hv_retarget_device_interrupt *params;
614 	struct tran_int_desc *int_desc;
615 	struct hv_pcibus_device *hbus;
616 	const struct cpumask *dest;
617 	cpumask_var_t tmp;
618 	struct pci_bus *pbus;
619 	struct pci_dev *pdev;
620 	unsigned long flags;
621 	u32 var_size = 0;
622 	int cpu, nr_bank;
623 	u64 res;
624 
625 	dest = irq_data_get_effective_affinity_mask(data);
626 	pdev = msi_desc_to_pci_dev(msi_desc);
627 	pbus = pdev->bus;
628 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
629 	int_desc = data->chip_data;
630 	if (!int_desc) {
631 		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
632 			 __func__, data->irq);
633 		return;
634 	}
635 
636 	local_irq_save(flags);
637 
638 	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
639 	memset(params, 0, sizeof(*params));
640 	params->partition_id = HV_PARTITION_ID_SELF;
641 	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
642 	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
643 	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
644 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
645 			   (hbus->hdev->dev_instance.b[4] << 16) |
646 			   (hbus->hdev->dev_instance.b[7] << 8) |
647 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
648 			   PCI_FUNC(pdev->devfn);
649 	params->int_target.vector = hv_msi_get_int_vector(data);
650 
651 	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
652 		/*
653 		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
654 		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
655 		 * with >64 VP support.
656 		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
657 		 * is not sufficient for this hypercall.
658 		 */
659 		params->int_target.flags |=
660 			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
661 
662 		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
663 			res = 1;
664 			goto out;
665 		}
666 
667 		cpumask_and(tmp, dest, cpu_online_mask);
668 		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
669 		free_cpumask_var(tmp);
670 
671 		if (nr_bank <= 0) {
672 			res = 1;
673 			goto out;
674 		}
675 
676 		/*
677 		 * var-sized hypercall, var-size starts after vp_mask (thus
678 		 * vp_set.format does not count, but vp_set.valid_bank_mask
679 		 * does).
680 		 */
681 		var_size = 1 + nr_bank;
682 	} else {
683 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
684 			params->int_target.vp_mask |=
685 				(1ULL << hv_cpu_number_to_vp_number(cpu));
686 		}
687 	}
688 
689 	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
690 			      params, NULL);
691 
692 out:
693 	local_irq_restore(flags);
694 
695 	/*
696 	 * During hibernation, when a CPU is offlined, the kernel tries
697 	 * to move the interrupt to the remaining CPUs that haven't
698 	 * been offlined yet. In this case, the hv_do_hypercall() above
699 	 * always fails since the vmbus channel has been closed:
700 	 * refer to cpu_disable_common() -> fixup_irqs() ->
701 	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
702 	 *
703 	 * Suppress the error message for hibernation because the failure
704 	 * during hibernation does not matter (at this time all the devices
705 	 * have been frozen). Note: the correct affinity info is still updated
706 	 * into the irqdata data structure in migrate_one_irq() ->
707 	 * irq_do_set_affinity(), so later when the VM resumes,
708 	 * hv_pci_restore_msi_state() is able to correctly restore the
709 	 * interrupt with the correct affinity.
710 	 */
711 	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
712 		dev_err(&hbus->hdev->device,
713 			"%s() failed: %#llx", __func__, res);
714 }
715 
716 static void hv_arch_irq_unmask(struct irq_data *data)
717 {
718 	if (hv_root_partition())
719 		/*
720 		 * In case of the nested root partition, the nested hypervisor
721 		 * is taking care of interrupt remapping and thus the
722 		 * MAP_DEVICE_INTERRUPT hypercall is required instead of
723 		 * RETARGET_INTERRUPT.
724 		 */
725 		(void)hv_map_msi_interrupt(data, NULL);
726 	else
727 		hv_irq_retarget_interrupt(data);
728 }
729 #elif defined(CONFIG_ARM64)
730 /*
731  * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but leave a
732  * bit of room at the start to allow for SPIs to be specified through ACPI, and
733  * start at a power of two to satisfy the power-of-2 multi-MSI requirement.
734  */
735 #define HV_PCI_MSI_SPI_START	64
736 #define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
737 #define DELIVERY_MODE		0
738 #define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_EOI
739 #define hv_msi_prepare		NULL
740 
741 struct hv_pci_chip_data {
742 	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
743 	struct mutex	map_lock;
744 };
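/*
 * Illustrative example (editor's note): spi_map allocations are made with
 * bitmap_find_free_region()/bitmap_release_region() using
 * get_count_order(nr_irqs), so a request for 3 MSIs reserves a
 * naturally-aligned block of 4 SPIs; with HV_PCI_MSI_SPI_START == 64,
 * bitmap index 0 corresponds to SPI 64.
 */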
745 
746 /* Hyper-V vPCI MSI GIC IRQ domain */
747 static struct irq_domain *hv_msi_gic_irq_domain;
748 
749 /* Hyper-V PCI MSI IRQ chip */
750 static struct irq_chip hv_arm64_msi_irq_chip = {
751 	.name = "MSI",
752 	.irq_set_affinity = irq_chip_set_affinity_parent,
753 	.irq_eoi = irq_chip_eoi_parent,
754 	.irq_mask = irq_chip_mask_parent,
755 	.irq_unmask = irq_chip_unmask_parent
756 };
757 
758 static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
759 {
760 	return irqd->parent_data->hwirq;
761 }
762 
763 /*
764  * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
765  *			the bitmap.
766  * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
767  *			the parent domain.
768  */
769 static void hv_pci_vec_irq_free(struct irq_domain *domain,
770 				unsigned int virq,
771 				unsigned int nr_bm_irqs,
772 				unsigned int nr_dom_irqs)
773 {
774 	struct hv_pci_chip_data *chip_data = domain->host_data;
775 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
776 	int first = d->hwirq - HV_PCI_MSI_SPI_START;
777 	int i;
778 
779 	mutex_lock(&chip_data->map_lock);
780 	bitmap_release_region(chip_data->spi_map,
781 			      first,
782 			      get_count_order(nr_bm_irqs));
783 	mutex_unlock(&chip_data->map_lock);
784 	for (i = 0; i < nr_dom_irqs; i++) {
785 		if (i)
786 			d = irq_domain_get_irq_data(domain, virq + i);
787 		irq_domain_reset_irq_data(d);
788 	}
789 
790 	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
791 }
792 
793 static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
794 				       unsigned int virq,
795 				       unsigned int nr_irqs)
796 {
797 	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
798 }
799 
800 static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
801 				       unsigned int nr_irqs,
802 				       irq_hw_number_t *hwirq)
803 {
804 	struct hv_pci_chip_data *chip_data = domain->host_data;
805 	int index;
806 
807 	/* Find and allocate region from the SPI bitmap */
808 	mutex_lock(&chip_data->map_lock);
809 	index = bitmap_find_free_region(chip_data->spi_map,
810 					HV_PCI_MSI_SPI_NR,
811 					get_count_order(nr_irqs));
812 	mutex_unlock(&chip_data->map_lock);
813 	if (index < 0)
814 		return -ENOSPC;
815 
816 	*hwirq = index + HV_PCI_MSI_SPI_START;
817 
818 	return 0;
819 }
820 
821 static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
822 					   unsigned int virq,
823 					   irq_hw_number_t hwirq)
824 {
825 	struct irq_fwspec fwspec;
826 	struct irq_data *d;
827 	int ret;
828 
829 	fwspec.fwnode = domain->parent->fwnode;
830 	if (is_of_node(fwspec.fwnode)) {
831 		/* SPI lines for OF translations start at offset 32 */
832 		fwspec.param_count = 3;
833 		fwspec.param[0] = 0;
834 		fwspec.param[1] = hwirq - 32;
835 		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
836 	} else {
837 		fwspec.param_count = 2;
838 		fwspec.param[0] = hwirq;
839 		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
840 	}
841 
842 	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
843 	if (ret)
844 		return ret;
845 
846 	/*
847 	 * Since the interrupt specifier is not coming from ACPI or DT, the
848 	 * trigger type will need to be set explicitly. Otherwise, it will be
849 	 * set to whatever is in the GIC configuration.
850 	 */
851 	d = irq_domain_get_irq_data(domain->parent, virq);
852 
853 	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
854 }
855 
856 static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
857 				       unsigned int virq, unsigned int nr_irqs,
858 				       void *args)
859 {
860 	irq_hw_number_t hwirq;
861 	unsigned int i;
862 	int ret;
863 
864 	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
865 	if (ret)
866 		return ret;
867 
868 	for (i = 0; i < nr_irqs; i++) {
869 		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
870 						      hwirq + i);
871 		if (ret) {
872 			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
873 			return ret;
874 		}
875 
876 		irq_domain_set_hwirq_and_chip(domain, virq + i,
877 					      hwirq + i,
878 					      &hv_arm64_msi_irq_chip,
879 					      domain->host_data);
880 		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
881 	}
882 
883 	return 0;
884 }
885 
886 /*
887  * Pick the first cpu as the irq affinity that can be temporarily used for
888  * composing MSI from the hypervisor. GIC will eventually set the right
889  * affinity for the irq and the 'unmask' will retarget the interrupt to that
890  * cpu.
891  */
892 static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
893 					  struct irq_data *irqd, bool reserve)
894 {
895 	int cpu = cpumask_first(cpu_present_mask);
896 
897 	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
898 
899 	return 0;
900 }
901 
902 static const struct irq_domain_ops hv_pci_domain_ops = {
903 	.alloc	= hv_pci_vec_irq_domain_alloc,
904 	.free	= hv_pci_vec_irq_domain_free,
905 	.activate = hv_pci_vec_irq_domain_activate,
906 };
907 
908 #ifdef CONFIG_OF
909 
910 static struct irq_domain *hv_pci_of_irq_domain_parent(void)
911 {
912 	struct device_node *parent;
913 	struct irq_domain *domain;
914 
915 	parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
916 	if (!parent)
917 		return NULL;
918 	domain = irq_find_host(parent);
919 	of_node_put(parent);
920 
921 	return domain;
922 }
923 
924 #endif
925 
926 #ifdef CONFIG_ACPI
927 
928 static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
929 {
930 	acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
931 
932 	gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
933 	if (!gsi_domain_disp_fn)
934 		return NULL;
935 	return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
936 				     DOMAIN_BUS_ANY);
937 }
938 
939 #endif
940 
941 static int hv_pci_irqchip_init(void)
942 {
943 	static struct hv_pci_chip_data *chip_data;
944 	struct fwnode_handle *fn = NULL;
945 	struct irq_domain *irq_domain_parent = NULL;
946 	int ret = -ENOMEM;
947 
948 	chip_data = kzalloc_obj(*chip_data);
949 	if (!chip_data)
950 		return ret;
951 
952 	mutex_init(&chip_data->map_lock);
953 	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
954 	if (!fn)
955 		goto free_chip;
956 
957 	/*
958 	 * Once enabled, the IRQ domain should not be removed, since there is
959 	 * no way to ensure that all the corresponding devices are gone and
960 	 * that no interrupts will be generated.
961 	 */
962 #ifdef CONFIG_ACPI
963 	if (!acpi_disabled)
964 		irq_domain_parent = hv_pci_acpi_irq_domain_parent();
965 #endif
966 #ifdef CONFIG_OF
967 	if (!irq_domain_parent)
968 		irq_domain_parent = hv_pci_of_irq_domain_parent();
969 #endif
970 	if (!irq_domain_parent) {
971 		WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
972 		ret = -EINVAL;
973 		goto free_chip;
974 	}
975 
976 	hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
977 		HV_PCI_MSI_SPI_NR,
978 		fn, &hv_pci_domain_ops,
979 		chip_data);
980 
981 	if (!hv_msi_gic_irq_domain) {
982 		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
983 		goto free_chip;
984 	}
985 
986 	return 0;
987 
988 free_chip:
989 	kfree(chip_data);
990 	if (fn)
991 		irq_domain_free_fwnode(fn);
992 
993 	return ret;
994 }
995 
996 static struct irq_domain *hv_pci_get_root_domain(void)
997 {
998 	return hv_msi_gic_irq_domain;
999 }
1000 
1001 /*
1002  * SPIs are used for PCI device interrupts and are managed via the GICD
1003  * registers, which Hyper-V already supports, so no hypercall is needed.
1004  */
1005 static void hv_arch_irq_unmask(struct irq_data *data) { }
1006 #endif /* CONFIG_ARM64 */
1007 
1008 /**
1009  * hv_pci_generic_compl() - Invoked for a completion packet
1010  * @context:		Set up by the sender of the packet.
1011  * @resp:		The response packet
1012  * @resp_packet_size:	Size in bytes of the packet
1013  *
1014  * This function is used to trigger an event and report status
1015  * for any message for which the completion packet contains a
1016  * status and nothing else.
1017  */
1018 static void hv_pci_generic_compl(void *context, struct pci_response *resp,
1019 				 int resp_packet_size)
1020 {
1021 	struct hv_pci_compl *comp_pkt = context;
1022 
1023 	comp_pkt->completion_status = resp->status;
1024 	complete(&comp_pkt->host_event);
1025 }
1026 
1027 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
1028 						u32 wslot);
1029 
1030 static void get_pcichild(struct hv_pci_dev *hpdev)
1031 {
1032 	refcount_inc(&hpdev->refs);
1033 }
1034 
1035 static void put_pcichild(struct hv_pci_dev *hpdev)
1036 {
1037 	if (refcount_dec_and_test(&hpdev->refs))
1038 		kfree(hpdev);
1039 }
1040 
1041 /*
1042  * There is no good way to get notified from vmbus_onoffer_rescind(),
1043  * so let's use polling here, since this is not a hot path.
1044  */
1045 static int wait_for_response(struct hv_device *hdev,
1046 			     struct completion *comp)
1047 {
1048 	while (true) {
1049 		if (hdev->channel->rescind) {
1050 			dev_warn_once(&hdev->device, "The device is gone.\n");
1051 			return -ENODEV;
1052 		}
1053 
1054 		if (wait_for_completion_timeout(comp, HZ / 10))
1055 			break;
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 /**
1062  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
1063  * @devfn:	The Linux representation of PCI slot
1064  *
1065  * Windows uses a slightly different representation of PCI slot.
1066  *
1067  * Return: The Windows representation
1068  */
1069 static u32 devfn_to_wslot(int devfn)
1070 {
1071 	union win_slot_encoding wslot;
1072 
1073 	wslot.slot = 0;
1074 	wslot.bits.dev = PCI_SLOT(devfn);
1075 	wslot.bits.func = PCI_FUNC(devfn);
1076 
1077 	return wslot.slot;
1078 }
1079 
1080 /**
1081  * wslot_to_devfn() - Convert from Windows PCI slot to Linux
1082  * @wslot:	The Windows representation of PCI slot
1083  *
1084  * Windows uses a slightly different representation of PCI slot.
1085  *
1086  * Return: The Linux representation
1087  */
1088 static int wslot_to_devfn(u32 wslot)
1089 {
1090 	union win_slot_encoding slot_no;
1091 
1092 	slot_no.slot = wslot;
1093 	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
1094 }
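/*
 * Example round trip (editor's note):
 *
 *	devfn_to_wslot(PCI_DEVFN(2, 3)) == 0x62
 *	wslot_to_devfn(0x62) == PCI_DEVFN(2, 3)
 */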
1095 
1096 static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
1097 {
1098 	struct hv_mmio_read_input *in;
1099 	struct hv_mmio_read_output *out;
1100 	u64 ret;
1101 
1102 	/*
1103 	 * Must be called with interrupts disabled so it is safe
1104 	 * to use the per-cpu input argument page.  Use it for
1105 	 * both input and output.
1106 	 */
1107 	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
1108 	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
1109 	in->gpa = gpa;
1110 	in->size = size;
1111 
1112 	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
1113 	if (hv_result_success(ret)) {
1114 		switch (size) {
1115 		case 1:
1116 			*val = *(u8 *)(out->data);
1117 			break;
1118 		case 2:
1119 			*val = *(u16 *)(out->data);
1120 			break;
1121 		default:
1122 			*val = *(u32 *)(out->data);
1123 			break;
1124 		}
1125 	} else
1126 		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
1127 				ret, gpa, size);
1128 }
1129 
1130 static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
1131 {
1132 	struct hv_mmio_write_input *in;
1133 	u64 ret;
1134 
1135 	/*
1136 	 * Must be called with interrupts disabled so it is safe
1137 	 * to use the per-cpu input argument memory.
1138 	 */
1139 	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
1140 	in->gpa = gpa;
1141 	in->size = size;
1142 	switch (size) {
1143 	case 1:
1144 		*(u8 *)(in->data) = val;
1145 		break;
1146 	case 2:
1147 		*(u16 *)(in->data) = val;
1148 		break;
1149 	default:
1150 		*(u32 *)(in->data) = val;
1151 		break;
1152 	}
1153 
1154 	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
1155 	if (!hv_result_success(ret))
1156 		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
1157 				ret, gpa, size);
1158 }
1159 
1160 /*
1161  * PCI Configuration Space for these root PCI buses is implemented as a pair
1162  * of pages in memory-mapped I/O space.  Writing to the first page chooses
1163  * the PCI function being written or read.  Once the first page has been
1164  * written to, the following page maps in the entire configuration space of
1165  * the function.
1166  */
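/*
 * A minimal sketch of the access sequence described above (editor's note),
 * assuming the memory-mapped (non-hypercall) path:
 *
 *	writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);  // page 1: pick function
 *	mb();						    // select before access
 *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where); // page 2: cfg space
 *
 * _hv_pcifront_read_config()/_hv_pcifront_write_config() below implement this
 * under hbus->config_lock, falling back to HVCALL_MMIO_READ/HVCALL_MMIO_WRITE
 * hypercalls when hbus->use_calls is set.
 */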
1167 
1168 /**
1169  * _hv_pcifront_read_config() - Internal PCI config read
1170  * @hpdev:	The PCI driver's representation of the device
1171  * @where:	Offset within config space
1172  * @size:	Size of the transfer
1173  * @val:	Pointer to the buffer receiving the data
1174  */
1175 static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
1176 				     int size, u32 *val)
1177 {
1178 	struct hv_pcibus_device *hbus = hpdev->hbus;
1179 	struct device *dev = &hbus->hdev->device;
1180 	int offset = where + CFG_PAGE_OFFSET;
1181 	unsigned long flags;
1182 
1183 	/*
1184 	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
1185 	 */
1186 	if (where + size <= PCI_COMMAND) {
1187 		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
1188 	} else if (where >= PCI_CLASS_REVISION && where + size <=
1189 		   PCI_CACHE_LINE_SIZE) {
1190 		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
1191 		       PCI_CLASS_REVISION, size);
1192 	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
1193 		   PCI_ROM_ADDRESS) {
1194 		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
1195 		       PCI_SUBSYSTEM_VENDOR_ID, size);
1196 	} else if (where >= PCI_ROM_ADDRESS && where + size <=
1197 		   PCI_CAPABILITY_LIST) {
1198 		/* ROM BARs are unimplemented */
1199 		*val = 0;
1200 	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
1201 		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
1202 		/*
1203 		 * Interrupt Line and Interrupt PIN are hard-wired to zero
1204 		 * because this front-end only supports message-signaled
1205 		 * interrupts.
1206 		 */
1207 		*val = 0;
1208 	} else if (where + size <= CFG_PAGE_SIZE) {
1209 
1210 		spin_lock_irqsave(&hbus->config_lock, flags);
1211 		if (hbus->use_calls) {
1212 			phys_addr_t addr = hbus->mem_config->start + offset;
1213 
1214 			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
1215 						hpdev->desc.win_slot.slot);
1216 			hv_pci_read_mmio(dev, addr, size, val);
1217 		} else {
1218 			void __iomem *addr = hbus->cfg_addr + offset;
1219 
1220 			/* Choose the function to be read. (See comment above) */
1221 			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
1222 			/* Make sure the function was chosen before reading. */
1223 			mb();
1224 			/* Read from that function's config space. */
1225 			switch (size) {
1226 			case 1:
1227 				*val = readb(addr);
1228 				break;
1229 			case 2:
1230 				*val = readw(addr);
1231 				break;
1232 			default:
1233 				*val = readl(addr);
1234 				break;
1235 			}
1236 			/*
1237 			 * Make sure the read was done before we release the
1238 			 * spinlock allowing consecutive reads/writes.
1239 			 */
1240 			mb();
1241 		}
1242 		spin_unlock_irqrestore(&hbus->config_lock, flags);
1243 	} else {
1244 		dev_err(dev, "Attempt to read beyond a function's config space.\n");
1245 	}
1246 }
1247 
1248 static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
1249 {
1250 	struct hv_pcibus_device *hbus = hpdev->hbus;
1251 	struct device *dev = &hbus->hdev->device;
1252 	u32 val;
1253 	u16 ret;
1254 	unsigned long flags;
1255 
1256 	spin_lock_irqsave(&hbus->config_lock, flags);
1257 
1258 	if (hbus->use_calls) {
1259 		phys_addr_t addr = hbus->mem_config->start +
1260 					 CFG_PAGE_OFFSET + PCI_VENDOR_ID;
1261 
1262 		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
1263 					hpdev->desc.win_slot.slot);
1264 		hv_pci_read_mmio(dev, addr, 2, &val);
1265 		ret = val;  /* Truncates to 16 bits */
1266 	} else {
1267 		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
1268 					     PCI_VENDOR_ID;
1269 		/* Choose the function to be read. (See comment above) */
1270 		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
1271 		/* Make sure the function was chosen before we start reading. */
1272 		mb();
1273 		/* Read from that function's config space. */
1274 		ret = readw(addr);
1275 		/*
1276 		 * mb() is not required here, because the
1277 		 * spin_unlock_irqrestore() is a barrier.
1278 		 */
1279 	}
1280 
1281 	spin_unlock_irqrestore(&hbus->config_lock, flags);
1282 
1283 	return ret;
1284 }
1285 
1286 /**
1287  * _hv_pcifront_write_config() - Internal PCI config write
1288  * @hpdev:	The PCI driver's representation of the device
1289  * @where:	Offset within config space
1290  * @size:	Size of the transfer
1291  * @val:	The data being transferred
1292  */
1293 static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
1294 				      int size, u32 val)
1295 {
1296 	struct hv_pcibus_device *hbus = hpdev->hbus;
1297 	struct device *dev = &hbus->hdev->device;
1298 	int offset = where + CFG_PAGE_OFFSET;
1299 	unsigned long flags;
1300 
1301 	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
1302 	    where + size <= PCI_CAPABILITY_LIST) {
1303 		/* SSIDs and ROM BARs are read-only */
1304 	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
1305 		spin_lock_irqsave(&hbus->config_lock, flags);
1306 
1307 		if (hbus->use_calls) {
1308 			phys_addr_t addr = hbus->mem_config->start + offset;
1309 
1310 			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
1311 						hpdev->desc.win_slot.slot);
1312 			hv_pci_write_mmio(dev, addr, size, val);
1313 		} else {
1314 			void __iomem *addr = hbus->cfg_addr + offset;
1315 
1316 			/* Choose the function to write. (See comment above) */
1317 			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
1318 			/* Make sure the function was chosen before writing. */
1319 			wmb();
1320 			/* Write to that function's config space. */
1321 			switch (size) {
1322 			case 1:
1323 				writeb(val, addr);
1324 				break;
1325 			case 2:
1326 				writew(val, addr);
1327 				break;
1328 			default:
1329 				writel(val, addr);
1330 				break;
1331 			}
1332 			/*
1333 			 * Make sure the write was done before we release the
1334 			 * spinlock allowing consecutive reads/writes.
1335 			 */
1336 			mb();
1337 		}
1338 		spin_unlock_irqrestore(&hbus->config_lock, flags);
1339 	} else {
1340 		dev_err(dev, "Attempt to write beyond a function's config space.\n");
1341 	}
1342 }
1343 
1344 /**
1345  * hv_pcifront_read_config() - Read configuration space
1346  * @bus: PCI Bus structure
1347  * @devfn: Device/function
1348  * @where: Offset from base
1349  * @size: Byte/word/dword
1350  * @val: Value to be read
1351  *
1352  * Return: PCIBIOS_SUCCESSFUL on success
1353  *	   PCIBIOS_DEVICE_NOT_FOUND on failure
1354  */
1355 static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
1356 				   int where, int size, u32 *val)
1357 {
1358 	struct hv_pcibus_device *hbus =
1359 		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
1360 	struct hv_pci_dev *hpdev;
1361 
1362 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
1363 	if (!hpdev)
1364 		return PCIBIOS_DEVICE_NOT_FOUND;
1365 
1366 	_hv_pcifront_read_config(hpdev, where, size, val);
1367 
1368 	put_pcichild(hpdev);
1369 	return PCIBIOS_SUCCESSFUL;
1370 }
1371 
1372 /**
1373  * hv_pcifront_write_config() - Write configuration space
1374  * @bus: PCI Bus structure
1375  * @devfn: Device/function
1376  * @where: Offset from base
1377  * @size: Byte/word/dword
1378  * @val: Value to be written to device
1379  *
1380  * Return: PCIBIOS_SUCCESSFUL on success
1381  *	   PCIBIOS_DEVICE_NOT_FOUND on failure
1382  */
1383 static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
1384 				    int where, int size, u32 val)
1385 {
1386 	struct hv_pcibus_device *hbus =
1387 	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
1388 	struct hv_pci_dev *hpdev;
1389 
1390 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
1391 	if (!hpdev)
1392 		return PCIBIOS_DEVICE_NOT_FOUND;
1393 
1394 	_hv_pcifront_write_config(hpdev, where, size, val);
1395 
1396 	put_pcichild(hpdev);
1397 	return PCIBIOS_SUCCESSFUL;
1398 }
1399 
1400 /* PCIe operations */
1401 static struct pci_ops hv_pcifront_ops = {
1402 	.read  = hv_pcifront_read_config,
1403 	.write = hv_pcifront_write_config,
1404 };
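/*
 * Editor's note: these ops are reached through the generic config accessors,
 * e.g. a driver call such as
 *
 *	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *
 * lands in hv_pcifront_read_config() for devices on this virtual bus.
 */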
1405 
1406 /*
1407  * Paravirtual backchannel
1408  *
1409  * Hyper-V SR-IOV provides a backchannel mechanism in software for
1410  * communication between a VF driver and a PF driver.  These
1411  * "configuration blocks" are similar in concept to PCI configuration space,
1412  * but instead of doing reads and writes in 32-bit chunks through a very slow
1413  * path, packets of up to 128 bytes can be sent or received asynchronously.
1414  *
1415  * Nearly every SR-IOV device contains just such a communications channel in
1416  * hardware, so using this one in software is usually optional.  Using the
1417  * software channel, however, allows driver implementers to leverage software
1418  * tools that fuzz the communications channel looking for vulnerabilities.
1419  *
1420  * The usage model for these packets puts the responsibility for reading or
1421  * writing on the VF driver.  The VF driver sends a read or a write packet,
1422  * indicating which "block" is being referred to by number.
1423  *
1424  * If the PF driver wishes to initiate communication, it can "invalidate" one or
1425  * more of the first 64 blocks.  This invalidation is delivered via a callback
1426  * supplied to the VF driver by this driver.
1427  *
1428  * No protocol is implied, except that supplied by the PF and VF drivers.
1429  */
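/*
 * A hypothetical VF-driver usage sketch of this backchannel (editor's note;
 * the block ID and callback names below are illustrative only):
 *
 *	static void my_vf_invalidate(void *context, u64 block_mask)
 *	{
 *		// PF invalidated one or more of blocks 0-63; re-read them.
 *	}
 *
 *	unsigned int returned;
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *
 *	hv_register_block_invalidate(pdev, ctx, my_vf_invalidate);
 *	hv_read_config_block(pdev, buf, sizeof(buf), MY_BLOCK_ID, &returned);
 *	hv_write_config_block(pdev, buf, returned, MY_BLOCK_ID);
 *
 * In practice VF drivers reach these static functions through struct
 * hyperv_pci_block_ops (see include/linux/hyperv.h).
 */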
1430 
1431 struct hv_read_config_compl {
1432 	struct hv_pci_compl comp_pkt;
1433 	void *buf;
1434 	unsigned int len;
1435 	unsigned int bytes_returned;
1436 };
1437 
1438 /**
1439  * hv_pci_read_config_compl() - Invoked when a response packet
1440  * for a read config block operation arrives.
1441  * @context:		Identifies the read config operation
1442  * @resp:		The response packet itself
1443  * @resp_packet_size:	Size in bytes of the response packet
1444  */
1445 static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
1446 				     int resp_packet_size)
1447 {
1448 	struct hv_read_config_compl *comp = context;
1449 	struct pci_read_block_response *read_resp =
1450 		(struct pci_read_block_response *)resp;
1451 	unsigned int data_len, hdr_len;
1452 
1453 	hdr_len = offsetof(struct pci_read_block_response, bytes);
1454 	if (resp_packet_size < hdr_len) {
1455 		comp->comp_pkt.completion_status = -1;
1456 		goto out;
1457 	}
1458 
1459 	data_len = resp_packet_size - hdr_len;
1460 	if (data_len > 0 && read_resp->status == 0) {
1461 		comp->bytes_returned = min(comp->len, data_len);
1462 		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
1463 	} else {
1464 		comp->bytes_returned = 0;
1465 	}
1466 
1467 	comp->comp_pkt.completion_status = read_resp->status;
1468 out:
1469 	complete(&comp->comp_pkt.host_event);
1470 }
1471 
1472 /**
1473  * hv_read_config_block() - Sends a read config block request to
1474  * the back-end driver running in the Hyper-V parent partition.
1475  * @pdev:		The PCI driver's representation for this device.
1476  * @buf:		Buffer into which the config block will be copied.
1477  * @len:		Size in bytes of buf.
1478  * @block_id:		Identifies the config block which has been requested.
1479  * @bytes_returned:	Size which came back from the back-end driver.
1480  *
1481  * Return: 0 on success, -errno on failure
1482  */
1483 static int hv_read_config_block(struct pci_dev *pdev, void *buf,
1484 				unsigned int len, unsigned int block_id,
1485 				unsigned int *bytes_returned)
1486 {
1487 	struct hv_pcibus_device *hbus =
1488 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1489 			     sysdata);
1490 	struct {
1491 		struct pci_packet pkt;
1492 		char buf[sizeof(struct pci_read_block)];
1493 	} pkt;
1494 	struct hv_read_config_compl comp_pkt;
1495 	struct pci_read_block *read_blk;
1496 	int ret;
1497 
1498 	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1499 		return -EINVAL;
1500 
1501 	init_completion(&comp_pkt.comp_pkt.host_event);
1502 	comp_pkt.buf = buf;
1503 	comp_pkt.len = len;
1504 
1505 	memset(&pkt, 0, sizeof(pkt));
1506 	pkt.pkt.completion_func = hv_pci_read_config_compl;
1507 	pkt.pkt.compl_ctxt = &comp_pkt;
1508 	read_blk = (struct pci_read_block *)pkt.buf;
1509 	read_blk->message_type.type = PCI_READ_BLOCK;
1510 	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1511 	read_blk->block_id = block_id;
1512 	read_blk->bytes_requested = len;
1513 
1514 	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
1515 			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
1516 			       VM_PKT_DATA_INBAND,
1517 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1518 	if (ret)
1519 		return ret;
1520 
1521 	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
1522 	if (ret)
1523 		return ret;
1524 
1525 	if (comp_pkt.comp_pkt.completion_status != 0 ||
1526 	    comp_pkt.bytes_returned == 0) {
1527 		dev_err(&hbus->hdev->device,
1528 			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
1529 			comp_pkt.comp_pkt.completion_status,
1530 			comp_pkt.bytes_returned);
1531 		return -EIO;
1532 	}
1533 
1534 	*bytes_returned = comp_pkt.bytes_returned;
1535 	return 0;
1536 }
1537 
1538 /**
1539  * hv_pci_write_config_compl() - Invoked when a response packet for a write
1540  * config block operation arrives.
1541  * @context:		Identifies the write config operation
1542  * @resp:		The response packet itself
1543  * @resp_packet_size:	Size in bytes of the response packet
1544  */
1545 static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
1546 				      int resp_packet_size)
1547 {
1548 	struct hv_pci_compl *comp_pkt = context;
1549 
1550 	comp_pkt->completion_status = resp->status;
1551 	complete(&comp_pkt->host_event);
1552 }
1553 
1554 /**
1555  * hv_write_config_block() - Sends a write config block request to the
1556  * back-end driver running in the Hyper-V parent partition.
1557  * @pdev:		The PCI driver's representation for this device.
1558  * @buf:		Buffer from which the config block will be copied.
1559  * @len:		Size in bytes of buf.
1560  * @block_id:		Identifies the config block which is being written.
1561  *
1562  * Return: 0 on success, -errno on failure
1563  */
1564 static int hv_write_config_block(struct pci_dev *pdev, void *buf,
1565 				unsigned int len, unsigned int block_id)
1566 {
1567 	struct hv_pcibus_device *hbus =
1568 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1569 			     sysdata);
1570 	struct {
1571 		struct pci_packet pkt;
1572 		char buf[sizeof(struct pci_write_block)];
1573 		u32 reserved;
1574 	} pkt;
1575 	struct hv_pci_compl comp_pkt;
1576 	struct pci_write_block *write_blk;
1577 	u32 pkt_size;
1578 	int ret;
1579 
1580 	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1581 		return -EINVAL;
1582 
1583 	init_completion(&comp_pkt.host_event);
1584 
1585 	memset(&pkt, 0, sizeof(pkt));
1586 	pkt.pkt.completion_func = hv_pci_write_config_compl;
1587 	pkt.pkt.compl_ctxt = &comp_pkt;
1588 	write_blk = (struct pci_write_block *)pkt.buf;
1589 	write_blk->message_type.type = PCI_WRITE_BLOCK;
1590 	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1591 	write_blk->block_id = block_id;
1592 	write_blk->byte_count = len;
1593 	memcpy(write_blk->bytes, buf, len);
1594 	pkt_size = offsetof(struct pci_write_block, bytes) + len;
1595 	/*
1596 	 * This quirk is required on some hosts shipped around 2018, because
1597 	 * these hosts don't check the pkt_size correctly (new hosts have been
1598 	 * fixed since early 2019). The quirk is also safe on very old hosts
1599 	 * and new hosts, because, on them, what really matters is the length
1600 	 * specified in write_blk->byte_count.
1601 	 */
1602 	pkt_size += sizeof(pkt.reserved);
1603 
1604 	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
1605 			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
1606 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1607 	if (ret)
1608 		return ret;
1609 
1610 	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
1611 	if (ret)
1612 		return ret;
1613 
1614 	if (comp_pkt.completion_status != 0) {
1615 		dev_err(&hbus->hdev->device,
1616 			"Write Config Block failed: 0x%x\n",
1617 			comp_pkt.completion_status);
1618 		return -EIO;
1619 	}
1620 
1621 	return 0;
1622 }
1623 
1624 /**
1625  * hv_register_block_invalidate() - Invoked when a config block invalidation
1626  * arrives from the back-end driver.
1627  * @pdev:		The PCI driver's representation for this device.
1628  * @context:		Identifies the device.
1629  * @block_invalidate:	Identifies all of the blocks being invalidated.
1630  *
1631  * Return: 0 on success, -errno on failure
1632  */
1633 static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
1634 					void (*block_invalidate)(void *context,
1635 								 u64 block_mask))
1636 {
1637 	struct hv_pcibus_device *hbus =
1638 		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1639 			     sysdata);
1640 	struct hv_pci_dev *hpdev;
1641 
1642 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1643 	if (!hpdev)
1644 		return -ENODEV;
1645 
1646 	hpdev->block_invalidate = block_invalidate;
1647 	hpdev->invalidate_context = context;
1648 
1649 	put_pcichild(hpdev);
1650 	return 0;
1651 
1652 }
1653 
1654 /* Interrupt management hooks */
1655 static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1656 			     struct tran_int_desc *int_desc)
1657 {
1658 	struct pci_delete_interrupt *int_pkt;
1659 	struct {
1660 		struct pci_packet pkt;
1661 		u8 buffer[sizeof(struct pci_delete_interrupt)];
1662 	} ctxt;
1663 
1664 	if (!int_desc->vector_count) {
1665 		kfree(int_desc);
1666 		return;
1667 	}
1668 	memset(&ctxt, 0, sizeof(ctxt));
1669 	int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
1670 	int_pkt->message_type.type =
1671 		PCI_DELETE_INTERRUPT_MESSAGE;
1672 	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1673 	int_pkt->int_desc = *int_desc;
1674 	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1675 			 0, VM_PKT_DATA_INBAND, 0);
1676 	kfree(int_desc);
1677 }
1678 
1679 /**
1680  * hv_msi_free() - Free the MSI.
1681  * @domain:	The interrupt domain pointer
1682  * @irq:	Identifies the IRQ.
1683  *
1684  * The Hyper-V parent partition and hypervisor are tracking the
1685  * messages that are in use, keeping the interrupt redirection
1686  * table up to date.  This callback sends a message that frees
1687  * the IRT entry and related tracking nonsense.
1688  */
1689 static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
1690 {
1691 	struct hv_pcibus_device *hbus;
1692 	struct hv_pci_dev *hpdev;
1693 	struct pci_dev *pdev;
1694 	struct tran_int_desc *int_desc;
1695 	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1696 	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1697 
1698 	pdev = msi_desc_to_pci_dev(msi);
1699 	hbus = domain->host_data;
1700 	int_desc = irq_data_get_irq_chip_data(irq_data);
1701 	if (!int_desc)
1702 		return;
1703 
1704 	irq_data->chip_data = NULL;
1705 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1706 	if (!hpdev) {
1707 		kfree(int_desc);
1708 		return;
1709 	}
1710 
1711 	hv_int_desc_free(hpdev, int_desc);
1712 	put_pcichild(hpdev);
1713 }
1714 
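/*
 * Delegate masking to the parent interrupt chip when it implements
 * irq_mask; not every root domain does, hence the check.
 */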
1715 static void hv_irq_mask(struct irq_data *data)
1716 {
1717 	if (data->parent_data->chip->irq_mask)
1718 		irq_chip_mask_parent(data);
1719 }
1720 
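/*
 * Ask the hypervisor to retarget the interrupt to the current affinity
 * (hv_arch_irq_unmask()), then unmask at the parent chip if it
 * implements irq_unmask.
 */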
1721 static void hv_irq_unmask(struct irq_data *data)
1722 {
1723 	hv_arch_irq_unmask(data);
1724 
1725 	if (data->parent_data->chip->irq_unmask)
1726 		irq_chip_unmask_parent(data);
1727 }
1728 
1729 struct compose_comp_ctxt {
1730 	struct hv_pci_compl comp_pkt;
1731 	struct tran_int_desc int_desc;
1732 };
1733 
1734 static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1735 				 int resp_packet_size)
1736 {
1737 	struct compose_comp_ctxt *comp_pkt = context;
1738 	struct pci_create_int_response *int_resp =
1739 		(struct pci_create_int_response *)resp;
1740 
1741 	if (resp_packet_size < sizeof(*int_resp)) {
1742 		comp_pkt->comp_pkt.completion_status = -1;
1743 		goto out;
1744 	}
1745 	comp_pkt->comp_pkt.completion_status = resp->status;
1746 	comp_pkt->int_desc = int_resp->int_desc;
1747 out:
1748 	complete(&comp_pkt->comp_pkt.host_event);
1749 }
1750 
1751 static u32 hv_compose_msi_req_v1(
1752 	struct pci_create_interrupt *int_pkt,
1753 	u32 slot, u8 vector, u16 vector_count)
1754 {
1755 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1756 	int_pkt->wslot.slot = slot;
1757 	int_pkt->int_desc.vector = vector;
1758 	int_pkt->int_desc.vector_count = vector_count;
1759 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1760 
1761 	/*
1762 	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1763 	 * hv_irq_unmask().
1764 	 */
1765 	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1766 
1767 	return sizeof(*int_pkt);
1768 }
1769 
1770 /*
1771  * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
1772  * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
1773  * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
1774  * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
1775  * not irrelevant because Hyper-V chooses the physical CPU to handle the
1776  * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
1777  * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
1778  * but assigning too many vPCI device interrupts to the same pCPU can cause a
1779  * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
1780  * to spread out the pCPUs that it selects.
1781  *
1782  * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
1783  * to always return the same dummy vCPU, because a second call to
1784  * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
1785  * new pCPU for the interrupt. But for the multi-MSI case, the second call to
1786  * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
1787  * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
1788  * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
1789  * the same pCPU, even though the vCPUs will be spread out by later calls
1790  * to hv_irq_unmask(); that is the best we can do now.
1791  *
1792  * As of Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
1793  * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
1794  * enhancement is planned for a future version. With that enhancement, the
1795  * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
1796  * device will be spread across multiple pCPUs.
1797  */
1798 
1799 /*
1800  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1801  * by subsequent retarget in hv_irq_unmask().
1802  */
1803 static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1804 {
1805 	return cpumask_first_and(affinity, cpu_online_mask);
1806 }
1807 
1808 /*
1809  * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
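 * E.g., with CPUs 0-3 online, successive multi-MSI allocations get dummy
 * vCPUs 0, 1, 2, 3, 0, ..., which spreads out the pCPUs Hyper-V selects.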
1810  */
1811 static int hv_compose_multi_msi_req_get_cpu(void)
1812 {
1813 	static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1814 
1815 	/* -1 means starting with CPU 0 */
1816 	static int cpu_next = -1;
1817 
1818 	unsigned long flags;
1819 	int cpu;
1820 
1821 	spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1822 
1823 	cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
1824 	cpu = cpu_next;
1825 
1826 	spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1827 
1828 	return cpu;
1829 }
1830 
1831 static u32 hv_compose_msi_req_v2(
1832 	struct pci_create_interrupt2 *int_pkt, int cpu,
1833 	u32 slot, u8 vector, u16 vector_count)
1834 {
1835 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1836 	int_pkt->wslot.slot = slot;
1837 	int_pkt->int_desc.vector = vector;
1838 	int_pkt->int_desc.vector_count = vector_count;
1839 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1840 	int_pkt->int_desc.processor_array[0] =
1841 		hv_cpu_number_to_vp_number(cpu);
1842 	int_pkt->int_desc.processor_count = 1;
1843 
1844 	return sizeof(*int_pkt);
1845 }
1846 
1847 static u32 hv_compose_msi_req_v3(
1848 	struct pci_create_interrupt3 *int_pkt, int cpu,
1849 	u32 slot, u32 vector, u16 vector_count)
1850 {
1851 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1852 	int_pkt->wslot.slot = slot;
1853 	int_pkt->int_desc.vector = vector;
1854 	int_pkt->int_desc.reserved = 0;
1855 	int_pkt->int_desc.vector_count = vector_count;
1856 	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1857 	int_pkt->int_desc.processor_array[0] =
1858 		hv_cpu_number_to_vp_number(cpu);
1859 	int_pkt->int_desc.processor_count = 1;
1860 
1861 	return sizeof(*int_pkt);
1862 }
1863 
1864 /**
1865  * hv_compose_msi_msg() - Supplies a valid MSI address/data
1866  * @data:	Everything about this MSI
1867  * @msg:	Buffer that is filled in by this function
1868  *
1869  * This function unpacks the IRQ, looking for the target CPU set, IDT
1870  * vector, and mode, and sends a message to the parent partition
1871  * asking for a mapping for that tuple in this partition.  The
1872  * response supplies a data value and address to which that data
1873  * should be written to trigger that interrupt.
1874  */
1875 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1876 {
1877 	struct hv_pcibus_device *hbus;
1878 	struct vmbus_channel *channel;
1879 	struct hv_pci_dev *hpdev;
1880 	struct pci_bus *pbus;
1881 	struct pci_dev *pdev;
1882 	const struct cpumask *dest;
1883 	struct compose_comp_ctxt comp;
1884 	struct tran_int_desc *int_desc;
1885 	struct msi_desc *msi_desc;
1886 	/*
1887 	 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1888 	 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1889 	 */
1890 	u16 vector_count;
1891 	u32 vector;
1892 	struct {
1893 		struct pci_packet pci_pkt;
1894 		union {
1895 			struct pci_create_interrupt v1;
1896 			struct pci_create_interrupt2 v2;
1897 			struct pci_create_interrupt3 v3;
1898 		} int_pkts;
1899 	} __packed ctxt;
1900 	bool multi_msi;
1901 	u64 trans_id;
1902 	u32 size;
1903 	int ret;
1904 	int cpu;
1905 
1906 	msi_desc  = irq_data_get_msi_desc(data);
1907 	multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1908 		    msi_desc->nvec_used > 1;
1909 
1910 	/* Reuse the previous allocation */
1911 	if (data->chip_data && multi_msi) {
1912 		int_desc = data->chip_data;
1913 		msg->address_hi = int_desc->address >> 32;
1914 		msg->address_lo = int_desc->address & 0xffffffff;
1915 		msg->data = int_desc->data;
1916 		return;
1917 	}
1918 
1919 	pdev = msi_desc_to_pci_dev(msi_desc);
1920 	dest = irq_data_get_effective_affinity_mask(data);
1921 	pbus = pdev->bus;
1922 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1923 	channel = hbus->hdev->channel;
1924 	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1925 	if (!hpdev)
1926 		goto return_null_message;
1927 
1928 	/* Free any previous message that might have already been composed. */
1929 	if (data->chip_data && !multi_msi) {
1930 		int_desc = data->chip_data;
1931 		data->chip_data = NULL;
1932 		hv_int_desc_free(hpdev, int_desc);
1933 	}
1934 
1935 	int_desc = kzalloc_obj(*int_desc, GFP_ATOMIC);
1936 	if (!int_desc)
1937 		goto drop_reference;
1938 
1939 	if (multi_msi) {
1940 		/*
1941 		 * If this is not the first MSI of Multi MSI, we already have
1942 		 * a mapping.  Can exit early.
1943 		 */
1944 		if (msi_desc->irq != data->irq) {
1945 			data->chip_data = int_desc;
1946 			int_desc->address = msi_desc->msg.address_lo |
1947 					    (u64)msi_desc->msg.address_hi << 32;
1948 			int_desc->data = msi_desc->msg.data +
1949 					 (data->irq - msi_desc->irq);
1950 			msg->address_hi = msi_desc->msg.address_hi;
1951 			msg->address_lo = msi_desc->msg.address_lo;
1952 			msg->data = int_desc->data;
1953 			put_pcichild(hpdev);
1954 			return;
1955 		}
1956 		/*
1957 		 * The vector we select here is a dummy value.  The correct
1958 		 * value gets sent to the hypervisor in unmask().  This needs
1959 		 * to be aligned with the count, and also not zero.  Multi-MSI
1960 		 * counts are powers of 2 up to 32, so 32 will always work here.
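		 * (For example, an allocation with nvec_used = 8 needs a
		 * nonzero multiple of 8; 32 qualifies for every allowed
		 * count.)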
1961 		 */
1962 		vector = 32;
1963 		vector_count = msi_desc->nvec_used;
1964 		cpu = hv_compose_multi_msi_req_get_cpu();
1965 	} else {
1966 		vector = hv_msi_get_int_vector(data);
1967 		vector_count = 1;
1968 		cpu = hv_compose_msi_req_get_cpu(dest);
1969 	}
1970 
1971 	/*
1972 	 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1973 	 * always fits in a u8. Cast 'vector' down to u8 for v1/v2 explicitly
1974 	 * for better readability.
1975 	 */
1976 	memset(&ctxt, 0, sizeof(ctxt));
1977 	init_completion(&comp.comp_pkt.host_event);
1978 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1979 	ctxt.pci_pkt.compl_ctxt = &comp;
1980 
1981 	switch (hbus->protocol_version) {
1982 	case PCI_PROTOCOL_VERSION_1_1:
1983 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1984 					hpdev->desc.win_slot.slot,
1985 					(u8)vector,
1986 					vector_count);
1987 		break;
1988 
1989 	case PCI_PROTOCOL_VERSION_1_2:
1990 	case PCI_PROTOCOL_VERSION_1_3:
1991 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1992 					cpu,
1993 					hpdev->desc.win_slot.slot,
1994 					(u8)vector,
1995 					vector_count);
1996 		break;
1997 
1998 	case PCI_PROTOCOL_VERSION_1_4:
1999 		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
2000 					cpu,
2001 					hpdev->desc.win_slot.slot,
2002 					vector,
2003 					vector_count);
2004 		break;
2005 
2006 	default:
2007 		/* As we only negotiate protocol versions known to this driver,
2008 		 * this path should never be hit. However, this is not a hot
2009 		 * path, so we print a message to aid future updates.
2010 		 */
2011 		dev_err(&hbus->hdev->device,
2012 			"Unexpected vPCI protocol, update driver.");
2013 		goto free_int_desc;
2014 	}
2015 
2016 	ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
2017 				     size, (unsigned long)&ctxt.pci_pkt,
2018 				     &trans_id, VM_PKT_DATA_INBAND,
2019 				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2020 	if (ret) {
2021 		dev_err(&hbus->hdev->device,
2022 			"Sending request for interrupt failed: 0x%x",
2023 			comp.comp_pkt.completion_status);
2024 		goto free_int_desc;
2025 	}
2026 
2027 	/*
2028 	 * Prevents hv_pci_onchannelcallback() from running concurrently
2029 	 * in the tasklet.
2030 	 */
2031 	tasklet_disable_in_atomic(&channel->callback_event);
2032 
2033 	/*
2034 	 * Since this function is called with IRQ locks held, can't
2035 	 * do normal wait for completion; instead poll.
2036 	 */
2037 	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
2038 		unsigned long flags;
2039 
2040 		/* 0xFFFF means an invalid PCI VENDOR ID. */
2041 		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
2042 			dev_err_once(&hbus->hdev->device,
2043 				     "the device has gone\n");
2044 			goto enable_tasklet;
2045 		}
2046 
2047 		/*
2048 		 * Make sure that the ring buffer data structure doesn't get
2049 		 * freed while we dereference the ring buffer pointer.  Test
2050 		 * for the channel's onchannel_callback being NULL within a
2051 		 * sched_lock critical section.  See also the inline comments
2052 		 * in vmbus_reset_channel_cb().
2053 		 */
2054 		spin_lock_irqsave(&channel->sched_lock, flags);
2055 		if (unlikely(channel->onchannel_callback == NULL)) {
2056 			spin_unlock_irqrestore(&channel->sched_lock, flags);
2057 			goto enable_tasklet;
2058 		}
2059 		hv_pci_onchannelcallback(hbus);
2060 		spin_unlock_irqrestore(&channel->sched_lock, flags);
2061 
2062 		udelay(100);
2063 	}
2064 
2065 	tasklet_enable(&channel->callback_event);
2066 
2067 	if (comp.comp_pkt.completion_status < 0) {
2068 		dev_err(&hbus->hdev->device,
2069 			"Request for interrupt failed: 0x%x",
2070 			comp.comp_pkt.completion_status);
2071 		goto free_int_desc;
2072 	}
2073 
2074 	/*
2075 	 * Record the assignment so that this can be unwound later. Using
2076 	 * irq_set_chip_data() here would be appropriate, but the lock it takes
2077 	 * is already held.
2078 	 */
2079 	*int_desc = comp.int_desc;
2080 	data->chip_data = int_desc;
2081 
2082 	/* Pass up the result. */
2083 	msg->address_hi = comp.int_desc.address >> 32;
2084 	msg->address_lo = comp.int_desc.address & 0xffffffff;
2085 	msg->data = comp.int_desc.data;
2086 
2087 	put_pcichild(hpdev);
2088 	return;
2089 
2090 enable_tasklet:
2091 	tasklet_enable(&channel->callback_event);
2092 	/*
2093 	 * The completion packet on the stack becomes invalid after 'return';
2094 	 * remove the ID from the VMbus requestor if the identifier is still
2095 	 * mapped to/associated with the packet.  (The identifier could have
2096 	 * been 're-used', i.e., already removed and (re-)mapped.)
2097 	 *
2098 	 * Cf. hv_pci_onchannelcallback().
2099 	 */
2100 	vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
2101 free_int_desc:
2102 	kfree(int_desc);
2103 drop_reference:
2104 	put_pcichild(hpdev);
2105 return_null_message:
2106 	msg->address_hi = 0;
2107 	msg->address_lo = 0;
2108 	msg->data = 0;
2109 }
2110 
2111 static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
2112 				      struct irq_domain *real_parent, struct msi_domain_info *info)
2113 {
2114 	struct irq_chip *chip = info->chip;
2115 
2116 	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
2117 		return false;
2118 
2119 	info->ops->msi_prepare = hv_msi_prepare;
2120 
2121 	chip->irq_set_affinity = irq_chip_set_affinity_parent;
2122 
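	/*
	 * On x86, defer affinity changes until the next interrupt arrives,
	 * so the vector move happens in a safe (interrupt) context.
	 */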
2123 	if (IS_ENABLED(CONFIG_X86))
2124 		chip->flags |= IRQCHIP_MOVE_DEFERRED;
2125 
2126 	return true;
2127 }
2128 
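/*
 * MSI feature flags this parent domain requires of, and supports for,
 * the child PCI MSI domains created under it.
 */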
2129 #define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS		| \
2130 				    MSI_FLAG_USE_DEF_CHIP_OPS		| \
2131 				    MSI_FLAG_PCI_MSI_MASK_PARENT)
2132 #define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI		| \
2133 				     MSI_FLAG_PCI_MSIX			| \
2134 				     MSI_FLAG_PCI_MSIX_ALLOC_DYN	| \
2135 				     MSI_GENERIC_FLAGS_MASK)
2136 
2137 static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
2138 	.required_flags		= HV_PCIE_MSI_FLAGS_REQUIRED,
2139 	.supported_flags	= HV_PCIE_MSI_FLAGS_SUPPORTED,
2140 	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
2141 	.chip_flags		= HV_MSI_CHIP_FLAGS,
2142 	.prefix			= "HV-",
2143 	.init_dev_msi_info	= hv_pcie_init_dev_msi_info,
2144 };
2145 
2146 /* HW Interrupt Chip Descriptor */
2147 static struct irq_chip hv_msi_irq_chip = {
2148 	.name			= "Hyper-V PCIe MSI",
2149 	.irq_compose_msi_msg	= hv_compose_msi_msg,
2150 	.irq_set_affinity	= irq_chip_set_affinity_parent,
2151 	.irq_ack		= irq_chip_ack_parent,
2152 	.irq_eoi		= irq_chip_eoi_parent,
2153 	.irq_mask		= hv_irq_mask,
2154 	.irq_unmask		= hv_irq_unmask,
2155 };
2156 
2157 static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
2158 			       void *arg)
2159 {
2160 	/*
2161 	 * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
2162 	 * should be moved here.
2163 	 */
2164 	int ret;
2165 
2166 	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
2167 	if (ret < 0)
2168 		return ret;
2169 
2170 	for (int i = 0; i < nr_irqs; i++) {
2171 		irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
2172 		if (IS_ENABLED(CONFIG_X86))
2173 			__irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
2174 	}
2175 
2176 	return 0;
2177 }
2178 
2179 static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
2180 {
2181 	for (int i = 0; i < nr_irqs; i++)
2182 		hv_msi_free(d, virq + i);
2183 
2184 	irq_domain_free_irqs_top(d, virq, nr_irqs);
2185 }
2186 
2187 static const struct irq_domain_ops hv_pcie_domain_ops = {
2188 	.alloc	= hv_pcie_domain_alloc,
2189 	.free	= hv_pcie_domain_free,
2190 };
2191 
2192 /**
2193  * hv_pcie_init_irq_domain() - Initialize IRQ domain
2194  * @hbus:	The root PCI bus
2195  *
2196  * This function creates an IRQ domain which will be used for
2197  * interrupts from devices that have been passed through.  These
2198  * devices only support MSI and MSI-X, not line-based interrupts
2199  * or simulations of line-based interrupts through PCIe's
2200  * fabric-layer messages.  Because interrupts are remapped, we
2201  * can support multi-message MSI here.
2202  *
2203  * Return: '0' on success and error value on failure
2204  */
2205 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2206 {
2207 	struct irq_domain_info info = {
2208 		.fwnode		= hbus->fwnode,
2209 		.ops		= &hv_pcie_domain_ops,
2210 		.host_data	= hbus,
2211 		.parent		= hv_pci_get_root_domain(),
2212 	};
2213 
2214 	hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
2215 	if (!hbus->irq_domain) {
2216 		dev_err(&hbus->hdev->device,
2217 			"Failed to build an MSI IRQ domain\n");
2218 		return -ENODEV;
2219 	}
2220 
2221 	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2222 
2223 	return 0;
2224 }
2225 
2226 /**
2227  * get_bar_size() - Get the address space consumed by a BAR
2228  * @bar_val:	Value that a BAR returned after -1 was written
2229  *              to it.
2230  *
2231  * This function returns the size of the BAR, rounded up to 1
2232  * page.  It has to be rounded up because the hypervisor's page
2233  * table entry that maps the BAR into the VM can't specify an
2234  * offset within a page.  The invariant is that the hypervisor
2235  * must place any BARs of smaller than page length at the
2236  * beginning of a page.
2237  *
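 * For example, a 64-bit BAR that reads back 0xffffffffffffc000 after the
 * probe encodes a 16 KiB region: 1 + ~(bar_val &
 * PCI_BASE_ADDRESS_MEM_MASK) = 0x4000, which round_up() leaves unchanged
 * on systems with 4 KiB pages.
 *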
2238  * Return:	Size in bytes of the consumed MMIO space.
2239  */
2240 static u64 get_bar_size(u64 bar_val)
2241 {
2242 	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2243 			PAGE_SIZE);
2244 }
2245 
2246 /**
2247  * survey_child_resources() - Total all MMIO requirements
2248  * @hbus:	Root PCI bus, as understood by this driver
2249  */
2250 static void survey_child_resources(struct hv_pcibus_device *hbus)
2251 {
2252 	struct hv_pci_dev *hpdev;
2253 	resource_size_t bar_size = 0;
2254 	unsigned long flags;
2255 	struct completion *event;
2256 	u64 bar_val;
2257 	int i;
2258 
2259 	/* If nobody is waiting on the answer, don't compute it. */
2260 	event = xchg(&hbus->survey_event, NULL);
2261 	if (!event)
2262 		return;
2263 
2264 	/* If the answer has already been computed, go with it. */
2265 	if (hbus->low_mmio_space || hbus->high_mmio_space) {
2266 		complete(event);
2267 		return;
2268 	}
2269 
2270 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2271 
2272 	/*
2273 	 * Due to an interesting quirk of the PCI spec, all memory regions
2274 	 * for a child device are a power of 2 in size and aligned in memory,
2275 	 * so it's sufficient to just add them up without tracking alignment.
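 * (Packing naturally aligned power-of-2 regions from largest to smallest
 * leaves no gaps, provided the window base is aligned to the largest
 * region; prepopulate_bars() relies on the same property.)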
2276 	 */
2277 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2278 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2279 			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2280 				dev_err(&hbus->hdev->device,
2281 					"There's an I/O BAR in this list!\n");
2282 
2283 			if (hpdev->probed_bar[i] != 0) {
2284 				/*
2285 				 * A probed BAR has all the upper bits set that
2286 				 * can be changed.
2287 				 */
2288 
2289 				bar_val = hpdev->probed_bar[i];
2290 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2291 					bar_val |=
2292 					((u64)hpdev->probed_bar[++i] << 32);
2293 				else
2294 					bar_val |= 0xffffffff00000000ULL;
2295 
2296 				bar_size = get_bar_size(bar_val);
2297 
2298 				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2299 					hbus->high_mmio_space += bar_size;
2300 				else
2301 					hbus->low_mmio_space += bar_size;
2302 			}
2303 		}
2304 	}
2305 
2306 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2307 	complete(event);
2308 }
2309 
2310 /**
2311  * prepopulate_bars() - Fill in BARs with defaults
2312  * @hbus:	Root PCI bus, as understood by this driver
2313  *
2314  * The core PCI driver code seems much, much happier if the BARs
2315  * for a device have values upon first scan. So fill them in.
2316  * The algorithm below works down from large sizes to small,
2317  * attempting to pack the assignments optimally. The assumption,
2318  * enforced in other parts of the code, is that the beginning of
2319  * the memory-mapped I/O space will be aligned on the largest
2320  * BAR size.
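 *
 * For example, if low_mmio_space totals 0x1400000 (a 16 MiB BAR plus a
 * 4 MiB BAR), the first pass uses a 16 MiB granule and places the 16 MiB
 * BAR at the window base; the granule halves on each pass until the
 * 4 MiB pass places the second BAR immediately after the first.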
2321  */
2322 static void prepopulate_bars(struct hv_pcibus_device *hbus)
2323 {
2324 	resource_size_t high_size = 0;
2325 	resource_size_t low_size = 0;
2326 	resource_size_t high_base = 0;
2327 	resource_size_t low_base = 0;
2328 	resource_size_t bar_size;
2329 	struct hv_pci_dev *hpdev;
2330 	unsigned long flags;
2331 	u64 bar_val;
2332 	u32 command;
2333 	bool high;
2334 	int i;
2335 
2336 	if (hbus->low_mmio_space) {
2337 		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2338 		low_base = hbus->low_mmio_res->start;
2339 	}
2340 
2341 	if (hbus->high_mmio_space) {
2342 		high_size = 1ULL <<
2343 			(63 - __builtin_clzll(hbus->high_mmio_space));
2344 		high_base = hbus->high_mmio_res->start;
2345 	}
2346 
2347 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2348 
2349 	/*
2350 	 * Clear the memory enable bit, in case it's already set. This occurs
2351 	 * in the suspend path of hibernation, where the device is suspended,
2352 	 * resumed and suspended again: see hibernation_snapshot() and
2353 	 * hibernation_platform_enter().
2354 	 *
2355 	 * If the memory enable bit is already set, Hyper-V silently ignores
2356 	 * the below BAR updates, and the related PCI device driver can not
2357 	 * work, because reading from the device register(s) always returns
2358 	 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2359 	 */
2360 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2361 		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2362 		command &= ~PCI_COMMAND_MEMORY;
2363 		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2364 	}
2365 
2366 	/* Pick addresses for the BARs. */
2367 	do {
2368 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2369 			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2370 				bar_val = hpdev->probed_bar[i];
2371 				if (bar_val == 0)
2372 					continue;
2373 				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2374 				if (high) {
2375 					bar_val |=
2376 						((u64)hpdev->probed_bar[i + 1]
2377 						 << 32);
2378 				} else {
2379 					bar_val |= 0xffffffffULL << 32;
2380 				}
2381 				bar_size = get_bar_size(bar_val);
2382 				if (high) {
2383 					if (high_size != bar_size) {
2384 						i++;
2385 						continue;
2386 					}
2387 					_hv_pcifront_write_config(hpdev,
2388 						PCI_BASE_ADDRESS_0 + (4 * i),
2389 						4,
2390 						(u32)(high_base & 0xffffff00));
2391 					i++;
2392 					_hv_pcifront_write_config(hpdev,
2393 						PCI_BASE_ADDRESS_0 + (4 * i),
2394 						4, (u32)(high_base >> 32));
2395 					high_base += bar_size;
2396 				} else {
2397 					if (low_size != bar_size)
2398 						continue;
2399 					_hv_pcifront_write_config(hpdev,
2400 						PCI_BASE_ADDRESS_0 + (4 * i),
2401 						4,
2402 						(u32)(low_base & 0xffffff00));
2403 					low_base += bar_size;
2404 				}
2405 			}
2406 			if (high_size <= 1 && low_size <= 1) {
2407 				/*
2408 				 * No need to set the PCI_COMMAND_MEMORY bit as
2409 				 * the core PCI driver doesn't require the bit
2410 				 * to be pre-set. Actually here we intentionally
2411 				 * keep the bit off so that the PCI BAR probing
2412 				 * in the core PCI driver doesn't cause Hyper-V
2413 				 * to unnecessarily unmap/map the virtual BARs
2414 				 * from/to the physical BARs multiple times.
2415 				 * This reduces the VM boot time significantly
2416 				 * if the BAR sizes are huge.
2417 				 */
2418 				break;
2419 			}
2420 		}
2421 
2422 		high_size >>= 1;
2423 		low_size >>= 1;
2424 	}  while (high_size || low_size);
2425 
2426 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2427 }
2428 
2429 /*
2430  * Assign entries in sysfs pci slot directory.
2431  *
2432  * Note that this function does not need to lock the children list
2433  * because it is called from pci_devices_present_work which
2434  * is serialized with hv_eject_device_work because they are on the
2435  * same ordered workqueue. Therefore the hbus->children list will not change
2436  * even when pci_create_slot sleeps.
2437  */
2438 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2439 {
2440 	struct hv_pci_dev *hpdev;
2441 	char name[SLOT_NAME_SIZE];
2442 	int slot_nr;
2443 
2444 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2445 		if (hpdev->pci_slot)
2446 			continue;
2447 
2448 		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2449 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2450 		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2451 					  name, NULL);
2452 		if (IS_ERR(hpdev->pci_slot)) {
2453 			pr_warn("pci_create_slot %s failed\n", name);
2454 			hpdev->pci_slot = NULL;
2455 		}
2456 	}
2457 }
2458 
2459 /*
2460  * Remove entries in sysfs pci slot directory.
2461  */
2462 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2463 {
2464 	struct hv_pci_dev *hpdev;
2465 
2466 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2467 		if (!hpdev->pci_slot)
2468 			continue;
2469 		pci_destroy_slot(hpdev->pci_slot);
2470 		hpdev->pci_slot = NULL;
2471 	}
2472 }
2473 
2474 /*
2475  * Set NUMA node for the devices on the bus
2476  */
2477 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2478 {
2479 	struct pci_dev *dev;
2480 	struct pci_bus *bus = hbus->bridge->bus;
2481 	struct hv_pci_dev *hv_dev;
2482 
2483 	list_for_each_entry(dev, &bus->devices, bus_list) {
2484 		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2485 		if (!hv_dev)
2486 			continue;
2487 
2488 		/*
2489 		 * If the Hyper-V host doesn't provide a NUMA node for the
2490 		 * device, default to node 0. With NUMA_NO_NODE the kernel
2491 		 * may spread work across NUMA nodes, which degrades
2492 		 * performance on Hyper-V.
2493 		 */
2494 		set_dev_node(&dev->dev, 0);
2495 
2496 		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2497 		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
2498 			/*
2499 			 * The kernel may boot with some NUMA nodes offline
2500 			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
2501 			 * "numa=off". In those cases, adjust the host provided
2502 			 * NUMA node to a valid NUMA node used by the kernel.
2503 			 */
2504 			set_dev_node(&dev->dev,
2505 				     numa_map_to_online_node(
2506 					     hv_dev->desc.virtual_numa_node));
2507 
2508 		put_pcichild(hv_dev);
2509 	}
2510 }
2511 
2512 /**
2513  * create_root_hv_pci_bus() - Expose a new root PCI bus
2514  * @hbus:	Root PCI bus, as understood by this driver
2515  *
2516  * Return: 0 on success, -errno on failure
2517  */
2518 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2519 {
2520 	int error;
2521 	struct pci_host_bridge *bridge = hbus->bridge;
2522 
2523 	bridge->dev.parent = &hbus->hdev->device;
2524 	bridge->sysdata = &hbus->sysdata;
2525 	bridge->ops = &hv_pcifront_ops;
2526 
2527 	error = pci_scan_root_bus_bridge(bridge);
2528 	if (error)
2529 		return error;
2530 
2531 	pci_lock_rescan_remove();
2532 	hv_pci_assign_numa_node(hbus);
2533 	pci_bus_assign_resources(bridge->bus);
2534 	hv_pci_assign_slots(hbus);
2535 	pci_bus_add_devices(bridge->bus);
2536 	pci_unlock_rescan_remove();
2537 	hbus->state = hv_pcibus_installed;
2538 	return 0;
2539 }
2540 
2541 struct q_res_req_compl {
2542 	struct completion host_event;
2543 	struct hv_pci_dev *hpdev;
2544 };
2545 
2546 /**
2547  * q_resource_requirements() - Query Resource Requirements
2548  * @context:		The completion context.
2549  * @resp:		The response that came from the host.
2550  * @resp_packet_size:	The size in bytes of resp.
2551  *
2552  * This function is invoked on completion of a Query Resource
2553  * Requirements packet.
2554  */
2555 static void q_resource_requirements(void *context, struct pci_response *resp,
2556 				    int resp_packet_size)
2557 {
2558 	struct q_res_req_compl *completion = context;
2559 	struct pci_q_res_req_response *q_res_req =
2560 		(struct pci_q_res_req_response *)resp;
2561 	s32 status;
2562 	int i;
2563 
2564 	status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2565 	if (status < 0) {
2566 		dev_err(&completion->hpdev->hbus->hdev->device,
2567 			"query resource requirements failed: %x\n",
2568 			status);
2569 	} else {
2570 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2571 			completion->hpdev->probed_bar[i] =
2572 				q_res_req->probed_bar[i];
2573 		}
2574 	}
2575 
2576 	complete(&completion->host_event);
2577 }
2578 
2579 /**
2580  * new_pcichild_device() - Create a new child device
2581  * @hbus:	The internal struct tracking this root PCI bus.
2582  * @desc:	The information supplied so far from the host
2583  *              about the device.
2584  *
2585  * This function creates the tracking structure for a new child
2586  * device and kicks off the process of figuring out what it is.
2587  *
2588  * Return: Pointer to the new tracking struct
2589  */
2590 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2591 		struct hv_pcidev_description *desc)
2592 {
2593 	struct hv_pci_dev *hpdev;
2594 	struct pci_child_message *res_req;
2595 	struct q_res_req_compl comp_pkt;
2596 	struct {
2597 		struct pci_packet init_packet;
2598 		u8 buffer[sizeof(struct pci_child_message)];
2599 	} pkt;
2600 	unsigned long flags;
2601 	int ret;
2602 
2603 	hpdev = kzalloc_obj(*hpdev, GFP_KERNEL);
2604 	if (!hpdev)
2605 		return NULL;
2606 
2607 	hpdev->hbus = hbus;
2608 
2609 	memset(&pkt, 0, sizeof(pkt));
2610 	init_completion(&comp_pkt.host_event);
2611 	comp_pkt.hpdev = hpdev;
2612 	pkt.init_packet.compl_ctxt = &comp_pkt;
2613 	pkt.init_packet.completion_func = q_resource_requirements;
2614 	res_req = (struct pci_child_message *)pkt.buffer;
2615 	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2616 	res_req->wslot.slot = desc->win_slot.slot;
2617 
2618 	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2619 			       sizeof(struct pci_child_message),
2620 			       (unsigned long)&pkt.init_packet,
2621 			       VM_PKT_DATA_INBAND,
2622 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2623 	if (ret)
2624 		goto error;
2625 
2626 	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2627 		goto error;
2628 
2629 	hpdev->desc = *desc;
2630 	refcount_set(&hpdev->refs, 1);
2631 	get_pcichild(hpdev);
2632 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2633 
2634 	list_add_tail(&hpdev->list_entry, &hbus->children);
2635 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2636 	return hpdev;
2637 
2638 error:
2639 	kfree(hpdev);
2640 	return NULL;
2641 }
2642 
2643 /**
2644  * get_pcichild_wslot() - Find device from slot
2645  * @hbus:	Root PCI bus, as understood by this driver
2646  * @wslot:	Location on the bus
2647  *
2648  * This function looks up a PCI device and returns the internal
2649  * representation of it.  It acquires a reference on it, so that
2650  * the device won't be deleted while somebody is using it.  The
2651  * caller is responsible for calling put_pcichild() to release
2652  * this reference.
2653  *
2654  * Return:	Internal representation of a PCI device
2655  */
2656 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2657 					     u32 wslot)
2658 {
2659 	unsigned long flags;
2660 	struct hv_pci_dev *iter, *hpdev = NULL;
2661 
2662 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2663 	list_for_each_entry(iter, &hbus->children, list_entry) {
2664 		if (iter->desc.win_slot.slot == wslot) {
2665 			hpdev = iter;
2666 			get_pcichild(hpdev);
2667 			break;
2668 		}
2669 	}
2670 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2671 
2672 	return hpdev;
2673 }
2674 
2675 /**
2676  * pci_devices_present_work() - Handle new list of child devices
2677  * @work:	Work struct embedded in struct hv_dr_work
2678  *
2679  * "Bus Relations" is the Windows term for "children of this
2680  * bus."  The terminology is preserved here for people trying to
2681  * debug the interaction between Hyper-V and Linux.  This
2682  * function is called when the parent partition reports a list
2683  * of functions that should be observed under this PCI Express
2684  * port (bus).
2685  *
2686  * This function updates the list, and must tolerate being
2687  * called multiple times with the same information.  The typical
2688  * number of child devices is one, with very atypical cases
2689  * involving three or four, so the algorithms used here can be
2690  * simple and inefficient.
2691  *
2692  * It must also treat the omission of a previously observed device as
2693  * notification that the device no longer exists.
2694  *
2695  * Note that this function is serialized with hv_eject_device_work(),
2696  * because both are pushed to the ordered workqueue hbus->wq.
2697  */
2698 static void pci_devices_present_work(struct work_struct *work)
2699 {
2700 	u32 child_no;
2701 	bool found;
2702 	struct hv_pcidev_description *new_desc;
2703 	struct hv_pci_dev *hpdev;
2704 	struct hv_pcibus_device *hbus;
2705 	struct list_head removed;
2706 	struct hv_dr_work *dr_wrk;
2707 	struct hv_dr_state *dr = NULL;
2708 	unsigned long flags;
2709 
2710 	dr_wrk = container_of(work, struct hv_dr_work, wrk);
2711 	hbus = dr_wrk->bus;
2712 	kfree(dr_wrk);
2713 
2714 	INIT_LIST_HEAD(&removed);
2715 
2716 	/* Pull this off the queue and process it if it was the last one. */
2717 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2718 	while (!list_empty(&hbus->dr_list)) {
2719 		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2720 				      list_entry);
2721 		list_del(&dr->list_entry);
2722 
2723 		/* Throw this away if the list still has stuff in it. */
2724 		if (!list_empty(&hbus->dr_list)) {
2725 			kfree(dr);
2726 			continue;
2727 		}
2728 	}
2729 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2730 
2731 	if (!dr)
2732 		return;
2733 
2734 	mutex_lock(&hbus->state_lock);
2735 
2736 	/* First, mark all existing children as reported missing. */
2737 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2738 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
2739 		hpdev->reported_missing = true;
2740 	}
2741 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2742 
2743 	/* Next, add back any reported devices. */
2744 	for (child_no = 0; child_no < dr->device_count; child_no++) {
2745 		found = false;
2746 		new_desc = &dr->func[child_no];
2747 
2748 		spin_lock_irqsave(&hbus->device_list_lock, flags);
2749 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2750 			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2751 			    (hpdev->desc.v_id == new_desc->v_id) &&
2752 			    (hpdev->desc.d_id == new_desc->d_id) &&
2753 			    (hpdev->desc.ser == new_desc->ser)) {
2754 				hpdev->reported_missing = false;
2755 				found = true;
2756 			}
2757 		}
2758 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2759 
2760 		if (!found) {
2761 			hpdev = new_pcichild_device(hbus, new_desc);
2762 			if (!hpdev)
2763 				dev_err(&hbus->hdev->device,
2764 					"couldn't record a child device.\n");
2765 		}
2766 	}
2767 
2768 	/* Move missing children to a list on the stack. */
2769 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2770 	do {
2771 		found = false;
2772 		list_for_each_entry(hpdev, &hbus->children, list_entry) {
2773 			if (hpdev->reported_missing) {
2774 				found = true;
2775 				put_pcichild(hpdev);
2776 				list_move_tail(&hpdev->list_entry, &removed);
2777 				break;
2778 			}
2779 		}
2780 	} while (found);
2781 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2782 
2783 	/* Delete everything that should no longer exist. */
2784 	while (!list_empty(&removed)) {
2785 		hpdev = list_first_entry(&removed, struct hv_pci_dev,
2786 					 list_entry);
2787 		list_del(&hpdev->list_entry);
2788 
2789 		if (hpdev->pci_slot)
2790 			pci_destroy_slot(hpdev->pci_slot);
2791 
2792 		put_pcichild(hpdev);
2793 	}
2794 
2795 	switch (hbus->state) {
2796 	case hv_pcibus_installed:
2797 		/*
2798 		 * Tell the core to rescan bus
2799 		 * because there may have been changes.
2800 		 */
2801 		pci_lock_rescan_remove();
2802 		pci_scan_child_bus(hbus->bridge->bus);
2803 		hv_pci_assign_numa_node(hbus);
2804 		hv_pci_assign_slots(hbus);
2805 		pci_unlock_rescan_remove();
2806 		break;
2807 
2808 	case hv_pcibus_init:
2809 	case hv_pcibus_probed:
2810 		survey_child_resources(hbus);
2811 		break;
2812 
2813 	default:
2814 		break;
2815 	}
2816 
2817 	mutex_unlock(&hbus->state_lock);
2818 
2819 	kfree(dr);
2820 }
2821 
2822 /**
2823  * hv_pci_start_relations_work() - Queue work to start device discovery
2824  * @hbus:	Root PCI bus, as understood by this driver
2825  * @dr:		The list of children returned from host
2826  *
2827  * Return:  0 on success, -errno on failure
2828  */
2829 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2830 				       struct hv_dr_state *dr)
2831 {
2832 	struct hv_dr_work *dr_wrk;
2833 	unsigned long flags;
2834 	bool pending_dr;
2835 
2836 	if (hbus->state == hv_pcibus_removing) {
2837 		dev_info(&hbus->hdev->device,
2838 			 "PCI VMBus BUS_RELATIONS: ignored\n");
2839 		return -ENOENT;
2840 	}
2841 
2842 	dr_wrk = kzalloc_obj(*dr_wrk, GFP_NOWAIT);
2843 	if (!dr_wrk)
2844 		return -ENOMEM;
2845 
2846 	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2847 	dr_wrk->bus = hbus;
2848 
2849 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2850 	/*
2851 	 * If pending_dr is true, we have already queued a work,
2852 	 * which will see the new dr. Otherwise, we need to
2853 	 * queue a new work.
2854 	 */
2855 	pending_dr = !list_empty(&hbus->dr_list);
2856 	list_add_tail(&dr->list_entry, &hbus->dr_list);
2857 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2858 
2859 	if (pending_dr)
2860 		kfree(dr_wrk);
2861 	else
2862 		queue_work(hbus->wq, &dr_wrk->wrk);
2863 
2864 	return 0;
2865 }
2866 
2867 /**
2868  * hv_pci_devices_present() - Handle list of new children
2869  * @hbus:      Root PCI bus, as understood by this driver
2870  * @relations: Packet from host listing children
2871  *
2872  * Process a new list of devices on the bus. The list of devices is
2873  * discovered by the VSP and sent to us via the VSP message
2874  * PCI_BUS_RELATIONS whenever a new list of devices for this bus appears.
2875  */
2876 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2877 				   struct pci_bus_relations *relations)
2878 {
2879 	struct hv_dr_state *dr;
2880 	int i;
2881 
2882 	dr = kzalloc_flex(*dr, func, relations->device_count, GFP_NOWAIT);
2883 	if (!dr)
2884 		return;
2885 
2886 	dr->device_count = relations->device_count;
2887 	for (i = 0; i < dr->device_count; i++) {
2888 		dr->func[i].v_id = relations->func[i].v_id;
2889 		dr->func[i].d_id = relations->func[i].d_id;
2890 		dr->func[i].rev = relations->func[i].rev;
2891 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2892 		dr->func[i].subclass = relations->func[i].subclass;
2893 		dr->func[i].base_class = relations->func[i].base_class;
2894 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2895 		dr->func[i].win_slot = relations->func[i].win_slot;
2896 		dr->func[i].ser = relations->func[i].ser;
2897 	}
2898 
2899 	if (hv_pci_start_relations_work(hbus, dr))
2900 		kfree(dr);
2901 }
2902 
2903 /**
2904  * hv_pci_devices_present2() - Handle list of new children
2905  * @hbus:	Root PCI bus, as understood by this driver
2906  * @relations:	Packet from host listing children
2907  *
2908  * This function is the v2 version of hv_pci_devices_present()
2909  */
2910 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2911 				    struct pci_bus_relations2 *relations)
2912 {
2913 	struct hv_dr_state *dr;
2914 	int i;
2915 
2916 	dr = kzalloc_flex(*dr, func, relations->device_count, GFP_NOWAIT);
2917 	if (!dr)
2918 		return;
2919 
2920 	dr->device_count = relations->device_count;
2921 	for (i = 0; i < dr->device_count; i++) {
2922 		dr->func[i].v_id = relations->func[i].v_id;
2923 		dr->func[i].d_id = relations->func[i].d_id;
2924 		dr->func[i].rev = relations->func[i].rev;
2925 		dr->func[i].prog_intf = relations->func[i].prog_intf;
2926 		dr->func[i].subclass = relations->func[i].subclass;
2927 		dr->func[i].base_class = relations->func[i].base_class;
2928 		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2929 		dr->func[i].win_slot = relations->func[i].win_slot;
2930 		dr->func[i].ser = relations->func[i].ser;
2931 		dr->func[i].flags = relations->func[i].flags;
2932 		dr->func[i].virtual_numa_node =
2933 			relations->func[i].virtual_numa_node;
2934 	}
2935 
2936 	if (hv_pci_start_relations_work(hbus, dr))
2937 		kfree(dr);
2938 }
2939 
2940 /**
2941  * hv_eject_device_work() - Asynchronously handles ejection
2942  * @work:	Work struct embedded in internal device struct
2943  *
2944  * This function handles ejecting a device.  Windows will
2945  * attempt to gracefully eject a device, waiting 60 seconds to
2946  * hear back from the guest OS that this completed successfully.
2947  * If this timer expires, the device will be forcibly removed.
2948  */
2949 static void hv_eject_device_work(struct work_struct *work)
2950 {
2951 	struct pci_eject_response *ejct_pkt;
2952 	struct hv_pcibus_device *hbus;
2953 	struct hv_pci_dev *hpdev;
2954 	struct pci_dev *pdev;
2955 	unsigned long flags;
2956 	int wslot;
2957 	struct {
2958 		struct pci_packet pkt;
2959 		u8 buffer[sizeof(struct pci_eject_response)];
2960 	} ctxt;
2961 
2962 	hpdev = container_of(work, struct hv_pci_dev, wrk);
2963 	hbus = hpdev->hbus;
2964 
2965 	mutex_lock(&hbus->state_lock);
2966 
2967 	/*
2968 	 * Ejection can come before or after the PCI bus has been set up, so
2969 	 * attempt to find it and tear down the bus state, if it exists.  This
2970 	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2971 	 * because hbus->bridge->bus may not exist yet.
2972 	 */
2973 	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2974 	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2975 	if (pdev) {
2976 		pci_lock_rescan_remove();
2977 		pci_stop_and_remove_bus_device(pdev);
2978 		pci_dev_put(pdev);
2979 		pci_unlock_rescan_remove();
2980 	}
2981 
2982 	spin_lock_irqsave(&hbus->device_list_lock, flags);
2983 	list_del(&hpdev->list_entry);
2984 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2985 
2986 	if (hpdev->pci_slot)
2987 		pci_destroy_slot(hpdev->pci_slot);
2988 
2989 	memset(&ctxt, 0, sizeof(ctxt));
2990 	ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
2991 	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2992 	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2993 	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2994 			 sizeof(*ejct_pkt), 0,
2995 			 VM_PKT_DATA_INBAND, 0);
2996 
2997 	/* For the get_pcichild() in hv_pci_eject_device() */
2998 	put_pcichild(hpdev);
2999 	/* For the two refs got in new_pcichild_device() */
3000 	put_pcichild(hpdev);
3001 	put_pcichild(hpdev);
3002 	/* hpdev has been freed. Do not use it any more. */
3003 
3004 	mutex_unlock(&hbus->state_lock);
3005 }
3006 
3007 /**
3008  * hv_pci_eject_device() - Handles device ejection
3009  * @hpdev:	Internal device tracking struct
3010  *
3011  * This function is invoked when an ejection packet arrives.  It
3012  * just schedules work so that we don't re-enter the packet
3013  * delivery code handling the ejection.
3014  */
3015 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
3016 {
3017 	struct hv_pcibus_device *hbus = hpdev->hbus;
3018 	struct hv_device *hdev = hbus->hdev;
3019 
3020 	if (hbus->state == hv_pcibus_removing) {
3021 		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
3022 		return;
3023 	}
3024 
3025 	get_pcichild(hpdev);
3026 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
3027 	queue_work(hbus->wq, &hpdev->wrk);
3028 }
3029 
3030 /**
3031  * hv_pci_onchannelcallback() - Handles incoming packets
3032  * @context:	Internal bus tracking struct
3033  *
3034  * This function is invoked whenever the host sends a packet to
3035  * this channel (which is private to this root PCI bus).
3036  */
3037 static void hv_pci_onchannelcallback(void *context)
3038 {
3039 	const int packet_size = 0x100;
3040 	int ret;
3041 	struct hv_pcibus_device *hbus = context;
3042 	struct vmbus_channel *chan = hbus->hdev->channel;
3043 	u32 bytes_recvd;
3044 	u64 req_id, req_addr;
3045 	struct vmpacket_descriptor *desc;
3046 	unsigned char *buffer;
3047 	int bufferlen = packet_size;
3048 	struct pci_packet *comp_packet;
3049 	struct pci_response *response;
3050 	struct pci_incoming_message *new_message;
3051 	struct pci_bus_relations *bus_rel;
3052 	struct pci_bus_relations2 *bus_rel2;
3053 	struct pci_dev_inval_block *inval;
3054 	struct pci_dev_incoming *dev_message;
3055 	struct hv_pci_dev *hpdev;
3056 	unsigned long flags;
3057 
3058 	buffer = kmalloc(bufferlen, GFP_ATOMIC);
3059 	if (!buffer)
3060 		return;
3061 
3062 	while (1) {
3063 		ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
3064 					   &bytes_recvd, &req_id);
3065 
3066 		if (ret == -ENOBUFS) {
3067 			kfree(buffer);
3068 			/* Handle large packet */
3069 			bufferlen = bytes_recvd;
3070 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
3071 			if (!buffer)
3072 				return;
3073 			continue;
3074 		}
3075 
3076 		/* Zero length indicates there are no more packets. */
3077 		if (ret || !bytes_recvd)
3078 			break;
3079 
3080 		/*
3081 		 * All incoming packets must be at least as large as a
3082 		 * response.
3083 		 */
3084 		if (bytes_recvd <= sizeof(struct pci_response))
3085 			continue;
3086 		desc = (struct vmpacket_descriptor *)buffer;
3087 
3088 		switch (desc->type) {
3089 		case VM_PKT_COMP:
3090 
3091 			lock_requestor(chan, flags);
3092 			req_addr = __vmbus_request_addr_match(chan, req_id,
3093 							      VMBUS_RQST_ADDR_ANY);
3094 			if (req_addr == VMBUS_RQST_ERROR) {
3095 				unlock_requestor(chan, flags);
3096 				dev_err(&hbus->hdev->device,
3097 					"Invalid transaction ID %llx\n",
3098 					req_id);
3099 				break;
3100 			}
3101 			comp_packet = (struct pci_packet *)req_addr;
3102 			response = (struct pci_response *)buffer;
3103 			/*
3104 			 * Call ->completion_func() within the critical section to make
3105 			 * sure that the packet pointer is still valid during the call:
3106 			 * here 'valid' means that there's a task still waiting for the
3107 			 * completion, and that the packet data is still on the waiting
3108 			 * task's stack.  Cf. hv_compose_msi_msg().
3109 			 */
3110 			comp_packet->completion_func(comp_packet->compl_ctxt,
3111 						     response,
3112 						     bytes_recvd);
3113 			unlock_requestor(chan, flags);
3114 			break;
3115 
3116 		case VM_PKT_DATA_INBAND:
3117 
3118 			new_message = (struct pci_incoming_message *)buffer;
3119 			switch (new_message->message_type.type) {
3120 			case PCI_BUS_RELATIONS:
3121 
3122 				bus_rel = (struct pci_bus_relations *)buffer;
3123 				if (bytes_recvd < sizeof(*bus_rel) ||
3124 				    bytes_recvd <
3125 					struct_size(bus_rel, func,
3126 						    bus_rel->device_count)) {
3127 					dev_err(&hbus->hdev->device,
3128 						"bus relations too small\n");
3129 					break;
3130 				}
3131 
3132 				hv_pci_devices_present(hbus, bus_rel);
3133 				break;
3134 
3135 			case PCI_BUS_RELATIONS2:
3136 
3137 				bus_rel2 = (struct pci_bus_relations2 *)buffer;
3138 				if (bytes_recvd < sizeof(*bus_rel2) ||
3139 				    bytes_recvd <
3140 					struct_size(bus_rel2, func,
3141 						    bus_rel2->device_count)) {
3142 					dev_err(&hbus->hdev->device,
3143 						"bus relations v2 too small\n");
3144 					break;
3145 				}
3146 
3147 				hv_pci_devices_present2(hbus, bus_rel2);
3148 				break;
3149 
3150 			case PCI_EJECT:
3151 
3152 				dev_message = (struct pci_dev_incoming *)buffer;
3153 				if (bytes_recvd < sizeof(*dev_message)) {
3154 					dev_err(&hbus->hdev->device,
3155 						"eject message too small\n");
3156 					break;
3157 				}
3158 				hpdev = get_pcichild_wslot(hbus,
3159 						      dev_message->wslot.slot);
3160 				if (hpdev) {
3161 					hv_pci_eject_device(hpdev);
3162 					put_pcichild(hpdev);
3163 				}
3164 				break;
3165 
3166 			case PCI_INVALIDATE_BLOCK:
3167 
3168 				inval = (struct pci_dev_inval_block *)buffer;
3169 				if (bytes_recvd < sizeof(*inval)) {
3170 					dev_err(&hbus->hdev->device,
3171 						"invalidate message too small\n");
3172 					break;
3173 				}
3174 				hpdev = get_pcichild_wslot(hbus,
3175 							   inval->wslot.slot);
3176 				if (hpdev) {
3177 					if (hpdev->block_invalidate) {
3178 						hpdev->block_invalidate(
3179 						    hpdev->invalidate_context,
3180 						    inval->block_mask);
3181 					}
3182 					put_pcichild(hpdev);
3183 				}
3184 				break;
3185 
3186 			default:
3187 				dev_warn(&hbus->hdev->device,
3188 					"Unimplemented protocol message %x\n",
3189 					new_message->message_type.type);
3190 				break;
3191 			}
3192 			break;
3193 
3194 		default:
3195 			dev_err(&hbus->hdev->device,
3196 				"unhandled packet type %d, tid %llx len %d\n",
3197 				desc->type, req_id, bytes_recvd);
3198 			break;
3199 		}
3200 	}
3201 
3202 	kfree(buffer);
3203 }
3204 
3205 /**
3206  * hv_pci_protocol_negotiation() - Set up protocol
3207  * @hdev:		VMBus's tracking struct for this root PCI bus.
3208  * @version:		Array of supported channel protocol versions in
3209  *			the order of probing - highest go first.
3210  * @num_version:	Number of elements in the version array.
3211  *
3212  * This driver is intended to support running on Windows 10
3213  * (server) and later versions. It will not run on earlier
3214  * versions, as they assume that many of the operations which
3215  * Linux needs accomplished with a spinlock held were done via
3216  * asynchronous messaging via VMBus.  Windows 10 increases the
3217  * surface area of PCI emulation so that these actions can take
3218  * place by suspending a virtual processor for their duration.
3219  *
3220  * This function negotiates the channel protocol version,
3221  * failing if the host doesn't support the necessary protocol
3222  * level.
3223  */
3224 static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3225 				       enum pci_protocol_version_t version[],
3226 				       int num_version)
3227 {
3228 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3229 	struct pci_version_request *version_req;
3230 	struct hv_pci_compl comp_pkt;
3231 	struct pci_packet *pkt;
3232 	int ret;
3233 	int i;
3234 
3235 	/*
3236 	 * Initiate the handshake with the host and negotiate
3237 	 * a version that the host can support. We start with the
3238 	 * highest version number and go down if the host cannot
3239 	 * support it.
3240 	 */
3241 	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3242 	if (!pkt)
3243 		return -ENOMEM;
3244 
3245 	init_completion(&comp_pkt.host_event);
3246 	pkt->completion_func = hv_pci_generic_compl;
3247 	pkt->compl_ctxt = &comp_pkt;
3248 	version_req = (struct pci_version_request *)(pkt + 1);
3249 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3250 
3251 	for (i = 0; i < num_version; i++) {
3252 		version_req->protocol_version = version[i];
3253 		ret = vmbus_sendpacket(hdev->channel, version_req,
3254 				sizeof(struct pci_version_request),
3255 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
3256 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3257 		if (!ret)
3258 			ret = wait_for_response(hdev, &comp_pkt.host_event);
3259 
3260 		if (ret) {
3261 			dev_err(&hdev->device,
3262 				"PCI Pass-through VSP failed to request version: %d",
3263 				ret);
3264 			goto exit;
3265 		}
3266 
3267 		if (comp_pkt.completion_status >= 0) {
3268 			hbus->protocol_version = version[i];
3269 			dev_info(&hdev->device,
3270 				"PCI VMBus probing: Using version %#x\n",
3271 				hbus->protocol_version);
3272 			goto exit;
3273 		}
3274 
3275 		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3276 			dev_err(&hdev->device,
3277 				"PCI Pass-through VSP failed version request: %#x",
3278 				comp_pkt.completion_status);
3279 			ret = -EPROTO;
3280 			goto exit;
3281 		}
3282 
3283 		reinit_completion(&comp_pkt.host_event);
3284 	}
3285 
3286 	dev_err(&hdev->device,
3287 		"PCI pass-through VSP failed to find supported version");
3288 	ret = -EPROTO;
3289 
3290 exit:
3291 	kfree(pkt);
3292 	return ret;
3293 }
3294 
3295 /**
3296  * hv_pci_free_bridge_windows() - Release memory regions for the
3297  * bus
3298  * @hbus:	Root PCI bus, as understood by this driver
3299  */
3300 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3301 {
3302 	/*
3303 	 * Set the resources back to the way they looked when they
3304 	 * were allocated by setting IORESOURCE_BUSY again.
3305 	 */
3306 
3307 	if (hbus->low_mmio_space && hbus->low_mmio_res) {
3308 		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3309 		vmbus_free_mmio(hbus->low_mmio_res->start,
3310 				resource_size(hbus->low_mmio_res));
3311 	}
3312 
3313 	if (hbus->high_mmio_space && hbus->high_mmio_res) {
3314 		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3315 		vmbus_free_mmio(hbus->high_mmio_res->start,
3316 				resource_size(hbus->high_mmio_res));
3317 	}
3318 }
3319 
3320 /**
3321  * hv_pci_allocate_bridge_windows() - Allocate memory regions
3322  * for the bus
3323  * @hbus:	Root PCI bus, as understood by this driver
3324  *
3325  * This function calls vmbus_allocate_mmio(), which is itself a
3326  * bit of a compromise.  Ideally, we might change the pnp layer
3327  * in the kernel such that it comprehends either PCI devices
3328  * which are "grandchildren of ACPI," with some intermediate bus
3329  * node (in this case, VMBus) or change it such that it
3330  * understands VMBus.  The pnp layer, however, has been declared
3331  * deprecated, and not subject to change.
3332  *
3333  * The workaround, implemented here, is to ask VMBus to allocate
3334  * MMIO space for this bus.  VMBus itself knows which ranges are
3335  * appropriate by looking at its own ACPI objects.  Then, after
3336  * these ranges are claimed, they're modified to look like they
3337  * would have looked if the ACPI and pnp code had allocated
3338  * bridge windows.  These descriptors have to exist in this form
3339  * in order to satisfy the code which will get invoked when the
3340  * endpoint PCI function driver calls request_mem_region() or
3341  * request_mem_region_exclusive().
3342  *
3343  * Return: 0 on success, -errno on failure
3344  */
3345 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3346 {
3347 	resource_size_t align;
3348 	int ret;
3349 
3350 	if (hbus->low_mmio_space) {
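		/*
		 * Round the requested size down to its highest set bit to get
		 * a power-of-two alignment; e.g. a 0x3000-byte request is
		 * aligned to 0x2000 (bit 13, its most significant set bit).
		 */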
3351 		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
3352 		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
3353 					  (u64)(u32)0xffffffff,
3354 					  hbus->low_mmio_space,
3355 					  align, false);
3356 		if (ret) {
3357 			dev_err(&hbus->hdev->device,
3358 				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
3359 				hbus->low_mmio_space);
3360 			return ret;
3361 		}
3362 
3363 		/* Modify this resource to become a bridge window. */
3364 		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
3365 		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
3366 		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
3367 	}
3368 
3369 	if (hbus->high_mmio_space) {
3370 		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
3371 		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
3372 					  0x100000000, -1,
3373 					  hbus->high_mmio_space, align,
3374 					  false);
3375 		if (ret) {
3376 			dev_err(&hbus->hdev->device,
3377 				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
3378 				hbus->high_mmio_space);
3379 			goto release_low_mmio;
3380 		}
3381 
3382 		/* Modify this resource to become a bridge window. */
3383 		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3384 		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3385 		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3386 	}
3387 
3388 	return 0;
3389 
3390 release_low_mmio:
3391 	if (hbus->low_mmio_res) {
3392 		vmbus_free_mmio(hbus->low_mmio_res->start,
3393 				resource_size(hbus->low_mmio_res));
3394 	}
3395 
3396 	return ret;
3397 }
3398 
3399 /**
3400  * hv_allocate_config_window() - Find MMIO space for PCI Config
3401  * @hbus:	Root PCI bus, as understood by this driver
3402  *
3403  * This function claims memory-mapped I/O space for accessing
3404  * configuration space for the functions on this bus.
3405  *
3406  * Return: 0 on success, -errno on failure
3407  */
3408 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3409 {
3410 	int ret;
3411 
3412 	/*
3413 	 * Set up a region of MMIO space to use for accessing configuration
3414 	 * space.
3415 	 */
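	/*
	 * Any host-provided range will do (min 0, max -1), provided the
	 * claim is PCI_CONFIG_MMIO_LENGTH bytes long and page (0x1000)
	 * aligned; the final 'false' disallows framebuffer overlap.
	 */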
3416 	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3417 				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3418 	if (ret)
3419 		return ret;
3420 
3421 	/*
3422 	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3423 	 * resource claims (those which cannot be overlapped) and the ranges
3424 	 * which are valid for the children of this bus, which are intended
3425 	 * to be overlapped by those children.  Set the flag on this claim
3426 	 * meaning that this region can't be overlapped.
3427 	 */
3428 
3429 	hbus->mem_config->flags |= IORESOURCE_BUSY;
3430 
3431 	return 0;
3432 }
3433 
3434 static void hv_free_config_window(struct hv_pcibus_device *hbus)
3435 {
3436 	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3437 }
3438 
3439 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3440 
3441 /**
3442  * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3443  * @hdev:	VMBus's tracking struct for this root PCI bus
3444  *
3445  * Return: 0 on success, -errno on failure
3446  */
3447 static int hv_pci_enter_d0(struct hv_device *hdev)
3448 {
3449 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3450 	struct pci_bus_d0_entry *d0_entry;
3451 	struct hv_pci_compl comp_pkt;
3452 	struct pci_packet *pkt;
3453 	bool retry = true;
3454 	int ret;
3455 
3456 enter_d0_retry:
3457 	/*
3458 	 * Tell the host that the bus is ready to use and has moved into the
3459 	 * powered-on state.  This includes telling the host which region
3460 	 * of memory-mapped I/O space has been chosen for configuration space
3461 	 * access.
3462 	 */
3463 	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3464 	if (!pkt)
3465 		return -ENOMEM;
3466 
3467 	init_completion(&comp_pkt.host_event);
3468 	pkt->completion_func = hv_pci_generic_compl;
3469 	pkt->compl_ctxt = &comp_pkt;
3470 	d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
3471 	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3472 	d0_entry->mmio_base = hbus->mem_config->start;
3473 
3474 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3475 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
3476 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3477 	if (!ret)
3478 		ret = wait_for_response(hdev, &comp_pkt.host_event);
3479 
3480 	if (ret)
3481 		goto exit;
3482 
3483 	/*
3484 	 * In certain cases (kdump), the PCI device of interest was
3485 	 * not cleanly shut down and its resources are still held on the
3486 	 * host side, so the host could return an invalid device status.
3487 	 * We need to explicitly ask the host to release the resources
3488 	 * and then try to enter D0 again.
3489 	 */
3490 	if (comp_pkt.completion_status < 0 && retry) {
3491 		retry = false;
3492 
3493 		dev_err(&hdev->device, "Retrying D0 Entry\n");
3494 
3495 		/*
3496 		 * hv_pci_bus_exit() calls hv_send_resources_released()
3497 		 * to free up resources of its child devices.
3498 		 * In the kdump kernel we need to set the
3499 		 * wslot_res_allocated to 255 so it scans all child
3500 		 * devices to release resources allocated in the
3501 		 * normal kernel before panic happened.
3502 		 */
3503 		hbus->wslot_res_allocated = 255;
3504 
3505 		ret = hv_pci_bus_exit(hdev, true);
3506 
3507 		if (ret == 0) {
3508 			kfree(pkt);
3509 			goto enter_d0_retry;
3510 		}
3511 		dev_err(&hdev->device,
3512 			"Retrying D0 failed with ret %d\n", ret);
3513 	}
3514 
3515 	if (comp_pkt.completion_status < 0) {
3516 		dev_err(&hdev->device,
3517 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
3518 			comp_pkt.completion_status);
3519 		ret = -EPROTO;
3520 		goto exit;
3521 	}
3522 
3523 	ret = 0;
3524 
3525 exit:
3526 	kfree(pkt);
3527 	return ret;
3528 }
3529 
3530 /**
3531  * hv_pci_query_relations() - Ask host to send list of child
3532  * devices
3533  * @hdev:	VMBus's tracking struct for this root PCI bus
3534  *
3535  * Return: 0 on success, -errno on failure
3536  */
3537 static int hv_pci_query_relations(struct hv_device *hdev)
3538 {
3539 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3540 	struct pci_message message;
3541 	struct completion comp;
3542 	int ret;
3543 
3544 	/* Ask the host to send along the list of child devices */
3545 	init_completion(&comp);
3546 	if (cmpxchg(&hbus->survey_event, NULL, &comp))
3547 		return -ENOTEMPTY;
3548 
3549 	memset(&message, 0, sizeof(message));
3550 	message.type = PCI_QUERY_BUS_RELATIONS;
3551 
3552 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3553 			       0, VM_PKT_DATA_INBAND, 0);
3554 	if (!ret)
3555 		ret = wait_for_response(hdev, &comp);
3556 
3557 	/*
3558 	 * In the case of fast device addition/removal, it's possible that
3559 	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
3560 	 * already got a PCI_BUS_RELATIONS* message from the host and the
3561 	 * channel callback already scheduled a work to hbus->wq, which can be
3562 	 * running pci_devices_present_work() -> survey_child_resources() ->
3563 	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
3564 	 * exits and the stack variable 'comp' is no longer valid; as a result,
3565 	 * a hang or a page fault may happen when the complete() calls
3566 	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
3567 	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
3568 	 * -ENODEV, there can't be any more work item scheduled to hbus->wq
3569 	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
3570 	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
3571 	 * channel->rescind = true.
3572 	 */
3573 	flush_workqueue(hbus->wq);
3574 
3575 	return ret;
3576 }
3577 
3578 /**
3579  * hv_send_resources_allocated() - Report local resource choices
3580  * @hdev:	VMBus's tracking struct for this root PCI bus
3581  *
3582  * The host OS is expecting to be sent a request as a message
3583  * which contains all the resources that the device will use.
3584  * The response contains those same resources, "translated",
3585  * which is to say, the values which should be used by the
3586  * hardware when it delivers an interrupt.  (MMIO resources are
3587  * used in local terms.)  This is nice for Windows, and lines up
3588  * with the FDO/PDO split, which doesn't exist in Linux.  Linux
3589  * is deeply expecting to scan an emulated PCI configuration
3590  * space.  So this message is sent here only to drive the state
3591  * machine on the host forward.
3592  *
3593  * Return: 0 on success, -errno on failure
3594  */
3595 static int hv_send_resources_allocated(struct hv_device *hdev)
3596 {
3597 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3598 	struct pci_resources_assigned *res_assigned;
3599 	struct pci_resources_assigned2 *res_assigned2;
3600 	struct hv_pci_compl comp_pkt;
3601 	struct hv_pci_dev *hpdev;
3602 	struct pci_packet *pkt;
3603 	size_t size_res;
3604 	int wslot;
3605 	int ret;
3606 
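	/*
	 * Hosts speaking protocol 1.2 or later expect the larger
	 * PCI_RESOURCES_ASSIGNED2 message; older hosts expect
	 * PCI_RESOURCES_ASSIGNED.
	 */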
3607 	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
3608 			? sizeof(*res_assigned) : sizeof(*res_assigned2);
3609 
3610 	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
3611 	if (!pkt)
3612 		return -ENOMEM;
3613 
3614 	ret = 0;
3615 
3616 	for (wslot = 0; wslot < 256; wslot++) {
3617 		hpdev = get_pcichild_wslot(hbus, wslot);
3618 		if (!hpdev)
3619 			continue;
3620 
3621 		memset(pkt, 0, sizeof(*pkt) + size_res);
3622 		init_completion(&comp_pkt.host_event);
3623 		pkt->completion_func = hv_pci_generic_compl;
3624 		pkt->compl_ctxt = &comp_pkt;
3625 
3626 		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
3627 			res_assigned =
3628 				(struct pci_resources_assigned *)(pkt + 1);
3629 			res_assigned->message_type.type =
3630 				PCI_RESOURCES_ASSIGNED;
3631 			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
3632 		} else {
3633 			res_assigned2 =
3634 				(struct pci_resources_assigned2 *)(pkt + 1);
3635 			res_assigned2->message_type.type =
3636 				PCI_RESOURCES_ASSIGNED2;
3637 			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
3638 		}
3639 		put_pcichild(hpdev);
3640 
3641 		ret = vmbus_sendpacket(hdev->channel, pkt + 1,
3642 				size_res, (unsigned long)pkt,
3643 				VM_PKT_DATA_INBAND,
3644 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3645 		if (!ret)
3646 			ret = wait_for_response(hdev, &comp_pkt.host_event);
3647 		if (ret)
3648 			break;
3649 
3650 		if (comp_pkt.completion_status < 0) {
3651 			ret = -EPROTO;
3652 			dev_err(&hdev->device,
3653 				"resource allocated returned 0x%x",
3654 				comp_pkt.completion_status);
3655 			break;
3656 		}
3657 
3658 		hbus->wslot_res_allocated = wslot;
3659 	}
3660 
3661 	kfree(pkt);
3662 	return ret;
3663 }
3664 
3665 /**
3666  * hv_send_resources_released() - Report local resources
3667  * released
3668  * @hdev:	VMBus's tracking struct for this root PCI bus
3669  *
3670  * Return: 0 on success, -errno on failure
3671  */
3672 static int hv_send_resources_released(struct hv_device *hdev)
3673 {
3674 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3675 	struct pci_child_message pkt;
3676 	struct hv_pci_dev *hpdev;
3677 	int wslot;
3678 	int ret;
3679 
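	/*
	 * Walk back from the highest slot that received a "resources
	 * assigned" message, updating wslot_res_allocated as each release
	 * succeeds so that a failure leaves an accurate high-water mark.
	 */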
3680 	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
3681 		hpdev = get_pcichild_wslot(hbus, wslot);
3682 		if (!hpdev)
3683 			continue;
3684 
3685 		memset(&pkt, 0, sizeof(pkt));
3686 		pkt.message_type.type = PCI_RESOURCES_RELEASED;
3687 		pkt.wslot.slot = hpdev->desc.win_slot.slot;
3688 
3689 		put_pcichild(hpdev);
3690 
3691 		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
3692 				       VM_PKT_DATA_INBAND, 0);
3693 		if (ret)
3694 			return ret;
3695 
3696 		hbus->wslot_res_allocated = wslot - 1;
3697 	}
3698 
3699 	hbus->wslot_res_allocated = -1;
3700 
3701 	return 0;
3702 }
3703 
3704 /**
3705  * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
3706  * @hdev:	VMBus's tracking struct for this root PCI bus
3707  * @dev_id:	Identifies the device itself
3708  *
3709  * Return: 0 on success, -errno on failure
3710  */
3711 static int hv_pci_probe(struct hv_device *hdev,
3712 			const struct hv_vmbus_device_id *dev_id)
3713 {
3714 	struct pci_host_bridge *bridge;
3715 	struct hv_pcibus_device *hbus;
3716 	int ret, dom;
3717 	u16 dom_req;
3718 	char *name;
3719 
3720 	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
3721 	if (!bridge)
3722 		return -ENOMEM;
3723 
3724 	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
3725 	if (!hbus)
3726 		return -ENOMEM;
3727 
3728 	hbus->bridge = bridge;
3729 	mutex_init(&hbus->state_lock);
3730 	hbus->state = hv_pcibus_init;
3731 	hbus->wslot_res_allocated = -1;
3732 
3733 	/*
3734 	 * The PCI bus "domain" is what is called "segment" in ACPI and other
3735 	 * specs. Pull it from the instance ID, to get something usually
3736 	 * unique. In rare cases of collision, we will find another number
3737 	 * that is not in use.
3738 	 *
3739 	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
3740 	 * together with this guest driver can guarantee that (1) The only
3741 	 * domain used by Gen1 VMs for something that looks like a physical
3742 	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
3743 	 * (2) There will be no overlap between domains (after fixing possible
3744 	 * collisions) in the same VM.
3745 	 *
3746 	 * Because Gen1 VMs use domain 0, don't allow picking domain 0 here,
3747 	 * even if bytes 4 and 5 of the instance GUID are both zero. For wider
3748 	 * userspace compatibility, limit the domain ID to a 16-bit value.
3749 	 */
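	/*
	 * For example, instance GUID bytes b[4] = 0x34 and b[5] = 0x12
	 * yield dom_req 0x1234.
	 */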
3750 	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
3751 	dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX);
3752 	if (dom < 0) {
3753 		dev_err(&hdev->device,
3754 			"Unable to use dom# 0x%x or other numbers", dom_req);
3755 		ret = -EINVAL;
3756 		goto free_bus;
3757 	}
3758 
3759 	if (dom != dom_req)
3760 		dev_info(&hdev->device,
3761 			 "PCI dom# 0x%x has collision, using 0x%x",
3762 			 dom_req, dom);
3763 
3764 	hbus->bridge->domain_nr = dom;
3765 #ifdef CONFIG_X86
3766 	hbus->sysdata.domain = dom;
3767 	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
3768 #elif defined(CONFIG_ARM64)
3769 	/*
3770 	 * Set the PCI bus parent to be the corresponding VMbus
3771 	 * device. Then the VMbus device will be assigned as the
3772 	 * ACPI companion in pcibios_root_bridge_prepare() and
3773 	 * pci_dma_configure() will propagate device coherence
3774 	 * information to devices created on the bus.
3775 	 */
3776 	hbus->sysdata.parent = hdev->device.parent;
3777 	hbus->use_calls = false;
3778 #endif
3779 
3780 	hbus->hdev = hdev;
3781 	INIT_LIST_HEAD(&hbus->children);
3782 	INIT_LIST_HEAD(&hbus->dr_list);
3783 	spin_lock_init(&hbus->config_lock);
3784 	spin_lock_init(&hbus->device_list_lock);
3785 	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
3786 					   hbus->bridge->domain_nr);
3787 	if (!hbus->wq) {
3788 		ret = -ENOMEM;
3789 		goto free_bus;
3790 	}
3791 
3792 	hdev->channel->next_request_id_callback = vmbus_next_request_id;
3793 	hdev->channel->request_addr_callback = vmbus_request_addr;
3794 	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
3795 
3796 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3797 			 hv_pci_onchannelcallback, hbus);
3798 	if (ret)
3799 		goto destroy_wq;
3800 
3801 	hv_set_drvdata(hdev, hbus);
3802 
3803 	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
3804 					  ARRAY_SIZE(pci_protocol_versions));
3805 	if (ret)
3806 		goto close;
3807 
3808 	ret = hv_allocate_config_window(hbus);
3809 	if (ret)
3810 		goto close;
3811 
3812 	hbus->cfg_addr = ioremap(hbus->mem_config->start,
3813 				 PCI_CONFIG_MMIO_LENGTH);
3814 	if (!hbus->cfg_addr) {
3815 		dev_err(&hdev->device,
3816 			"Unable to map a virtual address for config space\n");
3817 		ret = -ENOMEM;
3818 		goto free_config;
3819 	}
3820 
3821 	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
3822 	if (!name) {
3823 		ret = -ENOMEM;
3824 		goto unmap;
3825 	}
3826 
3827 	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
3828 	kfree(name);
3829 	if (!hbus->fwnode) {
3830 		ret = -ENOMEM;
3831 		goto unmap;
3832 	}
3833 
3834 	ret = hv_pcie_init_irq_domain(hbus);
3835 	if (ret)
3836 		goto free_fwnode;
3837 
3838 	ret = hv_pci_query_relations(hdev);
3839 	if (ret)
3840 		goto free_irq_domain;
3841 
3842 	mutex_lock(&hbus->state_lock);
3843 
3844 	ret = hv_pci_enter_d0(hdev);
3845 	if (ret)
3846 		goto release_state_lock;
3847 
3848 	ret = hv_pci_allocate_bridge_windows(hbus);
3849 	if (ret)
3850 		goto exit_d0;
3851 
3852 	ret = hv_send_resources_allocated(hdev);
3853 	if (ret)
3854 		goto free_windows;
3855 
3856 	prepopulate_bars(hbus);
3857 
3858 	hbus->state = hv_pcibus_probed;
3859 
3860 	ret = create_root_hv_pci_bus(hbus);
3861 	if (ret)
3862 		goto free_windows;
3863 
3864 	mutex_unlock(&hbus->state_lock);
3865 	return 0;
3866 
3867 free_windows:
3868 	hv_pci_free_bridge_windows(hbus);
3869 exit_d0:
3870 	(void) hv_pci_bus_exit(hdev, true);
3871 release_state_lock:
3872 	mutex_unlock(&hbus->state_lock);
3873 free_irq_domain:
3874 	irq_domain_remove(hbus->irq_domain);
3875 free_fwnode:
3876 	irq_domain_free_fwnode(hbus->fwnode);
3877 unmap:
3878 	iounmap(hbus->cfg_addr);
3879 free_config:
3880 	hv_free_config_window(hbus);
3881 close:
3882 	vmbus_close(hdev->channel);
3883 destroy_wq:
3884 	destroy_workqueue(hbus->wq);
3885 free_bus:
3886 	kfree(hbus);
3887 	return ret;
3888 }
3889 
3890 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
3891 {
3892 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3893 	struct vmbus_channel *chan = hdev->channel;
3894 	struct {
3895 		struct pci_packet teardown_packet;
3896 		u8 buffer[sizeof(struct pci_message)];
3897 	} pkt;
3898 	struct pci_message *msg;
3899 	struct hv_pci_compl comp_pkt;
3900 	struct hv_pci_dev *hpdev, *tmp;
3901 	unsigned long flags;
3902 	u64 trans_id;
3903 	int ret;
3904 
3905 	/*
3906 	 * After the host sends the RESCIND_CHANNEL message, it doesn't
3907 	 * access the per-channel ringbuffer any longer.
3908 	 */
3909 	if (chan->rescind)
3910 		return 0;
3911 
3912 	if (!keep_devs) {
3913 		struct list_head removed;
3914 
3915 		/* Move all present children to the list on stack */
3916 		INIT_LIST_HEAD(&removed);
3917 		spin_lock_irqsave(&hbus->device_list_lock, flags);
3918 		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
3919 			list_move_tail(&hpdev->list_entry, &removed);
3920 		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
3921 
3922 		/* Remove all children in the list */
3923 		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
3924 			list_del(&hpdev->list_entry);
3925 			if (hpdev->pci_slot)
3926 				pci_destroy_slot(hpdev->pci_slot);
3927 			/* For the two refs got in new_pcichild_device() */
3928 			put_pcichild(hpdev);
3929 			put_pcichild(hpdev);
3930 		}
3931 	}
3932 
3933 	ret = hv_send_resources_released(hdev);
3934 	if (ret) {
3935 		dev_err(&hdev->device,
3936 			"Couldn't send resources released packet(s)\n");
3937 		return ret;
3938 	}
3939 
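	/* Ask the host to move the bus out of D0 (PCI_BUS_D0EXIT). */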
3940 	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
3941 	init_completion(&comp_pkt.host_event);
3942 	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
3943 	pkt.teardown_packet.compl_ctxt = &comp_pkt;
3944 	msg = (struct pci_message *)pkt.buffer;
3945 	msg->type = PCI_BUS_D0EXIT;
3946 
3947 	ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
3948 				     (unsigned long)&pkt.teardown_packet,
3949 				     &trans_id, VM_PKT_DATA_INBAND,
3950 				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3951 	if (ret)
3952 		return ret;
3953 
3954 	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
3955 		/*
3956 		 * The completion packet on the stack becomes invalid after
3957 		 * 'return'; remove the ID from the VMbus requestor if the
3958 		 * identifier is still mapped to/associated with the packet.
3959 		 *
3960 		 * Cf. hv_pci_onchannelcallback().
3961 		 */
3962 		vmbus_request_addr_match(chan, trans_id,
3963 					 (unsigned long)&pkt.teardown_packet);
3964 		return -ETIMEDOUT;
3965 	}
3966 
3967 	return 0;
3968 }
3969 
3970 /**
3971  * hv_pci_remove() - Remove routine for this VMBus channel
3972  * @hdev:	VMBus's tracking struct for this root PCI bus
3973  */
3974 static void hv_pci_remove(struct hv_device *hdev)
3975 {
3976 	struct hv_pcibus_device *hbus;
3977 
3978 	hbus = hv_get_drvdata(hdev);
3979 	if (hbus->state == hv_pcibus_installed) {
3980 		tasklet_disable(&hdev->channel->callback_event);
3981 		hbus->state = hv_pcibus_removing;
3982 		tasklet_enable(&hdev->channel->callback_event);
3983 		destroy_workqueue(hbus->wq);
3984 		hbus->wq = NULL;
3985 		/*
3986 		 * At this point, no work is running or can be scheduled
3987 		 * on hbus->wq. We can't race with hv_pci_devices_present()
3988 		 * or hv_pci_eject_device(), so it's safe to proceed.
3989 		 */
3990 
3991 		/* Remove the bus from PCI's point of view. */
3992 		pci_lock_rescan_remove();
3993 		pci_stop_root_bus(hbus->bridge->bus);
3994 		hv_pci_remove_slots(hbus);
3995 		pci_remove_root_bus(hbus->bridge->bus);
3996 		pci_unlock_rescan_remove();
3997 	}
3998 
3999 	hv_pci_bus_exit(hdev, false);
4000 
4001 	vmbus_close(hdev->channel);
4002 
4003 	iounmap(hbus->cfg_addr);
4004 	hv_free_config_window(hbus);
4005 	hv_pci_free_bridge_windows(hbus);
4006 	irq_domain_remove(hbus->irq_domain);
4007 	irq_domain_free_fwnode(hbus->fwnode);
4008 
4009 	kfree(hbus);
4010 }
4011 
4012 static int hv_pci_suspend(struct hv_device *hdev)
4013 {
4014 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
4015 	enum hv_pcibus_state old_state;
4016 	int ret;
4017 
4018 	/*
4019 	 * hv_pci_suspend() must make sure there are no pending work items
4020 	 * before calling vmbus_close(), since it runs in a process context
4021 	 * as a callback in dpm_suspend().  When it starts to run, the channel
4022 	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
4023 	 * context, can be still running concurrently and scheduling new work
4024 	 * items onto hbus->wq in hv_pci_devices_present() and
4025 	 * hv_pci_eject_device(), and the work item handlers can access the
4026 	 * vmbus channel, which hv_pci_suspend() may be closing, e.g.
4027 	 * the work item handler pci_devices_present_work() ->
4028 	 * new_pcichild_device() writes to the vmbus channel.
4029 	 *
4030 	 * To eliminate the race, hv_pci_suspend() disables the channel
4031 	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
4032 	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
4033 	 * it knows that no new work item can be scheduled, and then it flushes
4034 	 * hbus->wq and safely closes the vmbus channel.
4035 	 */
4036 	tasklet_disable(&hdev->channel->callback_event);
4037 
4038 	/* Change the hbus state to prevent new work items. */
4039 	old_state = hbus->state;
4040 	if (hbus->state == hv_pcibus_installed)
4041 		hbus->state = hv_pcibus_removing;
4042 
4043 	tasklet_enable(&hdev->channel->callback_event);
4044 
4045 	if (old_state != hv_pcibus_installed)
4046 		return -EINVAL;
4047 
4048 	flush_workqueue(hbus->wq);
4049 
4050 	ret = hv_pci_bus_exit(hdev, true);
4051 	if (ret)
4052 		return ret;
4053 
4054 	vmbus_close(hdev->channel);
4055 
4056 	return 0;
4057 }
4058 
4059 static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
4060 {
4061 	struct irq_data *irq_data;
4062 	struct msi_desc *entry;
4063 
4064 	if (!pdev->msi_enabled && !pdev->msix_enabled)
4065 		return 0;
4066 
4067 	guard(msi_descs_lock)(&pdev->dev);
4068 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
4069 		irq_data = irq_get_irq_data(entry->irq);
4070 		if (WARN_ON_ONCE(!irq_data))
4071 			return -EINVAL;
4072 		hv_compose_msi_msg(irq_data, &entry->msg);
4073 	}
4074 	return 0;
4075 }
4076 
4077 /*
4078  * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg()
4079  * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
4080  * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
4081  * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
4082  * Table entries.
4083  */
4084 static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
4085 {
4086 	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
4087 }
4088 
4089 static int hv_pci_resume(struct hv_device *hdev)
4090 {
4091 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
4092 	enum pci_protocol_version_t version[1];
4093 	int ret;
4094 
4095 	hbus->state = hv_pcibus_init;
4096 
4097 	hdev->channel->next_request_id_callback = vmbus_next_request_id;
4098 	hdev->channel->request_addr_callback = vmbus_request_addr;
4099 	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
4100 
4101 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
4102 			 hv_pci_onchannelcallback, hbus);
4103 	if (ret)
4104 		return ret;
4105 
4106 	/* Only use the version that was in use before hibernation. */
4107 	version[0] = hbus->protocol_version;
4108 	ret = hv_pci_protocol_negotiation(hdev, version, 1);
4109 	if (ret)
4110 		goto out;
4111 
4112 	ret = hv_pci_query_relations(hdev);
4113 	if (ret)
4114 		goto out;
4115 
4116 	mutex_lock(&hbus->state_lock);
4117 
4118 	ret = hv_pci_enter_d0(hdev);
4119 	if (ret)
4120 		goto release_state_lock;
4121 
4122 	ret = hv_send_resources_allocated(hdev);
4123 	if (ret)
4124 		goto release_state_lock;
4125 
4126 	prepopulate_bars(hbus);
4127 
4128 	hv_pci_restore_msi_state(hbus);
4129 
4130 	hbus->state = hv_pcibus_installed;
4131 	mutex_unlock(&hbus->state_lock);
4132 	return 0;
4133 
4134 release_state_lock:
4135 	mutex_unlock(&hbus->state_lock);
4136 out:
4137 	vmbus_close(hdev->channel);
4138 	return ret;
4139 }
4140 
4141 static const struct hv_vmbus_device_id hv_pci_id_table[] = {
4142 	/* PCI Pass-through Class ID */
4143 	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
4144 	{ HV_PCIE_GUID, },
4145 	{ },
4146 };
4147 
4148 MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
4149 
4150 static struct hv_driver hv_pci_drv = {
4151 	.name		= "hv_pci",
4152 	.id_table	= hv_pci_id_table,
4153 	.probe		= hv_pci_probe,
4154 	.remove		= hv_pci_remove,
4155 	.suspend	= hv_pci_suspend,
4156 	.resume		= hv_pci_resume,
4157 };
4158 
4159 static void __exit exit_hv_pci_drv(void)
4160 {
4161 	vmbus_driver_unregister(&hv_pci_drv);
4162 
4163 	hvpci_block_ops.read_block = NULL;
4164 	hvpci_block_ops.write_block = NULL;
4165 	hvpci_block_ops.reg_blk_invalidate = NULL;
4166 }
4167 
4168 static int __init init_hv_pci_drv(void)
4169 {
4170 	int ret;
4171 
4172 	if (!hv_is_hyperv_initialized())
4173 		return -ENODEV;
4174 
4175 	if (hv_root_partition() && !hv_nested)
4176 		return -ENODEV;
4177 
4178 	ret = hv_pci_irqchip_init();
4179 	if (ret)
4180 		return ret;
4181 
4182 	/* Initialize PCI block r/w interface */
4183 	hvpci_block_ops.read_block = hv_read_config_block;
4184 	hvpci_block_ops.write_block = hv_write_config_block;
4185 	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
4186 
4187 	return vmbus_driver_register(&hv_pci_drv);
4188 }
4189 
4190 module_init(init_hv_pci_drv);
4191 module_exit(exit_hv_pci_drv);
4192 
4193 MODULE_DESCRIPTION("Hyper-V PCI");
4194 MODULE_LICENSE("GPL v2");
4195