xref: /linux/include/linux/vdpa.h (revision a353e7260b5951a62dce43630ae9265accd96a4b) !
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VDPA_H
3 #define _LINUX_VDPA_H
4 
5 #include <linux/kernel.h>
6 #include <linux/device.h>
7 #include <linux/interrupt.h>
8 #include <linux/virtio.h>
9 #include <linux/vhost_iotlb.h>
10 #include <linux/virtio_net.h>
11 #include <linux/virtio_blk.h>
12 #include <linux/if_ether.h>
13 
/**
 * struct vdpa_callback - vDPA callback definition.
 * @callback: interrupt callback function
 * @private: the data passed to the callback function
 * @trigger: the eventfd for the callback (Optional).
 *           When it is set, the vDPA driver must guarantee that
 *           signaling it is functionally equivalent to triggering
 *           the callback. Then the vDPA parent can signal it directly
 *           instead of triggering the callback.
 */
struct vdpa_callback {
	irqreturn_t (*callback)(void *data);
	void *private;
	struct eventfd_ctx *trigger;
};
29 
/**
 * struct vdpa_notification_area - vDPA notification area
 * @addr: base address of the notification area
 * @size: size of the notification area
 *
 * Describes the MMIO region a driver may map so that the guest can
 * kick a virtqueue by writing into it directly.
 */
struct vdpa_notification_area {
	resource_size_t addr;
	resource_size_t size;
};
39 
/**
 * struct vdpa_vq_state_split - vDPA split virtqueue state
 * @avail_index: available index
 */
struct vdpa_vq_state_split {
	u16	avail_index;
};
47 
/**
 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
 * @last_avail_counter: last driver ring wrap counter observed by device
 * @last_avail_idx: device available index
 * @last_used_counter: device ring wrap counter
 * @last_used_idx: used index
 *
 * The wrap counters are single bits and the indices 15 bits, matching
 * the packed virtqueue layout of the virtio specification.
 */
struct vdpa_vq_state_packed {
	u16	last_avail_counter:1;
	u16	last_avail_idx:15;
	u16	last_used_counter:1;
	u16	last_used_idx:15;
};
61 
/**
 * struct vdpa_vq_state - vDPA virtqueue state
 * @split: state of a split virtqueue
 * @packed: state of a packed virtqueue
 *
 * Only one member is meaningful at a time; which one presumably depends
 * on whether VIRTIO_F_RING_PACKED was negotiated — confirm with callers.
 */
struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;
		struct vdpa_vq_state_packed packed;
	};
};
68 
69 struct vdpa_mgmt_dev;
70 
/**
 * struct vdpa_device - representation of a vDPA device
 * @dev: underlying device
 * @vmap: the metadata passed to the upper layer to be used for mapping
 * @driver_override: driver name to force a match; do not set directly,
 *                   because core frees it; use driver_set_override() to
 *                   set or clear it.
 * @config: the configuration ops for this device.
 * @map: the map ops for this device
 * @cf_lock: Protects get and set access to configuration layout.
 * @index: device index
 * @features_valid: were features initialized? for legacy guests
 * @ngroups: the number of virtqueue groups
 * @nas: the number of address spaces
 * @use_va: indicate whether virtual address must be used by this device
 * @nvqs: maximum number of supported virtqueues
 * @mdev: management device pointer; caller must setup when registering device as part
 *	  of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
 */
struct vdpa_device {
	struct device dev;
	union virtio_map vmap;
	const char *driver_override;
	const struct vdpa_config_ops *config;
	const struct virtio_map_ops *map;
	struct rw_semaphore cf_lock; /* Protects get/set config */
	unsigned int index;
	bool features_valid;
	bool use_va;
	u32 nvqs;
	struct vdpa_mgmt_dev *mdev;
	unsigned int ngroups;
	unsigned int nas;
};
105 
/**
 * struct vdpa_iova_range - the IOVA range supported by the device
 * @first: start of the IOVA range
 * @last: end of the IOVA range (inclusive, going by the first/last naming)
 */
struct vdpa_iova_range {
	u64 first;
	u64 last;
};
115 
/**
 * struct vdpa_dev_set_config - attributes to apply when creating or
 *				reconfiguring a vDPA device
 * @device_features: device features to provision
 * @net: virtio-net specific attributes
 * @net.mac: MAC address to provision
 * @net.mtu: MTU to provision
 * @net.max_vq_pairs: maximum number of virtqueue pairs to provision
 * @mask: bit mask marking which attributes are set; checked by
 *	  the parent driver (see dev_set_attr in struct vdpa_mgmtdev_ops)
 */
struct vdpa_dev_set_config {
	u64 device_features;
	struct {
		u8 mac[ETH_ALEN];
		u16 mtu;
		u16 max_vq_pairs;
	} net;
	u64 mask;
};
125 
/**
 * struct vdpa_map_file - file area for device memory mapping
 * @file: vma->vm_file for the mapping
 * @offset: mapping offset in the vm_file
 */
struct vdpa_map_file {
	struct file *file;
	u64 offset;
};
135 
/**
 * struct vdpa_config_ops - operations for configuring a vDPA device.
 * Note: vDPA device drivers are required to implement all of the
 * operations unless it is mentioned to be optional in the following
 * list.
 *
 * @set_vq_address:		Set the address of virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@desc_area: address of desc area
 *				@driver_area: address of driver area
 *				@device_area: address of device area
 *				Returns integer: success (0) or error (< 0)
 * @set_vq_num:			Set the size of virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@num: the size of virtqueue
 * @kick_vq:			Kick the virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 * @kick_vq_with_data:		Kick the virtqueue and supply extra data
 *				(only if VIRTIO_F_NOTIFICATION_DATA is negotiated)
 *				@vdev: vdpa device
 *				@data for split virtqueue:
 *				16 bits vqn and 16 bits next available index.
 *				@data for packed virtqueue:
 *				16 bits vqn, 15 least significant bits of
 *				next available index and 1 bit next_wrap.
 * @set_vq_cb:			Set the interrupt callback function for
 *				a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@cb: virtio-vdev interrupt callback structure
 * @set_vq_ready:		Set ready status for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@ready: ready (true) not ready(false)
 * @get_vq_ready:		Get ready status for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns boolean: ready (true) or not (false)
 * @set_vq_state:		Set the state for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@state: pointer to set virtqueue state (last_avail_idx)
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_state:		Get the state for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@state: pointer to returned state (last_avail_idx)
 * @get_vendor_vq_stats:	Get the vendor statistics of a device.
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@msg: socket buffer holding stats message
 *				@extack: extack for reporting error messages
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_notification:	Get the notification area for a virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns the notification area
 * @get_vq_irq:			Get the irq number of a virtqueue (optional,
 *				but must be implemented if vq irq offloading
 *				is required)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns int: irq number of a virtqueue,
 *				negative number if no irq assigned.
 * @get_vq_size:		Get the size of a specific virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Return u16: the size of the virtqueue
 * @get_vq_align:		Get the virtqueue align requirement
 *				for the device
 *				@vdev: vdpa device
 *				Returns virtqueue align requirement
 * @get_vq_group:		Get the group id for a specific
 *				virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns u32: group id for this virtqueue
 * @get_vq_desc_group:		Get the group id for the descriptor table of
 *				a specific virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns u32: group id for the descriptor table
 *				portion of this virtqueue. Could be different
 *				than the one from @get_vq_group, in which case
 *				the access to the descriptor table can be
 *				confined to a separate asid, isolating from
 *				the virtqueue's buffer address access.
 * @get_device_features:	Get virtio features supported by the device
 *				@vdev: vdpa device
 *				Returns the virtio features supported by the
 *				device
 * @get_backend_features:	Get parent-specific backend features (optional)
 *				Returns the vdpa features supported by the
 *				device.
 * @set_driver_features:	Set virtio features supported by the driver
 *				@vdev: vdpa device
 *				@features: feature support by the driver
 *				Returns integer: success (0) or error (< 0)
 * @get_driver_features:	Get the virtio driver features in action
 *				@vdev: vdpa device
 *				Returns the virtio features accepted
 * @set_config_cb:		Set the config interrupt callback
 *				@vdev: vdpa device
 *				@cb: virtio-vdev interrupt callback structure
 * @get_vq_num_max:		Get the max size of virtqueue
 *				@vdev: vdpa device
 *				Returns u16: max size of virtqueue
 * @get_vq_num_min:		Get the min size of virtqueue (optional)
 *				@vdev: vdpa device
 *				Returns u16: min size of virtqueue
 * @get_device_id:		Get virtio device id
 *				@vdev: vdpa device
 *				Returns u32: virtio device id
 * @get_vendor_id:		Get id for the vendor that provides this device
 *				@vdev: vdpa device
 *				Returns u32: virtio vendor id
 * @get_status:			Get the device status
 *				@vdev: vdpa device
 *				Returns u8: virtio device status
 * @set_status:			Set the device status
 *				@vdev: vdpa device
 *				@status: virtio device status
 * @reset:			Reset device
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @compat_reset:		Reset device with compatibility quirks to
 *				accommodate older userspace. Only needed by
 *				parent driver which used to have bogus reset
 *				behaviour, and has to maintain such behaviour
 *				for compatibility with older userspace.
 *				Historically compliant driver only has to
 *				implement .reset, historically non-compliant
 *				driver should implement both.
 *				@vdev: vdpa device
 *				@flags: compatibility quirks for reset
 *				Returns integer: success (0) or error (< 0)
 * @suspend:			Suspend the device (optional)
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @resume:			Resume the device (optional)
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @get_config_size:		Get the size of the configuration space,
 *				including fields that are conditional on
 *				feature bits.
 *				@vdev: vdpa device
 *				Returns size_t: configuration size
 * @get_config:			Read from device specific configuration space
 *				@vdev: vdpa device
 *				@offset: offset from the beginning of
 *				configuration space
 *				@buf: buffer used to read to
 *				@len: the length to read from
 *				configuration space
 * @set_config:			Write to device specific configuration space
 *				@vdev: vdpa device
 *				@offset: offset from the beginning of
 *				configuration space
 *				@buf: buffer used to write from
 *				@len: the length to write to
 *				configuration space
 * @get_generation:		Get device config generation (optional)
 *				@vdev: vdpa device
 *				Returns u32: device generation
 * @get_iova_range:		Get supported iova range (optional)
 *				@vdev: vdpa device
 *				Returns the iova range supported by
 *				the device.
 * @set_vq_affinity:		Set the affinity of virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@cpu_mask: the affinity mask
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_affinity:		Get the affinity of virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns the affinity mask
 * @set_group_asid:		Set address space identifier for a
 *				virtqueue group (optional).  Caller must
 *				prevent this from being executed concurrently
 *				with set_status.
 *				@vdev: vdpa device
 *				@group: virtqueue group
 *				@asid: address space id for this group
 *				Returns integer: success (0) or error (< 0)
 * @set_map:			Set device memory mapping (optional)
 *				Needed for devices using device-specific
 *				DMA translation (on-chip IOMMU)
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				@iotlb: vhost memory mapping to be
 *				used by the vDPA
 *				Returns integer: success (0) or error (< 0)
 * @dma_map:			Map an area of PA to IOVA (optional)
 *				Needed for devices using device-specific
 *				DMA translation (on-chip IOMMU)
 *				and preferring incremental map.
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				@iova: iova to be mapped
 *				@size: size of the area
 *				@pa: physical address for the map
 *				@perm: device access permission (VHOST_MAP_XX)
 *				Returns integer: success (0) or error (< 0)
 * @dma_unmap:			Unmap an area of IOVA (optional but
 *				must be implemented with dma_map)
 *				Needed for devices using device-specific
 *				DMA translation (on-chip IOMMU)
 *				and preferring incremental unmap.
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				@iova: iova to be unmapped
 *				@size: size of the area
 *				Returns integer: success (0) or error (< 0)
 * @reset_map:			Reset device memory mapping to the default
 *				state (optional)
 *				Needed for devices that are using device
 *				specific DMA translation and prefer mapping
 *				to be decoupled from the virtio life cycle,
 *				i.e. device .reset op does not reset mapping
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_map:			Get the map metadata for a specific
 *				virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns the map token union, or NULL on error
 * @bind_mm:			Bind the device to a specific address space
 *				so the vDPA framework can use VA when this
 *				callback is implemented. (optional)
 *				@vdev: vdpa device
 *				@mm: address space to bind
 * @unbind_mm:			Unbind the device from the address space
 *				bound using the bind_mm callback. (optional)
 *				@vdev: vdpa device
 * @free:			Free resources that belong to vDPA (optional)
 *				@vdev: vdpa device
 */
struct vdpa_config_ops {
	/* Virtqueue ops */
	int (*set_vq_address)(struct vdpa_device *vdev,
			      u16 idx, u64 desc_area, u64 driver_area,
			      u64 device_area);
	void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
	void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
	void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data);
	void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
			  struct vdpa_callback *cb);
	void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
	bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
	int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
			    const struct vdpa_vq_state *state);
	int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
			    struct vdpa_vq_state *state);
	int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
				   struct sk_buff *msg,
				   struct netlink_ext_ack *extack);
	struct vdpa_notification_area
	(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
	/* vq irq is not expected to be changed once DRIVER_OK is set */
	int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
	u16 (*get_vq_size)(struct vdpa_device *vdev, u16 idx);

	/* Device ops */
	u32 (*get_vq_align)(struct vdpa_device *vdev);
	u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
	u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
	u64 (*get_device_features)(struct vdpa_device *vdev);
	u64 (*get_backend_features)(const struct vdpa_device *vdev);
	int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
	u64 (*get_driver_features)(struct vdpa_device *vdev);
	void (*set_config_cb)(struct vdpa_device *vdev,
			      struct vdpa_callback *cb);
	u16 (*get_vq_num_max)(struct vdpa_device *vdev);
	u16 (*get_vq_num_min)(struct vdpa_device *vdev);
	u32 (*get_device_id)(struct vdpa_device *vdev);
	u32 (*get_vendor_id)(struct vdpa_device *vdev);
	u8 (*get_status)(struct vdpa_device *vdev);
	void (*set_status)(struct vdpa_device *vdev, u8 status);
	int (*reset)(struct vdpa_device *vdev);
	int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
#define VDPA_RESET_F_CLEAN_MAP 1
	int (*suspend)(struct vdpa_device *vdev);
	int (*resume)(struct vdpa_device *vdev);
	size_t (*get_config_size)(struct vdpa_device *vdev);
	void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
			   void *buf, unsigned int len);
	void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
			   const void *buf, unsigned int len);
	u32 (*get_generation)(struct vdpa_device *vdev);
	struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
	int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
						 u16 idx);

	/* DMA ops */
	int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
		       struct vhost_iotlb *iotlb);
	int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
		       u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
	int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
			 u64 iova, u64 size);
	int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
	int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
			      unsigned int asid);
	union virtio_map (*get_vq_map)(struct vdpa_device *vdev, u16 idx);
	int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
	void (*unbind_mm)(struct vdpa_device *vdev);

	/* Free device resources */
	void (*free)(struct vdpa_device *vdev);
};
451 
/*
 * Internal allocator; drivers normally use the vdpa_alloc_device()
 * wrapper below, which embeds struct vdpa_device in a driver-private
 * structure and computes @size for it.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					const struct virtio_map_ops *map,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va);
458 
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @map: the map operations that is supported by this device
 * @ngroups: the number of virtqueue groups supported by this device
 * @nas: the number of address spaces
 * @name: name of the vdpa device
 * @use_va: indicate whether virtual address must be used by this device
 *
 * The BUILD_BUG_ON_ZERO() below enforces that @member is the first
 * field of @dev_struct (offset 0), so the container_of() conversion
 * between the two is valid.
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, map, \
			  ngroups, nas, name, use_va)		   \
			  container_of((__vdpa_alloc_device( \
				       parent, config, map, ngroups, nas, \
				       (sizeof(dev_struct) + \
				       BUILD_BUG_ON_ZERO(offsetof( \
				       dev_struct, member))), name, use_va)), \
				       dev_struct, member)
482 
/* Register/unregister a vDPA device with the vDPA bus. */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);

/*
 * Underscore variants are for management drivers, called from the
 * dev_add()/dev_del() mgmtdev callbacks (see struct vdpa_mgmtdev_ops).
 * NOTE(review): presumably these expect the vdpa core lock to already
 * be held by the management path — confirm against drivers/vdpa/vdpa.c.
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
488 
/**
 * struct vdpa_driver - operations for a vDPA driver
 * @driver: underlying device driver
 * @probe: the function to call when a device is found.  Returns 0 or -errno.
 * @remove: the function to call when a device is removed.
 */
struct vdpa_driver {
	struct device_driver driver;
	int (*probe)(struct vdpa_device *vdev);
	void (*remove)(struct vdpa_device *vdev);
};
500 
/* Register a vDPA driver; the owning module is recorded automatically. */
#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);
505 
/*
 * module_vdpa_driver() - boilerplate helper for modules that do nothing
 * in init/exit but register and unregister a vDPA driver.
 */
#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver,	\
		      vdpa_unregister_driver)
509 
drv_to_vdpa(struct device_driver * driver)510 static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
511 {
512 	return container_of(driver, struct vdpa_driver, driver);
513 }
514 
dev_to_vdpa(struct device * _dev)515 static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
516 {
517 	return container_of(_dev, struct vdpa_device, dev);
518 }
519 
vdpa_get_drvdata(const struct vdpa_device * vdev)520 static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
521 {
522 	return dev_get_drvdata(&vdev->dev);
523 }
524 
vdpa_set_drvdata(struct vdpa_device * vdev,void * data)525 static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
526 {
527 	dev_set_drvdata(&vdev->dev, data);
528 }
529 
vdpa_get_map(struct vdpa_device * vdev)530 static inline union virtio_map vdpa_get_map(struct vdpa_device *vdev)
531 {
532 	return vdev->vmap;
533 }
534 
/*
 * Reset the device while holding cf_lock, invalidating any previously
 * negotiated features.  If the parent implements compat_reset and the
 * caller passed quirk flags, the compat variant is used instead of the
 * plain reset op.
 */
static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
{
	const struct vdpa_config_ops *ops = vdev->config;
	bool compat;
	int ret;

	down_write(&vdev->cf_lock);
	vdev->features_valid = false;
	compat = flags && ops->compat_reset;
	ret = compat ? ops->compat_reset(vdev, flags) : ops->reset(vdev);
	up_write(&vdev->cf_lock);

	return ret;
}
549 
/*
 * Hand the negotiated features to the parent driver and mark them
 * valid.  Caller must already hold cf_lock (see vdpa_set_features()).
 */
static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
{
	vdev->features_valid = true;

	return vdev->config->set_driver_features(vdev, features);
}
560 
/* Locked wrapper: negotiate features under cf_lock. */
static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
	int err;

	down_write(&vdev->cf_lock);
	err = vdpa_set_features_unlocked(vdev, features);
	up_write(&vdev->cf_lock);

	return err;
}
571 
572 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
573 		     void *buf, unsigned int len);
574 void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
575 		     const void *buf, unsigned int length);
576 void vdpa_set_status(struct vdpa_device *vdev, u8 status);
577 
/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     @config: config attributes to apply to the device under creation
 *	     The driver needs to add a new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device. The driver must return 0
 *	     on success or an appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     The driver needs to remove the specified device by calling
 *	     _vdpa_unregister_device().
 * @dev_set_attr: change a vdpa device's attr after it was created
 *	     @mdev: parent device to use for device
 *	     @dev: vdpa device structure
 *	     @config: attributes to be set for the device.
 *	     The driver needs to check the mask of the structure and then set
 *	     the related information to the vdpa device. The driver must return 0
 *	     if set successfully.
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
	int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
			    const struct vdpa_dev_set_config *config);
};
607 
/**
 * struct vdpa_mgmt_dev - vdpa management device
 * @device: Management parent device
 * @ops: operations supported by management device
 * @id_table: Pointer to device id table of supported ids
 * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
 *		      management device support during dev_add callback
 * @list: list entry
 * @supported_features: features supported by device
 * @max_supported_vqs: maximum number of virtqueues supported by device
 */
struct vdpa_mgmt_dev {
	struct device *device;
	const struct vdpa_mgmtdev_ops *ops;
	struct virtio_device_id *id_table;
	u64 config_attr_mask;
	struct list_head list;
	u64 supported_features;
	u32 max_supported_vqs;
};
628 
/* Register/unregister a management device with the vDPA core. */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
631 
632 #endif /* _LINUX_VDPA_H */
633