/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

typedef void vq_callback_t(struct virtqueue *);

/**
 * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
 * @name: virtqueue description. Used mainly for debugging, NULL for
 *        a virtqueue unused by the driver.
 * @callback: A callback to invoke on a used buffer notification.
 *            NULL for a virtqueue that does not need a callback.
 * @ctx: A flag indicating whether to maintain an extra context per virtqueue.
 */
struct virtqueue_info {
	const char *name;
	vq_callback_t *callback;
	bool ctx;
};

/**
 * struct virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	vqs_info: array of virtqueue info structures
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *      The function guarantees that all memory operations on the
 *      queue before it are visible to the vring_interrupt() that is
 *      called after it.
 *      vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits.
 * @get_extended_features: get the extended array of feature bits for this
 *      device (optional).
 *      vdev: the virtio_device
 *      Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently
 *      need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
 *	Note that despite the name this can be called any number of
 *	times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *      This returns a pointer to the bus name a la pci_name from which
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset will guarantee that the callbacks are disabled and
 *	synchronized.
 *	Except for the callback, the caller should guarantee that the vring is
 *	not accessed by any functions of virtqueue.
 * @enable_vq_after_reset: enable a reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *	set.
 */
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
			struct virtqueue *vqs[],
			struct virtqueue_info vqs_info[],
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	void (*get_extended_features)(struct virtio_device *vdev,
				      u64 *features);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
};
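
/*
 * Illustrative sketch (not part of this header): a transport typically
 * fills in a static virtio_config_ops with its own implementations and
 * points vdev->config at it before calling register_virtio_device().
 * The foo_*() names below are hypothetical placeholders:
 *
 *	static const struct virtio_config_ops foo_config_ops = {
 *		.get		= foo_get,
 *		.set		= foo_set,
 *		.generation	= foo_generation,
 *		.get_status	= foo_get_status,
 *		.set_status	= foo_set_status,
 *		.reset		= foo_reset,
 *		.find_vqs	= foo_find_vqs,
 *		.del_vqs	= foo_del_vqs,
 *		.get_features	= foo_get_features,
 *		.finalize_features = foo_finalize_features,
 *		.bus_name	= foo_bus_name,
 *	};
 */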

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	return virtio_features_test_bit(vdev->features_array, fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	virtio_features_set_bit(vdev->features_array, fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	virtio_features_clear_bit(vdev->features_array, fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
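
/*
 * Illustrative use (sketch): a driver checks a device-specific feature
 * bit before touching the config fields it gates, roughly as virtio-net
 * does for its MAC address (identifiers from other headers):
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
 *		virtio_cread_bytes(vdev,
 *				   offsetof(struct virtio_net_config, mac),
 *				   addr, ETH_ALEN);
 *
 * For device-specific bits this also calls
 * virtio_check_driver_offered_feature(), which verifies that the driver
 * advertised the bit in its feature table.
 */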

static inline void virtio_get_features(struct virtio_device *vdev,
				       u64 *features)
{
	if (vdev->config->get_extended_features) {
		vdev->config->get_extended_features(vdev, features);
		return;
	}

	virtio_features_from_u64(features, vdev->config->get_features(vdev));
}

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		    struct virtqueue *vqs[],
		    struct virtqueue_info vqs_info[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	struct virtqueue_info vqs_info[] = {
		{ n, c },
	};
	struct virtqueue *vq;
	int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);

	if (err < 0)
		return ERR_PTR(err);
	return vq;
}
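
/*
 * Illustrative use (sketch): a multi-queue driver describes its queues
 * with a virtqueue_info array and calls virtio_find_vqs(); the queue
 * names and callbacks below are hypothetical:
 *
 *	struct virtqueue *vqs[2];
 *	struct virtqueue_info vqs_info[] = {
 *		{ "rx", foo_rx_done },
 *		{ "tx", foo_tx_done },
 *	};
 *	int err = virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
 *
 * A driver with a single queue can use virtio_find_single_vq() instead
 * and check the result with IS_ERR().
 */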

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the virtio device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}
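
/*
 * Illustrative use (sketch): a driver quiescing a queue can pair this
 * with virtqueue_disable_cb() (declared in linux/virtio.h) so that any
 * callback already in flight has finished before the state it touches
 * is torn down:
 *
 *	virtqueue_disable_cb(vq);
 *	virtio_synchronize_cbs(vdev);
 *
 * Note that virtqueue_disable_cb() is only a hint to the other side, so
 * this alone does not guarantee that no further callback will ever run.
 */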

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK. By then it must have "seen" the coherent memory
	 * write that cleared vq->broken, which the driver performs
	 * before setting DRIVER_OK, so the driver's subsequent
	 * vring_interrupt() will see vq->broken as false and no
	 * notification is lost.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
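
/*
 * Illustrative probe-time ordering (sketch): a driver that needs its
 * virtqueues before probe() returns calls virtio_device_ready() once
 * setup is done; otherwise the core sets DRIVER_OK after probe returns:
 *
 *	err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);
 *	if (err)
 *		return err;
 *
 *	virtio_device_ready(vdev);
 *	... the device may now be kicked and callbacks may fire ...
 */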

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!region->len)
		return false;
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
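
/*
 * Illustrative use (sketch): converting a device-endian value by hand,
 * e.g. a __virtio16 field fetched straight through the transport's get()
 * method:
 *
 *	__virtio16 raw;
 *	u16 val;
 *
 *	vdev->config->get(vdev, offset, &raw, sizeof(raw));
 *	val = virtio16_to_cpu(vdev, raw);
 *
 * This is what the virtio_cread16()/virtio_cwrite16() helpers further
 * down do, so most drivers use those instead of open-coding it.
 */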

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
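
/*
 * Illustrative use (sketch): read a config field into a native-endian
 * variable, roughly as virtio-blk does for its capacity field (struct
 * and member come from the device's UAPI header):
 *
 *	u64 capacity;
 *
 *	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 *
 * virtio_cwrite() goes the other way, converting from CPU endianness
 * before handing the value to the transport's set() method.
 */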

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		 __le16: (u16)le16_to_cpu(x), \
		 __le32: (u32)le32_to_cpu(x), \
		 __le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		 __u8: (x), \
		 __le16: cpu_to_le16(x), \
		 __le32: cpu_to_le32(x), \
		 __le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
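
/*
 * Illustrative use (sketch): the _le variants are for config fields that
 * are defined as little-endian regardless of device endianness, declared
 * as __le16/__le32/__le64 in the UAPI header.  The struct and member
 * below are hypothetical:
 *
 *	u32 queues;
 *
 *	virtio_cread_le(vdev, struct foo_config, num_queues, &queues);
 */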

/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}
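
/*
 * The generation loop above is what makes multi-word reads safe: a
 * transport may implement an 8-byte get() as two 32-bit reads, so
 * without the retry a concurrent device-side config update could be
 * seen half old, half new.  This is why virtio_cread64() below goes
 * through __virtio_cread_many() instead of calling ->get() directly.
 */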

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
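
/*
 * Illustrative use (sketch): read a field only when the feature gating
 * it was negotiated, e.g. along the lines of virtio-net and its MTU
 * field (identifiers from other headers):
 *
 *	u16 mtu;
 *
 *	if (virtio_cread_feature(vdev, VIRTIO_NET_F_MTU,
 *				 struct virtio_net_config, mtu, &mtu))
 *		... feature not negotiated, fall back to a default ...
 */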

#endif /* _LINUX_VIRTIO_CONFIG_H */