/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

/*
 * When #include'ing this header, you need to provide the following
 * declaration upfront:
 * - standard integers types (uint8_t, uint16_t, etc)
 * They are provided by stdint.h of the standard headers.
 *
 * In addition, if you intend to use the FLEX macros, you also need to
 * provide the following, before invoking the FLEX macros:
 * - size_t
 * - memcpy
 * - grant_ref_t
 * These declarations are provided by string.h of the standard headers,
 * and grant_table.h from the Xen public headers.
 */

#include "../xen-compat.h"

/*
 * Interface versions older than 0x00030208 pre-date the xen_*() barrier
 * names: provide them in terms of the plain mb()/rmb()/wmb() macros that
 * the embedding environment must supply.
 */
#if __XEN_INTERFACE_VERSION__ < 0x00030208
#define xen_mb()  mb()
#define xen_rmb() rmb()
#define xen_wmb() wmb()
#endif

/*
 * Ring indices are free-running 32-bit counters: they are never reduced
 * modulo the ring size except at the point of actually indexing the ring
 * array (see RING_GET_REQUEST/RING_GET_RESPONSE below), which is why the
 * ring size must be a power of two.
 */
typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
            sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say request_t, and response_t already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     mytag_sring_t      - The shared ring.
 *     mytag_front_ring_t - The 'front' half of the ring.
 *     mytag_back_ring_t  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialise
 * the front half:
 *
 *     mytag_front_ring_t ring;
 *     XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     mytag_back_ring_t back_ring;
 *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                        \
/* Shared ring entry */                                                 \
union __name##_sring_entry {                                            \
    __req_t req;                                                        \
    __rsp_t rsp;                                                        \
};                                                                      \
                                                                        \
/* Shared ring page */                                                  \
struct __name##_sring {                                                 \
    RING_IDX req_prod, req_event;                                       \
    RING_IDX rsp_prod, rsp_event;                                       \
    /* Private (per-protocol) area; layout is part of the shared ABI. */\
    union {                                                             \
        struct {                                                        \
            uint8_t smartpoll_active;                                   \
        } netif;                                                        \
        struct {                                                        \
            uint8_t msg;                                                \
        } tapif_user;                                                   \
        uint8_t pvt_pad[4];                                             \
    } pvt;                                                              \
    /* Pad the index page header to a fixed 64-byte preamble. */        \
    uint8_t __pad[44];                                                  \
    union __name##_sring_entry ring[1]; /* variable-length */           \
};                                                                      \
                                                                        \
/* "Front" end's private variables */                                   \
struct __name##_front_ring {                                            \
    RING_IDX req_prod_pvt;                                              \
    RING_IDX rsp_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* "Back" end's private variables */                                    \
struct __name##_back_ring {                                             \
    RING_IDX rsp_prod_pvt;                                              \
    RING_IDX req_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* Syntactic sugar */                                                   \
typedef struct __name##_sring __name##_sring_t;                         \
typedef struct __name##_front_ring __name##_front_ring_t;               \
typedef struct __name##_back_ring __name##_back_ring_t

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {                                       \
    (_s)->req_prod = (_s)->rsp_prod = 0;                                \
    /* Event counters start at 1: notify on the very first message. */  \
    (_s)->req_event = (_s)->rsp_event = 1;                              \
    (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad));      \
    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad));                  \
} while(0)

/* Attach a front ring to a shared ring already populated up to index _i. */
#define FRONT_RING_ATTACH(_r, _s, _i, __size) do {                      \
    (_r)->req_prod_pvt = (_i);                                          \
    (_r)->rsp_cons = (_i);                                              \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

/* Convenience: initialise the shared page and the front half together. */
#define XEN_FRONT_RING_INIT(r, s, size) do {                            \
    SHARED_RING_INIT(s);                                                \
    FRONT_RING_INIT(r, s, size);                                        \
} while (0)

/* Attach a back ring to a shared ring already consumed up to index _i. */
#define BACK_RING_ATTACH(_r, _s, _i, __size) do {                       \
    (_r)->rsp_prod_pvt = (_i);                                          \
    (_r)->req_cons = (_i);                                              \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

/* How big is this ring? */
#define RING_SIZE(_r)                                                   \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                          \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                   \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r)                            \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * Number of unconsumed requests, clamped to the number of slots the
 * backend has not yet filled with responses, so a misbehaving frontend
 * advancing req_prod too far cannot make the count exceed the ring size.
 */
#ifdef __GNUC__
#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({                          \
    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;          \
    unsigned int rsp = RING_SIZE(_r) -                                  \
        ((_r)->req_cons - (_r)->rsp_prod_pvt);                          \
    req < rsp ? req : rsp;                                              \
})
#else
/* Same as above, but without the nice GCC ({ ... }) syntax. */
#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r)                             \
    ((((_r)->sring->req_prod - (_r)->req_cons) <                        \
      (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ?        \
     ((_r)->sring->req_prod - (_r)->req_cons) :                         \
     (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
#endif

#ifdef XEN_RING_HAS_UNCONSUMED_IS_BOOL
/*
 * These variants should only be used in case no caller is abusing them for
 * obtaining the number of unconsumed responses/requests.
 */
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
    (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r))
#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
    (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r))
#else
#define RING_HAS_UNCONSUMED_RESPONSES(_r) XEN_RING_NR_UNCONSUMED_RESPONSES(_r)
#define RING_HAS_UNCONSUMED_REQUESTS(_r)  XEN_RING_NR_UNCONSUMED_REQUESTS(_r)
#endif

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)                                     \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/*
 * Get a local copy of a request/response.
 *
 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where dest is a struct which consists of only bitfields.
 */
#define RING_COPY_(type, r, idx, dest) do {                             \
    /* Use volatile to force the copy into dest. */                     \
    *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx);      \
} while (0)

#define RING_COPY_REQUEST(r, idx, req)  RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                           \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod)                          \
    (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do {                                     \
    xen_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                    \
    xen_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customised
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */

/*
 * Notify iff the event index lies in the (__old, __new] window just
 * published; the RING_IDX casts keep the comparison correct across
 * free-running index wrap-around.
 */
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->req_prod;                             \
    RING_IDX __new = (_r)->req_prod_pvt;                                \
    xen_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new;                                      \
    xen_mb(); /* back sees new requests /before/ we check req_event */  \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    xen_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = __new;                                      \
    xen_mb(); /* front sees new resps /before/ we check rsp_event */    \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

/*
 * Re-check after arming req_event so a request published concurrently
 * with the event update cannot be missed (see hold-off comment above).
 */
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
    if (_work_to_do) break;                                             \
    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
    xen_mb();                                                           \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
} while (0)

/*
 * Mirror of RING_FINAL_CHECK_FOR_REQUESTS for the response direction:
 * arm rsp_event, then re-check so a concurrently pushed response is seen.
 */
#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
    if (_work_to_do) break;                                             \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
    xen_mb();                                                           \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
} while (0)


/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
 * functions to check if there is data on the ring, and to read and
 * write to them.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allow them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, to reduce the index
 *   within the range [0-size].
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer to read/write to the
 *   ring at the right location.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture, and page granularity chosen by
 * operating systems.
 */
#define XEN_PAGE_SHIFT 12
#endif
/* Half of the 2^order pages: the area is split between "in" and "out". */
#define XEN_FLEX_RING_SIZE(order)                                       \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))

#define DEFINE_XEN_FLEX_RING(name)                                      \
/* Reduce a free-running index into [0, ring_size); size is a power of 2. */ \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)    \
{                                                                       \
    return idx & (ring_size - 1);                                       \
}                                                                       \
                                                                        \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,    \
                                                 RING_IDX idx,          \
                                                 RING_IDX ring_size)    \
{                                                                       \
    return buf + name##_mask(idx, ring_size);                           \
}                                                                       \
                                                                        \
/* Copy size bytes out of the ring at *masked_cons into opaque,         \
 * splitting the copy in two when the data wraps past the ring end,     \
 * then advance *masked_cons (already-masked). Caller must ensure at    \
 * least size bytes are queued. */                                      \
static inline void name##_read_packet(void *opaque,                     \
                                      const unsigned char *buf,         \
                                      size_t size,                      \
                                      RING_IDX masked_prod,             \
                                      RING_IDX *masked_cons,            \
                                      RING_IDX ring_size)               \
{                                                                       \
    if (*masked_cons < masked_prod ||                                   \
        size <= ring_size - *masked_cons) {                             \
        memcpy(opaque, buf + *masked_cons, size);                       \
    } else {                                                            \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons);   \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
               size - (ring_size - *masked_cons));                      \
    }                                                                   \
    *masked_cons = name##_mask(*masked_cons + size, ring_size);         \
}                                                                       \
                                                                        \
/* Copy size bytes from opaque into the ring at *masked_prod, with the  \
 * same wrap-around split, then advance *masked_prod. Caller must       \
 * ensure at least size bytes of free space are available. */           \
static inline void name##_write_packet(unsigned char *buf,              \
                                       const void *opaque,              \
                                       size_t size,                     \
                                       RING_IDX *masked_prod,           \
                                       RING_IDX masked_cons,            \
                                       RING_IDX ring_size)              \
{                                                                       \
    if (*masked_prod < masked_cons ||                                   \
        size <= ring_size - *masked_prod) {                             \
        memcpy(buf + *masked_prod, opaque, size);                       \
    } else {                                                            \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod);   \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
               size - (ring_size - *masked_prod));                      \
    }                                                                   \
    *masked_prod = name##_mask(*masked_prod + size, ring_size);         \
}                                                                       \
                                                                        \
/* Bytes queued between cons and prod; equal masked indices with        \
 * unequal raw indices mean a completely full ring. */                  \
static inline RING_IDX name##_queued(RING_IDX prod,                     \
                                     RING_IDX cons,                     \
                                     RING_IDX ring_size)                \
{                                                                       \
    RING_IDX size;                                                      \
                                                                        \
    if (prod == cons)                                                   \
        return 0;                                                       \
                                                                        \
    prod = name##_mask(prod, ring_size);                                \
    cons = name##_mask(cons, ring_size);                                \
                                                                        \
    if (prod == cons)                                                   \
        return ring_size;                                               \
                                                                        \
    if (prod > cons)                                                    \
        size = prod - cons;                                             \
    else                                                                \
        size = ring_size - (cons - prod);                               \
    return size;                                                        \
}                                                                       \
                                                                        \
struct name##_data {                                                    \
    unsigned char *in; /* half of the allocation */                     \
    unsigned char *out; /* half of the allocation */                    \
}

#define DEFINE_XEN_FLEX_RING_AND_INTF(name)                             \
/* Shared indexes page; the pad[56] keeps each index pair on its own    \
 * 64-byte region. Trailing flexible array holds the grant refs. */     \
struct name##_data_intf {                                               \
    RING_IDX in_cons, in_prod;                                          \
                                                                        \
    uint8_t pad1[56];                                                   \
                                                                        \
    RING_IDX out_cons, out_prod;                                        \
                                                                        \
    uint8_t pad2[56];                                                   \
                                                                        \
    RING_IDX ring_order;                                                \
    grant_ref_t ref[];                                                  \
};                                                                      \
DEFINE_XEN_FLEX_RING(name)

#endif /* __XEN_PUBLIC_IO_RING_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */