/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

/*
 * When #include'ing this header, you need to provide the following
 * declarations upfront:
 * - standard integer types (uint8_t, uint16_t, etc.)
 * They are provided by stdint.h of the standard headers.
 *
 * In addition, if you intend to use the FLEX macros, you also need to
 * provide the following, before invoking the FLEX macros:
 * - size_t
 * - memcpy
 * - grant_ref_t
 * These declarations are provided by string.h of the standard headers,
 * and grant_table.h from the Xen public headers.
 */
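
/*
 * For example, an out-of-tree consumer that wants the FLEX macros might
 * pull in the prerequisites roughly like this (illustrative only; the
 * actual header names and paths depend on the tree this file is embedded
 * in):
 *
 *     #include <stdint.h>        // uint8_t, uint16_t, ...
 *     #include <string.h>        // size_t, memcpy
 *     #include "grant_table.h"   // grant_ref_t
 *     #include "ring.h"
 */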

#if __XEN_INTERFACE_VERSION__ < 0x00030208
#define xen_mb()  mb()
#define xen_rmb() rmb()
#define xen_wmb() wmb()
#endif

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)                                      \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /               \
            sizeof_field(struct _s##_sring, ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)                                            \
    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
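
/*
 * Worked example (illustrative, assuming a 4096-byte shared page and a
 * 64-byte request/response union): the sring header above the ring[]
 * array occupies 64 bytes (four RING_IDX counters + 4-byte pvt union +
 * 44-byte pad), leaving 4032 bytes, i.e. room for 63 entries; rounding
 * down to a power of two gives a ring of 32 entries, so indices can be
 * wrapped with a simple "& (32 - 1)" mask.
 */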

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say request_t, and response_t already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     mytag_sring_t      - The shared ring.
 *     mytag_front_ring_t - The 'front' half of the ring.
 *     mytag_back_ring_t  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialise
 * the front half:
 *
 *     mytag_front_ring_t front_ring;
 *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     mytag_back_ring_t back_ring;
 *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                        \
/* Shared ring entry */                                                 \
union __name##_sring_entry {                                            \
    __req_t req;                                                        \
    __rsp_t rsp;                                                        \
};                                                                      \
                                                                        \
/* Shared ring page */                                                  \
struct __name##_sring {                                                 \
    RING_IDX req_prod, req_event;                                       \
    RING_IDX rsp_prod, rsp_event;                                       \
    union {                                                             \
        struct {                                                        \
            uint8_t smartpoll_active;                                   \
        } netif;                                                        \
        struct {                                                        \
            uint8_t msg;                                                \
        } tapif_user;                                                   \
        uint8_t pvt_pad[4];                                             \
    } pvt;                                                              \
    uint8_t __pad[44];                                                  \
    union __name##_sring_entry ring[1]; /* variable-length */           \
};                                                                      \
                                                                        \
/* "Front" end's private variables */                                   \
struct __name##_front_ring {                                            \
    RING_IDX req_prod_pvt;                                              \
    RING_IDX rsp_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* "Back" end's private variables */                                    \
struct __name##_back_ring {                                             \
    RING_IDX rsp_prod_pvt;                                              \
    RING_IDX req_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* Syntactic sugar */                                                   \
typedef struct __name##_sring __name##_sring_t;                         \
typedef struct __name##_front_ring __name##_front_ring_t;               \
typedef struct __name##_back_ring __name##_back_ring_t

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {                                       \
    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
    (_s)->req_event = (_s)->rsp_event = 1;                              \
    (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad));      \
    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad));                  \
} while(0)

#define FRONT_RING_INIT(_r, _s, __size) do {                            \
    (_r)->req_prod_pvt = 0;                                             \
    (_r)->rsp_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {                             \
    (_r)->rsp_prod_pvt = 0;                                             \
    (_r)->req_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

/* How big is this ring? */
#define RING_SIZE(_r)                                                   \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                          \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                   \
    (RING_FREE_REQUESTS(_r) == 0)
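
/*
 * Because these macros provide no flow control of their own, a front end
 * checks for space before queueing anything, e.g. (sketch, with a
 * hypothetical front_ring):
 *
 *     if (RING_FULL(&front_ring))
 *         return -EBUSY;   // every slot holds a request still awaiting its response
 *
 * or, when batching, it submits at most RING_FREE_REQUESTS(&front_ring)
 * new requests before pushing them, so that req_prod_pvt never runs more
 * than RING_SIZE() ahead of rsp_cons.
 */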

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

#ifdef __GNUC__
#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({                             \
    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;          \
    unsigned int rsp = RING_SIZE(_r) -                                  \
        ((_r)->req_cons - (_r)->rsp_prod_pvt);                          \
    req < rsp ? req : rsp;                                              \
})
#else
/* Same as above, but without the nice GCC ({ ... }) syntax. */
#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
    ((((_r)->sring->req_prod - (_r)->req_cons) <                        \
      (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ?        \
     ((_r)->sring->req_prod - (_r)->req_cons) :                         \
     (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
#endif

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

/*
 * Get a local copy of a request.
 *
 * Use this in preference to RING_GET_REQUEST() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where _req is a struct which consists of only bitfields.
 */
#define RING_COPY_REQUEST(_r, _idx, _req) do {                          \
    /* Use volatile to force the copy into _req. */                     \
    *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);       \
} while (0)

#define RING_GET_RESPONSE(_r, _idx)                                     \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                           \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
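
/*
 * A back end that does not fully trust its front end typically combines
 * the two macros above: sanity-check the producer index once, then work
 * on private copies of each request. A minimal sketch (mytag_request_t
 * and back_ring are hypothetical names):
 *
 *     RING_IDX rp = back_ring.sring->req_prod;
 *     xen_rmb();   // read the producer index before any request contents
 *     if (RING_REQUEST_PROD_OVERFLOW(&back_ring, rp)) {
 *         // more requests than can possibly fit: misbehaving frontend
 *         return -EIO;
 *     }
 *     while (back_ring.req_cons != rp) {
 *         mytag_request_t req;
 *         RING_COPY_REQUEST(&back_ring, back_ring.req_cons, &req);
 *         back_ring.req_cons++;
 *         // validate and process the private copy in 'req'
 *     }
 */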

#define RING_PUSH_REQUESTS(_r) do {                                     \
    xen_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                    \
    xen_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customised
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->req_prod;                             \
    RING_IDX __new = (_r)->req_prod_pvt;                                \
    xen_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new;                                      \
    xen_mb(); /* back sees new requests /before/ we check req_event */  \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)
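
/*
 * Typical front-end producer sequence, shown as a sketch (mytag,
 * front_ring, evtchn and notify_remote_via_evtchn() stand in for the
 * caller's own types, state and event-channel primitive; ring space is
 * assumed to have been checked with RING_FULL() beforehand):
 *
 *     int notify;
 *     mytag_request_t *req;
 *
 *     req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *     // ... fill in *req ...
 *     front_ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_remote_via_evtchn(evtchn);
 */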

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    xen_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = __new;                                      \
    xen_mb(); /* front sees new resps /before/ we check rsp_event */    \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
    if (_work_to_do) break;                                             \
    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
    xen_mb();                                                           \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
    if (_work_to_do) break;                                             \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
    xen_mb();                                                           \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
} while (0)
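
/*
 * Putting the consumer-side pieces together, a back end's event handler
 * commonly looks like the following sketch (mytag, back_ring,
 * process_request() and the notification primitive are placeholders; the
 * producer-index read barrier and overflow check from the sketch above
 * are omitted for brevity):
 *
 *     int more_to_do, notify;
 *
 *     do {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *             mytag_request_t req;
 *             mytag_response_t *rsp;
 *
 *             RING_COPY_REQUEST(&back_ring, back_ring.req_cons, &req);
 *             back_ring.req_cons++;
 *
 *             rsp = RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt);
 *             process_request(&req, rsp);
 *             back_ring.rsp_prod_pvt++;
 *         }
 *         // re-arm req_event so the front notifies us about later requests
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, more_to_do);
 *     } while (more_to_do);
 *
 *     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&back_ring, notify);
 *     if (notify)
 *         notify_remote_via_evtchn(evtchn);
 */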

/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
 * functions to check if there is data on the ring, and to read and
 * write to them.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, reducing it to the
 *   range [0, size - 1].
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer to read/write to the
 *   ring at the right location.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/*
 * The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture and of the page granularity chosen
 * by operating systems.
 */
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order)                                       \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))
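
/*
 * For example, a two-page allocation (order 1, 8192 bytes in total) is
 * split into two rings of XEN_FLEX_RING_SIZE(1) = 1UL << (1 + 12 - 1) =
 * 4096 bytes each, one per direction of traffic.
 */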

#define DEFINE_XEN_FLEX_RING(name)                                      \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)    \
{                                                                       \
    return idx & (ring_size - 1);                                       \
}                                                                       \
                                                                        \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,    \
                                                 RING_IDX idx,          \
                                                 RING_IDX ring_size)    \
{                                                                       \
    return buf + name##_mask(idx, ring_size);                           \
}                                                                       \
                                                                        \
static inline void name##_read_packet(void *opaque,                     \
                                      const unsigned char *buf,         \
                                      size_t size,                      \
                                      RING_IDX masked_prod,             \
                                      RING_IDX *masked_cons,            \
                                      RING_IDX ring_size)               \
{                                                                       \
    if (*masked_cons < masked_prod ||                                   \
        size <= ring_size - *masked_cons) {                             \
        memcpy(opaque, buf + *masked_cons, size);                       \
    } else {                                                            \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons);   \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
               size - (ring_size - *masked_cons));                      \
    }                                                                   \
    *masked_cons = name##_mask(*masked_cons + size, ring_size);         \
}                                                                       \
                                                                        \
static inline void name##_write_packet(unsigned char *buf,              \
                                       const void *opaque,              \
                                       size_t size,                     \
                                       RING_IDX *masked_prod,           \
                                       RING_IDX masked_cons,            \
                                       RING_IDX ring_size)              \
{                                                                       \
    if (*masked_prod < masked_cons ||                                   \
        size <= ring_size - *masked_prod) {                             \
        memcpy(buf + *masked_prod, opaque, size);                       \
    } else {                                                            \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod);   \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
               size - (ring_size - *masked_prod));                      \
    }                                                                   \
    *masked_prod = name##_mask(*masked_prod + size, ring_size);         \
}                                                                       \
                                                                        \
static inline RING_IDX name##_queued(RING_IDX prod,                     \
                                     RING_IDX cons,                     \
                                     RING_IDX ring_size)                \
{                                                                       \
    RING_IDX size;                                                      \
                                                                        \
    if (prod == cons)                                                   \
        return 0;                                                       \
                                                                        \
    prod = name##_mask(prod, ring_size);                                \
    cons = name##_mask(cons, ring_size);                                \
                                                                        \
    if (prod == cons)                                                   \
        return ring_size;                                               \
                                                                        \
    if (prod > cons)                                                    \
        size = prod - cons;                                             \
    else                                                                \
        size = ring_size - (cons - prod);                               \
    return size;                                                        \
}                                                                       \
                                                                        \
struct name##_data {                                                    \
    unsigned char *in; /* half of the allocation */                     \
    unsigned char *out; /* half of the allocation */                    \
}
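
/*
 * A consumer of the "in" direction typically drives these helpers as in
 * the following sketch (mytag, intf, data, dest and len are hypothetical;
 * the indexes live in the mytag_data_intf page defined below):
 *
 *     RING_IDX ring_size = XEN_FLEX_RING_SIZE(intf->ring_order);
 *     RING_IDX cons = intf->in_cons;
 *     RING_IDX prod = intf->in_prod;
 *     xen_rmb();   // read the indexes before the ring contents
 *
 *     if (mytag_queued(prod, cons, ring_size) >= len) {
 *         RING_IDX masked_prod = mytag_mask(prod, ring_size);
 *         RING_IDX masked_cons = mytag_mask(cons, ring_size);
 *
 *         mytag_read_packet(dest, data->in, len, masked_prod,
 *                           &masked_cons, ring_size);
 *         xen_mb();    // finish reading before publishing the new consumer index
 *         intf->in_cons = cons + len;
 *     }
 */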

#define DEFINE_XEN_FLEX_RING_AND_INTF(name)                             \
struct name##_data_intf {                                               \
    RING_IDX in_cons, in_prod;                                          \
                                                                        \
    uint8_t pad1[56];                                                   \
                                                                        \
    RING_IDX out_cons, out_prod;                                        \
                                                                        \
    uint8_t pad2[56];                                                   \
                                                                        \
    RING_IDX ring_order;                                                \
    grant_ref_t ref[];                                                  \
};                                                                      \
DEFINE_XEN_FLEX_RING(name)

#endif /* __XEN_PUBLIC_IO_RING_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */