/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer when the producer pointer is touched and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
	/* Mutual exclusion for the completion ring in SKB mode. Protects the
	 * case where sockets share a single cq because they share the same
	 * netdev and queue id.
	 */
	spinlock_t cq_cached_prod_lock;
};

struct parsed_desc {
	u32 mb;
	u32 valid;
};

/* The shared state of the rings is a simple circular buffer, as
 * outlined in Documentation/core-api/circular-buffers.rst. For the Rx
 * and completion rings, the kernel is the producer and user space is
 * the consumer. For the Tx and fill rings, the kernel is the consumer
 * and user space is the producer.
 *
 * producer                                  consumer
 *
 * if (LOAD ->consumer) {  (A)               LOAD.acq ->producer   (C)
 *    STORE $data                            LOAD $data
 *    STORE.rel ->producer  (B)              STORE.rel ->consumer  (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures that the data is written before the
 * producer pointer is published. If this barrier were missing, the
 * consumer could observe the producer pointer being set and thus load
 * the data before the producer has written the new data. The consumer
 * would in this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, as speculative loads are not
 * discarded when the CPU does not know there is a dependency between
 * ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, we do not store it. The dependency
 * orders the stores after the load, so no barrier is needed.
 *
 * (D) ensures that the load of the data completes before the store of
 * the consumer pointer becomes visible. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
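
/* Illustrative sketch (not part of this header): a user-space consumer of
 * the Rx ring following the scheme above would pair a load-acquire of
 * ->producer (C) with a store-release of ->consumer (D). Names such as
 * rx_ring, mask and process() are hypothetical.
 *
 *	__u32 prod = __atomic_load_n(&rx_ring->producer, __ATOMIC_ACQUIRE); // C
 *	while (cons != prod) {
 *		struct xdp_desc d = rx_ring->desc[cons++ & mask];            // LOAD $data
 *		process(&d);
 *	}
 *	__atomic_store_n(&rx_ring->consumer, cons, __ATOMIC_RELEASE);        // D
 */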

/* The operations on the rings are the following:
 *
 * producer                                  consumer
 *
 * RESERVE entries                           PEEK in the ring for entries
 * WRITE data into the ring                  READ data from the ring
 * SUBMIT entries                            RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
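
/* Roughly, RESERVE maps to xskq_prod_reserve*(), WRITE to
 * xskq_prod_write_addr*() or to filling in the reserved descriptor
 * directly, SUBMIT to xskq_prod_submit*(), PEEK to xskq_cons_peek*(),
 * READ to xskq_cons_read*() and RELEASE to xskq_cons_release*().
 */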

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~(XDP_PKT_CONTD | XDP_TX_METADATA);
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 len = desc->len;
	u64 addr, offset;

	if (!len)
		return false;

	/* Can overflow if desc->addr < pool->tx_metadata_len */
	if (check_sub_overflow(desc->addr, pool->tx_metadata_len, &addr))
		return false;

	offset = addr & (pool->chunk_size - 1);

	/*
	 * Can't overflow: @offset is guaranteed to be < ``U32_MAX``
	 * (pool->chunk_size is ``u32``), @len is guaranteed
	 * to be <= ``U32_MAX``.
	 */
	if (offset + len + pool->tx_metadata_len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;

	return true;
}
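
/* Worked example with illustrative values (not taken from any driver):
 * with chunk_size = 2048, tx_metadata_len = 16 and addrs_cnt = 4096, a
 * Tx descriptor with addr = 2064 and len = 2000 passes: addr - 16 = 2048,
 * offset = 2048 & 2047 = 0, and 0 + 2000 + 16 = 2016 <= 2048. With
 * len = 2040 it fails, since 0 + 2040 + 16 = 2056 > 2048 and the frame
 * would spill out of its chunk.
 */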

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 len = desc->len;
	u64 addr, end;

	if (!len)
		return false;

	/* Can't overflow: @len is guaranteed to be <= ``U32_MAX`` */
	len += pool->tx_metadata_len;
	if (len > pool->chunk_size)
		return false;

	/* Can overflow if desc->addr is close to 0 */
	if (check_sub_overflow(xp_unaligned_add_offset_to_addr(desc->addr),
			       pool->tx_metadata_len, &addr))
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	/* Can overflow if pool->addrs_cnt is high enough */
	if (check_add_overflow(addr, len, &end) || end > pool->addrs_cnt)
		return false;

	if (xp_desc_crosses_non_contig_pg(pool, addr, len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;

	return true;
}
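
/* In unaligned mode the descriptor address carries an offset in its upper
 * bits; xp_unaligned_add_offset_to_addr() folds it into the base address
 * before the checks above. A worked example with made-up numbers:
 * addrs_cnt = 1 << 20, chunk_size = 2048, tx_metadata_len = 0,
 * desc->addr = (256ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 4096 and
 * len = 1500 gives addr = 4096 + 256 = 4352 and end = 5852, well within
 * addrs_cnt, so the descriptor is accepted as long as it does not cross a
 * non-contiguous page boundary.
 */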

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_has_descs(struct xsk_queue *q)
{
	return q->cached_cons != q->cached_prod;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;
	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      struct xdp_desc *desc, struct parsed_desc *parsed)
{
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);
}

static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;
	u32 total_descs = 0, nr_frags = 0;

	/* Track the first entry of each packet. If we stumble upon *any*
	 * invalid descriptor, rewind the current packet (which may consist
	 * of multiple frags) and stop processing.
	 */
	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;
		struct parsed_desc parsed;

		descs[nb_entries] = ring->desc[idx];
		cached_cons++;
		parse_desc(q, pool, &descs[nb_entries], &parsed);
		if (unlikely(!parsed.valid))
			break;

		if (likely(!parsed.mb)) {
			total_descs += (nr_frags + 1);
			nr_frags = 0;
		} else {
			nr_frags++;
			if (nr_frags == pool->xdp_zc_max_segs) {
				nr_frags = 0;
				break;
			}
		}
		nb_entries++;
	}

	cached_cons -= nr_frags;
	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return total_descs;
}
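
/* Note that the return value only counts descriptors that belong to
 * completed packets. If the loop stops in the middle of a multi-frag
 * packet (for example because @max entries were reached), the leading
 * frags already copied into pool->tx_descs are rewound via cached_cons
 * and will be read again, together with the rest of the packet, on the
 * next call.
 */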

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}
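
/* A minimal sketch of the pattern this enables (hypothetical driver-side
 * Tx loop, not a real code path): descriptors are peeked and released
 * locally one by one, and the global consumer pointer is published only
 * once per batch via __xskq_cons_release(). queue_to_hw() and budget are
 * made-up names.
 *
 *	struct xdp_desc desc;
 *
 *	while (budget-- && xskq_cons_peek_desc(q, &desc, pool)) {
 *		queue_to_hw(&desc);      // hypothetical helper
 *		xskq_cons_release(q);    // local state only
 *	}
 *	__xskq_cons_release(q);          // make it visible to user space
 */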

static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons -= cnt;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_get_prod(struct xsk_queue *q)
{
	return READ_ONCE(q->ring->producer);
}

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_prod -= cnt;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[idx & q->ring_mask] = addr;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}
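
/* A hedged sketch of the producer side (mirroring the consumer sketch
 * above, not a real code path): the Rx path reserves and fills one
 * descriptor per received frame, then publishes the whole batch once via
 * the store-release in xskq_prod_submit() below. rx_q, addr and len are
 * made-up names.
 *
 *	if (xskq_prod_reserve_desc(rx_q, addr, len, 0) == 0) {
 *		// descriptor is now staged at the local cached_prod
 *	}
 *	...
 *	xskq_prod_submit(rx_q);   // B: make all staged entries visible
 */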

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */