// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpumap' is primarily used as a backend map for XDP BPF helper
 * call bpf_redirect_map() and XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do SKB-allocation and call the normal network stack.
 */
/*
 * This is a scalability and isolation mechanism, that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage. This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
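/* Typical usage (illustrative sketch, not part of this file; the map and
 * variable names are made up): userspace creates a BPF_MAP_TYPE_CPUMAP whose
 * value is struct bpf_cpumap_val (qsize plus an optional bpf_prog.fd) and
 * fills one entry per destination CPU. The XDP program then redirects with:
 *
 *	return bpf_redirect_map(&cpu_map, target_cpu, 0);
 *
 * The frame is delivered to the kthread bound to target_cpu, see
 * cpu_map_kthread_run() below.
 */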
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
#include <net/hotdata.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/local_lock.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>
#include <net/gro.h>

/* General idea: XDP packets getting XDP redirected to another CPU will
 * be stored/queued for at most one driver ->poll() call. It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU. Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */
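/* Data path overview (derived from the code below): on the RX CPU,
 * cpu_map_enqueue()/bq_enqueue() stage frames in a per-CPU xdp_bulk_queue;
 * __cpu_map_flush() then moves them into the destination entry's ptr_ring and
 * wakes its kthread. On the remote CPU, cpu_map_kthread_run() consumes the
 * ring, optionally runs a second XDP program, builds SKBs and feeds them to
 * GRO / the network stack.
 */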

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

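/* Per-CPU producer-side staging buffer: up to CPU_MAP_BULK_SIZE frames are
 * collected here before being flushed to the destination entry's ptr_ring.
 */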
struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
	local_lock_t bq_lock;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;
	struct gro_node gro;

	struct completion kthread_running;
	struct rcu_work free_work;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (attr->max_entries > NR_CPUS)
		return ERR_PTR(-E2BIG);

	cmap = bpf_map_area_alloc(sizeof(*cmap), NUMA_NO_NODE);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map) {
		bpf_map_area_free(cmap);
		return ERR_PTR(-ENOMEM);
	}

	return &cmap->map;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty. See __cpu_map_entry_replace() and the work-queue
	 * invoked __cpu_map_entry_free(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	void *ptr;

	while ((ptr = ptr_ring_consume(ring))) {
		WARN_ON_ONCE(1);
		if (unlikely(__ptr_test_bit(0, &ptr))) {
			__ptr_clear_bit(0, &ptr);
			kfree_skb(ptr);
			continue;
		}
		xdp_return_frame(ptr);
	}
}

static u32 cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				    void **skbs, u32 skb_n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_buff xdp;
	u32 act, pass = 0;
	int err;

	for (u32 i = 0; i < skb_n; i++) {
		struct sk_buff *skb = skbs[i];

		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			skbs[pass++] = skb;
			break;
		case XDP_REDIRECT:
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			napi_consume_skb(skb, true);
			stats->drop++;
			break;
		}
	}

	stats->pass += pass;

	return pass;
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq = {};
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem.type = xdpf->mem_type;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(xdpf->dev_rx, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(xdpf->dev_rx, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	stats->pass += nframes;

	return nframes;
}

#define CPUMAP_BATCH 8

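/* Split of one consumed batch: xdp_n counts xdp_frames (SKBs still to be
 * built on this CPU), skb_n counts SKBs that arrived via
 * cpu_map_generic_redirect().
 */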
struct cpu_map_ret {
	u32 xdp_n;
	u32 skb_n;
};

static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				 void **skbs, struct cpu_map_ret *ret,
				 struct xdp_cpumap_stats *stats)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

	if (!rcpu->prog)
		goto out;

	rcu_read_lock();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	xdp_set_return_frame_no_direct();

	ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
	if (unlikely(ret->skb_n))
		ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n,
						      stats);

	if (stats->redirect)
		xdp_do_flush();

	xdp_clear_return_frame_no_direct();
	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();

out:
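	/* Make room at the front of skbs[] for the SKBs that will be built
	 * from the remaining XDP frames; the already-present SKBs end up at
	 * &skbs[ret->xdp_n] onwards.
	 */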
	if (unlikely(ret->skb_n) && ret->xdp_n)
		memmove(&skbs[ret->xdp_n], skbs, ret->skb_n * sizeof(*skbs));
}

static void cpu_map_gro_flush(struct bpf_cpu_map_entry *rcpu, bool empty)
{
	/*
	 * If the ring is not empty, there'll be a new iteration soon, and we
	 * only need to do a full flush if a tick is long (> 1 ms).
	 * If the ring is empty, to not hold GRO packets in the stack for too
	 * long, do a full flush.
	 * This is equivalent to how NAPI decides whether to perform a full
	 * flush.
	 */
	gro_flush_normal(&rcpu->gro, !empty && HZ >= 1000);
}

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;
	unsigned long last_qs = jiffies;
	u32 packets = 0;

	complete(&rcpu->kthread_running);
	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given the stop order, the rcpu has already been
	 * disconnected from the map, thus no new packets can enter. Remaining
	 * in-flight per-CPU stored packets are flushed to this queue. Wait
	 * honoring the kthread_stop signal until the queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		struct cpu_map_ret ret = { };
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		u32 i, n, m;
		bool empty;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
				last_qs = jiffies;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			rcu_softirq_qs_periodic(last_qs);
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is single-consumer, with this kthread
		 * pinned to its CPU. Lockless access to the ptr_ring consume
		 * side is valid because resizing the queue is not allowed.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				skbs[ret.skb_n++] = skb;
				continue;
			}

			frames[ret.xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		local_bh_disable();

		/* Support running another XDP prog on this CPU */
		cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats);
		if (!ret.xdp_n)
			goto stats;

		m = napi_skb_cache_get_bulk(skbs, ret.xdp_n);
		if (unlikely(m < ret.xdp_n)) {
			for (i = m; i < ret.xdp_n; i++)
				xdp_return_frame(frames[i]);

			if (ret.skb_n)
				memmove(&skbs[m], &skbs[ret.xdp_n],
					ret.skb_n * sizeof(*skbs));

			kmem_alloc_drops += ret.xdp_n - m;
			ret.xdp_n = m;
		}

		for (i = 0; i < ret.xdp_n; i++) {
			struct xdp_frame *xdpf = frames[i];

			/* Can fail only when !skb -- already handled above */
			__xdp_build_skb_from_frame(xdpf, skbs[i], xdpf->dev_rx);
		}

stats:
		/* Feedback loop via tracepoint.
		 * NB: keep before recv to allow measuring enqueue/dequeue latency.
		 */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

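		/* skbs[0..ret.xdp_n - 1] are freshly built from XDP frames,
		 * skbs[ret.xdp_n..] arrived via cpu_map_generic_redirect();
		 * hand all of them to GRO.
		 */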
		for (i = 0; i < ret.xdp_n + ret.skb_n; i++)
			gro_receive_skb(&rcpu->gro, skbs[i]);

		/* Flush either every 64 packets or in case of empty ring */
		packets += n;
		empty = __ptr_ring_empty(rcpu->queue);
		if (packets >= NAPI_POLL_WEIGHT || empty) {
			cpu_map_gro_flush(rcpu, empty);
			packets = 0;
		}

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err = -ENOMEM, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return ERR_PTR(err);

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
		local_lock_init(&bq->bq_lock);
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;
	gro_init(&rcpu->gro);

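	/* A zero fd means no per-entry program was supplied: the short,
	 * qsize-only value layout leaves bpf_prog.fd zeroed.
	 */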
	if (fd > 0) {
		err = __cpu_map_load_bpf_program(rcpu, map, fd);
		if (err)
			goto free_ptr_ring;
	}

	/* Setup kthread */
	init_completion(&rcpu->kthread_running);
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread)) {
		err = PTR_ERR(rcpu->kthread);
		goto free_prog;
	}

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	/* Make sure the kthread is running, so kthread_stop() will not
	 * stop it prematurely and all pending frames or skbs will be
	 * handled by the kthread before kthread_stop() returns.
	 */
	wait_for_completion(&rcpu->kthread_running);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	gro_cleanup(&rcpu->gro);
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return ERR_PTR(err);
}

static void __cpu_map_entry_free(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace-period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);

	/* kthread_stop will wake_up_process and wait for it to complete.
	 * cpu_map_kthread_run() makes sure the pointer ring is empty
	 * before exiting.
	 */
	kthread_stop(rcpu->kthread);

	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
	gro_cleanup(&rcpu->gro);
	/* The queue should be empty at this point */
	__cpu_map_ring_cleanup(rcpu->queue);
	ptr_ring_cleanup(rcpu->queue, NULL);
	kfree(rcpu->queue);
	free_percpu(rcpu->bulkq);
	kfree(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
 * entry is no longer in use before freeing. We use queue_rcu_work() to call
 * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
 * period. This means that (a) all pending enqueue and flush operations have
 * completed (because of the RCU callback), and (b) we are in a workqueue
 * context where we can stop the kthread and wait for it to exit before freeing
 * everything.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
		queue_rcu_work(system_percpu_wq, &old_rcpu->free_work);
	}
}

static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses rcu_read_lock() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

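	/* value_size may cover only qsize (see cpu_map_alloc()), so copy into
	 * a zero-initialised local to keep bpf_prog.fd well defined.
	 */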
	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (IS_ERR(rcpu))
			return PTR_ERR(rcpu);
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. synchronize_rcu() below not only
	 * guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map, but also ensures pending flush operations
	 * (if any) are completed.
	 */
	synchronize_rcu();

	/* The only possible user of bpf_cpu_map_entry is
	 * cpu_map_kthread_run().
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* Stop kthread and cleanup entry directly */
		__cpu_map_entry_free(&rcpu->free_work.work);
	}
	bpf_map_area_free(cmap->cpu_map);
	bpf_map_area_free(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
	return __bpf_xdp_redirect_map(map, index, flags, 0,
				      __cpu_map_lookup_elem);
}

static u64 cpu_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_cpu_map);

	/* Currently the dynamically allocated elements are not counted */
	usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
	return usage;
}

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= cpu_map_mem_usage,
	.map_btf_id		= &cpu_map_btf_ids[0],
	.map_redirect		= cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	lockdep_assert_held(&bq->bq_lock);

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access. PREEMPT_RT relies on
 * local_lock_nested_bh() to serialise access to the per-CPU bq.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct xdp_bulk_queue *bq;

	local_lock_nested_bh(&rcpu->bulkq->bq_lock);
	bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, xdp_buff/page MUST be queued here, long enough for the
	 * driver code that invoked us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev) {
		struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();

		list_add(&bq->flush_node, flush_list);
	}

	local_unlock_nested_bh(&rcpu->bulkq->bq_lock);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
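	/* Tag the pointer's low bit so the kthread can tell this skb apart
	 * from xdp_frames in the same ptr_ring (see cpu_map_kthread_run()).
	 */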
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

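/* Called via xdp_do_flush() at the end of the NAPI poll that redirected the
 * frames: drain every bulk queue on the flush list into its destination ring
 * and wake the corresponding kthreads.
 */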
void __cpu_map_flush(struct list_head *flush_list)
{
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		local_lock_nested_bh(&bq->obj->bulkq->bq_lock);
		bq_flush_to_queue(bq);
		local_unlock_nested_bh(&bq->obj->bulkq->bq_lock);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}