/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/types.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/kernel.h>		/* MALLOC_DEFINE */
#include <sys/proc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

/* M_NETMAP only used in here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM	8*4096	/* if too big takes too much time to allocate */
#else
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#endif
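
/*
 * Worked example: with the default 2048-byte buffers (see the
 * NETMAP_BUF_POOL entry in nm_mem.params below), the large-machine
 * cap of 20*4096*2 = 163840 buffers bounds the buffer pool at
 * 163840 * 2048 bytes = 320 MiB.
 */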

#define NETMAP_POOL_MAX_NAMSZ	32


enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};
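
/*
 * Each allocator manages three object pools: NETMAP_IF_POOL holds the
 * struct netmap_if headers exported to userspace, NETMAP_RING_POOL
 * holds the struct netmap_ring descriptors together with their slot
 * arrays, and NETMAP_BUF_POOL holds the packet buffers themselves.
 */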


struct netmap_obj_params {
	u_int size;
	u_int num;

	u_int last_size;
	u_int last_num;
};

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	size_t memtotal;	/* actual total memory space */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap; /* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */

	u_int objtotal;		/* actual total number of objects. */
	u_int numclusters;	/* actual number of clusters */
	u_int objfree;		/* number of free objects. */

	int alloc_done;		/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)		NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut *);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *, struct netmap_adapter *);
	void (*nmd_deref)(struct netmap_mem_d *, struct netmap_adapter *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_mem_d *,
			struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_mem_d *,
			struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_mem_d *,
			struct netmap_adapter *);
};
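
/*
 * The nmd_* methods above form a vtable: the netmap_mem_* wrappers
 * below acquire NMA_LOCK and dispatch through nmd->ops, so the same
 * entry points serve the global, private and external allocators.
 */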

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	size_t nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
#define NETMAP_MEM_NOMAP	0x10	/* do not map/unmap pdevs */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */
	int nm_numa_domain;	/* local NUMA domain */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	const struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

int
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_lut(nmd, lut);
	NMA_UNLOCK(nmd);

	return rv;
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
    u_int *memflags, nm_memid_t *memid)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
	NMA_UNLOCK(nmd);

	return rv;
}

vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}

static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	if (nmd->active) {
		/* already in use. Not fatal, but we
		 * cannot change the configuration
		 */
		return 0;
	}

	return nmd->ops->nmd_config(nmd);
}

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
{
	ssize_t rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_if_offset(nmd, off);
	NMA_UNLOCK(nmd);

	return rv;
}

static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}

struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nifp = nmd->ops->nmd_if_new(nmd, na, priv);
	NMA_UNLOCK(nmd);

	return nifp;
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_if_delete(nmd, na, nif);
	NMA_UNLOCK(nmd);
}

int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	int rv;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_rings_create(nmd, na);
	NMA_UNLOCK(nmd);

	return rv;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_rings_delete(nmd, na);
	NMA_UNLOCK(nmd);
}

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_check_group(struct netmap_mem_d *, void *);
static void nm_mem_release_id(struct netmap_mem_d *);

nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
static NM_MTX_T nm_mem_list_lock;

struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}

void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_check_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);

	if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}

static int
nm_isset(uint32_t *bitmap, u_int i)
{
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
}
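
/*
 * Bitmap layout sketch: bit i lives in the 32-bit word i>>5 under mask
 * 1U << (i & 31), so e.g. object #70 maps to bit 6 of bitmap[2].  A set
 * bit means "free": allocation clears it, freeing sets it again.
 */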


static int
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
{
	u_int n, j;

	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
			    p->name);
			return ENOMEM;
		}
		p->bitmap_slots = n;
	} else {
		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	}

	p->objfree = 0;
	/*
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	 * free.
	 */
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
			continue;
		}
		p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
		p->objfree++;
	}

	if (netmap_verbose)
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		if (netmap_verbose)
			nm_prerr("%s: no objects available", p->name);
		return ENOMEM;
	}

	return 0;
}

static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
	}
	return 0;
}

int
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int last_user = 0;
	NMA_LOCK(nmd);
	if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP))
		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->active == 1) {
		last_user = 1;
		/*
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		 * reclaimed.
		 */
		netmap_mem_init_bitmaps(nmd);
	}
	nmd->ops->nmd_deref(nmd, na);

	nmd->active--;
	if (last_user) {
		nmd->lasterr = 0;
	}

	NMA_UNLOCK(nmd);
	return last_user;
}


/* accessor functions */
static int
netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
#ifdef __FreeBSD__
	lut->plut = lut->lut;
#endif
	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;

	return 0;
}

static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 2,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Each virtual (VALE) port has its own allocator.
 */
extern const struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,
	.nm_grp = -1,
	.nm_numa_domain = -1,

	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};

static struct netmap_mem_d nm_mem_blueprint;

/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,
	.nm_numa_domain = -1,

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RWTUN, &nm_mem.params[id].size, 0, \
	    "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, \
	    "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RWTUN, &nm_mem.params[id].num, 0, \
	    "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, \
	    "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RWTUN, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RWTUN, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s"); \
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
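
/*
 * The macro above exposes, for the buffer pool, dev.netmap.buf_size
 * and dev.netmap.buf_num (requested values), dev.netmap.buf_curr_size
 * and dev.netmap.buf_curr_num (read-only current values), plus the
 * priv_buf_* defaults; likewise for "if" and "ring".  An illustrative
 * FreeBSD shell session:
 *
 *	sysctl dev.netmap.buf_num=163840	# request more buffers
 *	sysctl dev.netmap.buf_curr_num		# what is in effect now
 *
 * The RWTUN knobs only record the requested configuration; it takes
 * effect the next time the allocator is reconfigured and finalized.
 */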

int netmap_port_numa_affinity = 0;
SYSCTL_INT(_dev_netmap, OID_AUTO, port_numa_affinity,
    CTLFLAG_RDTUN, &netmap_port_numa_affinity, 0,
    "Use NUMA-local memory for memory pools when possible");

/* call with nm_mem_list_lock held */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id, int domain)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->nm_grp = grp_id;
			nmd->nm_numa_domain = domain;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}
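
/*
 * Worked example for the gap search above: with allocators 1, 2 and 4
 * on the (sorted, circular) list and netmap_last_mem_d pointing at 4,
 * the first iteration tries id 5 against the successor of 4, which is
 * the smallest element (id 1); since 5 != 1, the new allocator gets
 * id 5 and is linked in as the new last element.  The scan only keeps
 * advancing while the candidate id collides with an existing one.
 */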

/* call with nm_mem_list_lock *not* held */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd, grp_id, -1);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/* call with nm_mem_list_lock held */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

static int
nm_mem_check_group(struct netmap_mem_d *nmd, void *dev)
{
	int err = 0, id;

	/* Skip non-hw adapters.
	 * VALE ports can use a particular allocator through the
	 * vale-ctl -m option.
	 */
	if (!dev)
		return 0;
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp != id) {
		if (netmap_verbose)
			nm_prerr("iommu group mismatch: %d vs %d",
			    nmd->nm_grp, id);
		nmd->lasterr = err = ENOMEM;
	}

	NMA_UNLOCK(nmd);
	return err;
}

static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}

#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry *lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
	    p[NETMAP_IF_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal,
	    p[NETMAP_IF_POOL].memtotal
		+ p[NETMAP_RING_POOL].memtotal
		+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0;	/* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}
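
/*
 * Worked example (illustrative sizes): suppose the if pool spans
 * 100 KiB, the ring pool 600 KiB, and buffers are 2048 bytes.  An
 * offset of 704 KiB falls past the first two pools, leaving
 * 704 - 700 = 4 KiB within the buffer pool, so the loop returns
 * vtophys(lut[4096 / 2048].vaddr) + 4096 % 2048, i.e. the physical
 * address of buffer #2 in that pool.
 */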

#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gathers all the objects that make up the pools and
 * maps one contiguous virtual memory space for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List as wide as the sum
 *     of the memory needed for the pools
 * 2 - cycle through all the objects in every pool, and for each one:
 *
 *     2a - get the list of its physical address descriptors
 *     2b - calculate the offset into the array of page descriptors of
 *          the main MDL
 *     2c - copy the descriptors of the object into the main MDL
 *
 * 3 - return the resulting MDL, which needs to be mapped in userland
 *
 * In this way we end up with a single MDL that describes all the
 * memory for the objects.
 */

PMDL
win32_build_user_vm_map(struct netmap_mem_d *nmd)
{
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	uint64_t memsize;
	int i, j;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalised yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			nm_prerr("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
		}
		IoFreeMdl(tempMdl);
	}
	NMA_UNLOCK(nmd);
	return mainMdl;
}

#endif /* _WIN32 */

/*
 * helper function for OS-specific mmap routines (currently only windows).
 * Given an nmd and a pool index, returns the cluster size and number of clusters.
 * Returns 0 if memory is finalised and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change.
 */
int
netmap_mem2_get_pool_info(struct netmap_mem_d *nmd, u_int pool, u_int *clustsize, u_int *numclusters)
{
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	}
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
}

static int
netmap_mem2_get_info(struct netmap_mem_d *nmd, uint64_t *size,
    u_int *memflags, nm_memid_t *id)
{
	int error = 0;
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	return error;
}

/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

static ssize_t
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	return netmap_if_offset(nmd, addr);
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j = 0;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
		return NULL;
	}

	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}
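
/*
 * The 'start' hint is what keeps batched allocations cheap: callers
 * that allocate many objects (netmap_extra_alloc(), netmap_new_bufs())
 * carry one scan position across calls, e.g.:
 *
 *	uint32_t pos = 0, idx;
 *	void *v1 = netmap_obj_malloc(p, len, &pos, &idx);
 *	void *v2 = netmap_obj_malloc(p, len, &pos, &idx);
 *
 * so the second call resumes the bitmap scan at word 'pos' instead of
 * rescanning from slot 0.
 */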


/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		nm_prerr("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is out of the scope of the current cluster */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

unsigned
netmap_mem_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}
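
/*
 * List layout sketch: *head holds the index of the first extra buffer
 * and the first 4 bytes of each buffer store the index of the next
 * one, 0 terminating the list (buffer 0 is reserved, so it doubles as
 * the null index).  After allocating, say, buffers 7 and then 9, we
 * have *head == 9, buffer 9 pointing to 7 and buffer 7 pointing to 0.
 */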

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	nm_prdis("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
}


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
		slot[i].ptr = 0;
	}

	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
	nm_prdis("%s: released some buffers, available: %u",
	    nmd->pools[NETMAP_BUF_POOL].name,
	    nmd->pools[NETMAP_BUF_POOL].objfree);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		nm_os_free(p->bitmap);
	p->bitmap = NULL;
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
		 */
		return;
	}
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator().  The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			free(p->lut[i].vaddr, M_NETMAP);
		}
		nm_free_lut(p->lut, p->objtotal);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
	p->alloc_done = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
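
/*
 * Worked example for the sizing below, assuming 4 KiB pages: a request
 * for 2048-byte objects is satisfied at i = 2 (2 * 2048 is an exact
 * page multiple), giving clustentries = 2 and clustsize = 4096.  A
 * request for 1536-byte objects needs i = 8 (8 * 1536 = 12288, three
 * pages), so eight objects share each 12 KiB cluster with no gap at
 * the end.
 */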


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per entry */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_BUF_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		nm_prerr("requested objsize %d out of range [%d, %d]",
		    objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		nm_prerr("requested objtotal %d out of range [%d, %d]",
		    objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("objsize %d clustsize %d objects %d",
		    objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_mem_d *nmd, struct netmap_obj_pool *p)
{
	int i; /* must be signed */

	if (p->lut) {
		/* if the lut is already there we assume that also all the
		 * clusters have already been allocated, possibly by somebody
		 * else (e.g., extmem). In the latter case, the alloc_done flag
		 * will remain at zero, so that we will not attempt to
		 * deallocate the clusters by ourselves in
		 * netmap_reset_obj_allocator.
		 */
		return 0;
	}

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;
	p->alloc_done = 1;

	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		nm_prerr("Unable to create lookup table for '%s'", p->name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers
	 */

	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		/*
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		 */
		if (nmd->nm_numa_domain == -1) {
			clust = contigmalloc(p->_clustsize, M_NETMAP,
			    M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
		} else {
			struct domainset *ds;

			ds = DOMAINSET_PREF(nmd->nm_numa_domain);
			clust = contigmalloc_domainset(p->_clustsize, M_NETMAP,
			    ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
		}
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			nm_prerr("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					free(p->lut[i].vaddr, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set lut state for all buffers in the current cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			p->lut[i].paddr = vtophys(clust);
#endif
		}
	}
	p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
	if (netmap_verbose)
		nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_mem_params_changed(struct netmap_obj_params *p)
{
	int i, rv = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
			rv = 1;
		}
	}
	return rv;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->objtotal;
	struct netmap_lut *lut;
	if (na == NULL || na->pdev == NULL)
		return 0;

	lut = &na->na_lut;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */
	nm_prdis("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL || na->pdev == NULL)
		return 0;
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
		return 0;
	}

	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
		    p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}
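
/*
 * Note on the mapping loop above: only the first object of each
 * cluster is DMA-mapped; since a cluster is physically contiguous,
 * the paddr of every other object is derived by adding _objsize
 * strides to its predecessor, saving one IOMMU mapping per object.
 */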

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p, int grp_id,
    const struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;
	int checksz = 0;

	/* if memtotal != 0, we check that the request fits the available
	 * memory. Moreover, any surplus memory is assigned to buffers.
	 */
	checksz = (memtotal > 0);

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d, grp_id);
	if (err)
		goto error_free;
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
		    nm_blueprint.pools[i].name,
		    d->name);
		if (checksz) {
			uint64_t poolsz = (uint64_t)p[i].num * p[i].size;
			if (memtotal < poolsz) {
				nm_prerr("%s: request too large", d->pools[i].name);
				err = ENOMEM;
				goto error_rel_id;
			}
			memtotal -= poolsz;
		}
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}
	if (checksz && memtotal > 0) {
		uint64_t sz = d->params[NETMAP_BUF_POOL].size;
		uint64_t n = (memtotal + sz - 1) / sz;

		if (n) {
			if (netmap_verbose) {
				nm_prinf("%s: adding %llu more buffers",
				    d->pools[NETMAP_BUF_POOL].name,
				    (unsigned long long)n);
			}
			d->params[NETMAP_BUF_POOL].num += n;
		}
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_destroy_lock;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

error_destroy_lock:
	NMA_LOCK_DESTROY(d);
error_rel_id:
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
    u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring size may differ from that of
	 * the parent port. As a compromise, we allocate twice the space
	 * actually needed if the pipe rings were the same size as the
	 * parent rings.
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
	/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
		    p[NETMAP_IF_POOL].num,
		    p[NETMAP_IF_POOL].size,
		    p[NETMAP_RING_POOL].num,
		    p[NETMAP_RING_POOL].size,
		    p[NETMAP_BUF_POOL].num,
		    p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr);

	return d;
}
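
/*
 * Worked sizing example: for a port with one tx and one rx ring of
 * 1024 slots each and no pipes or extra buffers, txr and rxr become 2
 * (host rings included) and the buffer request evaluates to
 * (0 + 2) * 1024 + (0 + 2) * 1024 + 2 = 4098 buffers, which matches
 * the default in netmap_min_priv_params above.
 */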

/*
 * Find an existing allocator for this IOMMU group and NUMA domain, or
 * create a new one. Non-hw adapters fall back to the global allocator.
 */
1828 struct netmap_mem_d *
netmap_mem_get_allocator(struct netmap_adapter * na)1829 netmap_mem_get_allocator(struct netmap_adapter *na)
1830 {
1831 int i, domain, err, grp_id;
1832 struct netmap_mem_d *nmd;
1833
1834 if (na == NULL || na->pdev == NULL)
1835 return netmap_mem_get(&nm_mem);
1836
1837 domain = nm_numa_domain(na->pdev);
1838 grp_id = nm_iommu_group_id(na->pdev);
1839
1840 NM_MTX_LOCK(nm_mem_list_lock);
1841 nmd = netmap_last_mem_d;
1842 do {
1843 if (!(nmd->flags & NETMAP_MEM_HIDDEN) &&
1844 nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) {
1845 nmd->refcount++;
1846 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
1847 NM_MTX_UNLOCK(nm_mem_list_lock);
1848 return nmd;
1849 }
1850 nmd = nmd->next;
1851 } while (nmd != netmap_last_mem_d);
1852
1853 nmd = nm_os_malloc(sizeof(*nmd));
1854 if (nmd == NULL)
1855 goto error;
1856
1857 *nmd = nm_mem_blueprint;
1858
1859 err = nm_mem_assign_id_locked(nmd, grp_id, domain);
1860 if (err)
1861 goto error_free;
1862
1863 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id);
1864
1865 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1866 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s",
1867 nm_mem_blueprint.pools[i].name, nmd->name);
1868 }
1869
1870 NMA_LOCK_INIT(nmd);
1871
1872 NM_MTX_UNLOCK(nm_mem_list_lock);
1873 return nmd;
1874
1875 error_free:
1876 nm_os_free(nmd);
1877 error:
1878 NM_MTX_UNLOCK(nm_mem_list_lock);
1879 return NULL;
1880 }

/* call with lock held */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	nm_prdis("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
			nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:
	return nmd->lasterr;
}

static int
netmap_mem2_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	return nmd->lasterr;
}

static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

int
netmap_mem_init(void)
{
	nm_mem_blueprint = nm_mem;
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

static int
netmap_mem_ring_needed(struct netmap_kring *kring)
{
	return kring->ring == NULL &&
		(kring->users > 0 ||
		 (kring->nr_kflags & NKR_NEEDRING));
}

static int
netmap_mem_ring_todelete(struct netmap_kring *kring)
{
	return kring->ring != NULL &&
		kring->users == 0 &&
		!(kring->nr_kflags & NKR_NEEDRING);
}
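
/*
 * Note that the two predicates above are not complementary: a kring
 * whose ring is already allocated and still in use matches neither,
 * so netmap_mem2_rings_create() and netmap_mem2_rings_delete() both
 * leave it untouched.
 */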


/* call with NMA_LOCK held
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (!netmap_mem_ring_needed(kring)) {
				/* unneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(nmd, len);
			if (ring == NULL) {
				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			nm_prdis("ring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (nmd->pools[NETMAP_IF_POOL].memtotal +
				nmd->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(nmd, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(nmd);
			nm_prdis("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	return 0;

cleanup:
	/* we cannot actually cleanup here, since we don't own kring->users
	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
	 * to do the cleanup
	 */

	return ENOMEM;
}
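
/*
 * For reference, the buf_ofs value computed above is what lets
 * userspace reach buffers relative to a ring; the NETMAP_BUF() macro
 * in netmap_user.h resolves buffer idx essentially as
 *
 *	(char *)ring + ring->buf_ofs + idx * ring->nr_buf_size
 */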

static void
netmap_mem2_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (!netmap_mem_ring_todelete(kring)) {
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
						kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("deleting ring %s", kring->name);
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				nm_prdis("freeing bufs for %s", kring->name);
				netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
			} else {
				nm_prdis("NOT freeing bufs for %s", kring->name);
			}
			netmap_ring_free(nmd, ring);
			kring->ring = NULL;
		}
	}
}

/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
static struct netmap_if *
netmap_mem2_if_new(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
	enum txrx t;

	ntot = 0;
	for_rx_tx(t) {
		/* account for the (possibly fake) host rings */
		n[t] = netmap_all_rings(na, t);
		ntot += n[t];
	}
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(nmd, len);
	if (nifp == NULL) {
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
		(na->num_host_tx_rings ? na->num_host_tx_rings : 1);
	*(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
		(na->num_host_rx_rings ? na->num_host_rx_rings : 1);
	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(nmd, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
				&& i < priv->np_qlast[NR_TX]) {
			ofs = netmap_ring_offset(nmd,
						 na->tx_rings[i]->ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
	}
	for (i = 0; i < n[NR_RX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
				&& i < priv->np_qlast[NR_RX]) {
			ofs = netmap_ring_offset(nmd,
						 na->rx_rings[i]->ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
	}

	return (nifp);
}
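
/*
 * For reference, userspace consumes the ring_ofs[] array filled above
 * through the NETMAP_TXRING()/NETMAP_RXRING() macros in netmap_user.h,
 * which resolve a ring essentially as
 *
 *	(struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i])
 *
 * with the rx entries stored after the n[NR_TX] tx entries.
 */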

static void
netmap_mem2_if_delete(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(nmd, nifp);
}

static void
netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("active = %d", nmd->active);
}

const struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

int
netmap_mem_pools_info_get(struct nmreq_pools_info *req,
		struct netmap_mem_d *nmd)
{
	int ret;

	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
					&req->nr_mem_id);
	if (ret) {
		return ret;
	}

	NMA_LOCK(nmd);
	req->nr_if_pool_offset = 0;
	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;

	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;

	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
			     nmd->pools[NETMAP_RING_POOL].memtotal;
	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
	NMA_UNLOCK(nmd);

	return 0;
}
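
/*
 * Layout of a finalized allocator as reported above, with offsets
 * relative to the start of the shared memory region:
 *
 *	0				if pool
 *	if.memtotal			ring pool
 *	if.memtotal + ring.memtotal	buf pool
 */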

#ifdef WITH_EXTMEM
struct netmap_mem_ext {
	struct netmap_mem_d up;

	struct nm_os_extmem *os;
	struct netmap_mem_ext *next, *prev;
};

/* takes nm_mem_ext_list_lock internally */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
	return e;
}


static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->os)
		nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
}

static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	return 0;
}

struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
{
	int error = 0;
	int i, j;
	struct netmap_mem_ext *nme;
	char *clust;
	size_t off;
	struct nm_os_extmem *os = NULL;
	int nr_pages;

	// XXX sanity checks
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("if %d %d ring %d %d buf %d %d",
			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);

	os = nm_os_extmem_create(usrptr, pi, &error);
	if (os == NULL) {
		nm_prerr("os extmem creation failed");
		goto out;
	}

	nme = netmap_mem_ext_search(os);
	if (nme) {
		nm_os_extmem_delete(os);
		return &nme->up;
	}
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("not found, creating new");

	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			-1,
			&netmap_mem_ext_ops,
			pi->nr_memsize,
			&error);
	if (nme == NULL)
		goto out_unmap;

	nr_pages = nm_os_extmem_nr_pages(os);

	/* from now on the pages will be released by the nme destructor;
	 * we set os = NULL below to prevent deletion in out_unmap
	 */
	nme->os = os;
	os = NULL; /* pass ownership */

	clust = nm_os_extmem_nextpage(nme->os);
	off = 0;
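	/*
	 * Packing sketch (hypothetical numbers): with PAGE_SIZE 4096 and
	 * 2048-byte objects, two objects fit per page and 'off' simply
	 * advances within the page. With 3000-byte objects, every other
	 * object straddles a page boundary and is kept only if the next
	 * user page is virtually contiguous with the current one;
	 * otherwise (or if we run out of pages) it is flagged in
	 * invalid_bitmap and skipped.
	 */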
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];

		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;

		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		if (nr_pages == 0) {
			p->objtotal = 0;
			p->memtotal = 0;
			p->objfree = 0;
			continue;
		}

		for (j = 0; j < o->num && nr_pages > 0; j++) {
			size_t noff;

			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
				off = noff;
				continue;
			}
			nm_prdis("too big, recomputing offset...");
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				noff -= PAGE_SIZE;
				clust = nm_os_extmem_nextpage(nme->os);
				nr_pages--;
				nm_prdis("noff %zu page %p nr_pages %d", noff,
						clust, nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
					(nr_pages == 0 ||
					 old_clust + PAGE_SIZE != clust))
				{
					/* out of space or non contiguous,
					 * drop this object
					 */
					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
					nm_prdis("non contiguous at off %zu, drop", noff);
				}
				if (nr_pages == 0)
					break;
			}
			off = noff;
		}
		p->objtotal = j;
		p->numclusters = p->objtotal;
		p->memtotal = j * (size_t)p->_objsize;
		nm_prdis("%d memtotal %zu", j, p->memtotal);
	}

	netmap_mem_ext_register(nme);

	return &nme->up;

out_delete:
	netmap_mem_put(&nme->up);
out_unmap:
	if (os)
		nm_os_extmem_delete(os);
out:
	if (perror)
		*perror = error;
	return NULL;
}
#endif /* WITH_EXTMEM */


#ifdef WITH_PTNETMAP
struct mem_pt_if {
	struct mem_pt_if *next;
	if_t ifp;
	unsigned int nifp_offset;
};

/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;

	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

/* Link a passthrough interface to a passthrough netmap allocator. */
static int
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, if_t ifp,
			    unsigned int nifp_offset)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));

	if (!ptif) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	ptif->ifp = ifp;
	ptif->nifp_offset = nifp_offset;
	/* insert at the head of the list */
	ptif->next = ptnmd->pt_ifs;
	ptnmd->pt_ifs = ptif;

	NMA_UNLOCK(nmd);

	nm_prinf("ifp=%s,nifp_offset=%u",
		if_name(ptif->ifp), ptif->nifp_offset);

	return 0;
}

/* Called with NMA_LOCK(nmd) held. */
static struct mem_pt_if *
netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, if_t ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *curr;

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			return curr;
		}
	}

	return NULL;
}

/* Unlink a passthrough interface from a passthrough netmap allocator. */
int
netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, if_t ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *prev = NULL;
	struct mem_pt_if *curr;
	int ret = -1;

	NMA_LOCK(nmd);

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			if (prev) {
				prev->next = curr->next;
			} else {
				ptnmd->pt_ifs = curr->next;
			}
			nm_prinf("removed (ifp=%s,nifp_offset=%u)",
				if_name(curr->ifp), curr->nifp_offset);
			nm_os_free(curr);
			ret = 0;
			break;
		}
		prev = curr;
	}

	NMA_UNLOCK(nmd);

	return ret;
}

static int
netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		return EINVAL;
	}

	*lut = ptnmd->buf_lut;
	return 0;
}

static int
netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
			     u_int *memflags, uint16_t *id)
{
	int error = 0;

	error = nmd->ops->nmd_config(nmd);
	if (error)
		goto out;

	if (size)
		*size = nmd->nm_totalsize;
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;

out:
	return error;
}

static vm_paddr_t
netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	vm_paddr_t paddr;

	/* if the offset is valid, just return nm_paddr + off */
	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	nm_prdis("off %lx paddr %lx", off, (unsigned long)paddr);
	return paddr;
}

static int
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and the configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	uint64_t mem_size;
	uint32_t bufsize;
	uint32_t nbuffers;
	uint32_t poolofs;
	vm_paddr_t paddr;
	char *vaddr;
	int i;
	int error = 0;

	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (ptnmd->ptn_dev == NULL) {
		nm_prerr("ptnetmap memdev not attached");
		error = ENOMEM;
		goto out;
	}
	/* Map memory through ptnetmap-memdev BAR. */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				      &ptnmd->nm_addr, &mem_size);
	if (error)
		goto out;

	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					  PTNET_MDEV_IO_BUF_POOL_OBJNUM);

	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		nm_prinf("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			nm_prerr("lut allocation failed");
			return ENOMEM;
		}
	}

	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
	paddr = ptnmd->nm_paddr + poolofs;

	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
		vaddr += bufsize;
		paddr += bufsize;
	}

	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = mem_size;

	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this, why do we need this
	 * replication? maybe nmd->pools[] should not be
	 * there for the guest allocator? */
	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;

	nmd->flags |= NETMAP_MEM_FINALIZED;
out:
	return error;
}
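
/*
 * After a successful finalize, guest buffer i is reachable at
 * nm_addr + poolofs + i * bufsize inside the BAR mapping, which is
 * exactly the vaddr recorded in buf_lut by the loop above.
 */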

static void
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (nmd->active == 1 &&
		(nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		}
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
	}
}

static ssize_t
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
}

static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		nm_prinf("deleting %p", nmd);
	if (nmd->active > 0)
		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
	if (netmap_verbose)
		nm_prinf("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	nm_os_free(nmd);
}

static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
				    ptif->nifp_offset);
out:
	return nifp;
}

static void
netmap_mem_pt_guest_if_delete(struct netmap_mem_d *nmd,
		struct netmap_adapter *na, struct netmap_if *nifp)
{
	struct mem_pt_if *ptif;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
	}
}

static int
netmap_mem_pt_guest_rings_create(struct netmap_mem_d *nmd,
				 struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	int i, error = -1;

	ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
		struct netmap_kring *kring = na->tx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp + nifp->ring_ofs[i]);
	}
	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
		struct netmap_kring *kring = na->rx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp +
			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
	}

	error = 0;
out:
	return error;
}

static void
netmap_mem_pt_guest_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
#if 0
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];

			kring->ring = NULL;
		}
	}
#endif
	(void)nmd;
	(void)na;
}

static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
};

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
{
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;

	do {
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
			((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			mem = scan;
			mem->refcount++;
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
			break;
		}
		scan = scan->next;
	} while (scan != netmap_last_mem_d);

	return mem;
}

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_create(nm_memid_t mem_id)
{
	struct netmap_mem_ptg *ptnmd;
	int err = 0;

	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
	if (ptnmd == NULL) {
		err = ENOMEM;
		goto error;
	}

	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
	ptnmd->host_mem_id = mem_id;
	ptnmd->pt_ifs = NULL;

	/* Assign new id in the guest (We have the lock) */
	err = nm_mem_assign_id_locked(&ptnmd->up, -1, -1);
	if (err)
		goto error;

	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
	ptnmd->up.flags |= NETMAP_MEM_IO;

	NMA_LOCK_INIT(&ptnmd->up);

	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);

	return &ptnmd->up;
error:
	netmap_mem_pt_guest_delete(&ptnmd->up);
	return NULL;
}

/*
 * find the host id in the guest allocators, and create a guest allocator
 * if it is not there
 */
static struct netmap_mem_d *
netmap_mem_pt_guest_get(nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_mem_pt_guest_find_memid(mem_id);
	if (nmd == NULL) {
		nmd = netmap_mem_pt_guest_create(mem_id);
	}
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return nmd;
}

/*
 * The guest allocator can be created by ptnetmap_memdev (during the device
 * attach) or by the ptnetmap device (ptnet), during netmap_attach.
 *
 * The order is not important (we have different orders on Linux and FreeBSD).
 * Whichever comes first creates the allocator, and the other one simply
 * attaches to it.
 */

/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
 * the guest */
struct netmap_mem_d *
netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;
	struct netmap_mem_ptg *ptnmd;

	nmd = netmap_mem_pt_guest_get(mem_id);

	/* assign this device to the guest allocator */
	if (nmd) {
		ptnmd = (struct netmap_mem_ptg *)nmd;
		ptnmd->ptn_dev = ptn_dev;
	}

	return nmd;
}

/* Called when the ptnet device is attaching */
struct netmap_mem_d *
netmap_mem_pt_guest_new(if_t ifp,
			unsigned int nifp_offset,
			unsigned int memid)
{
	struct netmap_mem_d *nmd;

	if (ifp == NULL) {
		return NULL;
	}

	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);

	if (nmd) {
		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
	}

	return nmd;
}

#endif /* WITH_PTNETMAP */