/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors: Mina Almasry <almasrymina@google.com>
 *          Willem de Bruijn <willemb@google.com>
 *          Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;
        struct sg_table *sgt;
        struct net_device *dev;
        struct gen_pool *chunk_pool;
        /* Protects dev. */
        struct mutex lock;

        /* The user holds a ref (via the netlink API) for as long as they want
         * the binding to remain alive. Each page pool using this binding holds
         * a ref to keep the binding alive. Each allocated net_iov holds a
         * ref.
         *
         * The binding undoes itself and unmaps the underlying dmabuf once all
         * those refs are dropped and the binding is no longer desired or in
         * use.
         */
        refcount_t ref;

        /* The list of bindings currently active. Used for netlink to notify us
         * of the user dropping the bind.
         */
        struct list_head list;

        /* rxq's this binding is active on. */
        struct xarray bound_rxqs;

        /* ID of this binding. Globally unique to all bindings currently
         * active.
         */
        u32 id;
};
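
/* A sketch of walking bound_rxqs with the standard XArray iterator. The
 * entry type is an assumption; this header only documents that the rxqs a
 * binding is active on are tracked here:
 *
 *      struct netdev_rx_queue *rxq;
 *      unsigned long xa_idx;
 *
 *      xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
 *              ...;
 */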

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
        struct net_iov_area area;
        struct net_devmem_dmabuf_binding *binding;

        /* dma_addr of the start of the chunk. */
        dma_addr_t base_dma_addr;
};
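
/* base_dma_addr anchors the chunk in DMA space, and the embedded area tracks
 * the pages it covers. A minimal sketch of deriving a net_iov's DMA address
 * from its owner (the helper name is illustrative, not part of this header;
 * it mirrors the computation in net_iov_virtual_addr() below):
 *
 *      static inline dma_addr_t net_iov_dma_addr(const struct net_iov *niov)
 *      {
 *              struct dmabuf_genpool_chunk_owner *owner =
 *                      net_devmem_iov_to_chunk_owner(niov);
 *
 *              return owner->base_dma_addr +
 *                     ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 *      }
 */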

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
                       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                                    struct net_devmem_dmabuf_binding *binding,
                                    struct netlink_ext_ack *extack);
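
/* Expected call flow for the binding API above, sketched from the point of
 * view of a hypothetical netlink request handler (error handling trimmed;
 * dev, dmabuf_fd, rxq_idx and extack are assumed to come from the request):
 *
 *      struct net_devmem_dmabuf_binding *binding;
 *      int err;
 *
 *      binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *      if (IS_ERR(binding))
 *              return PTR_ERR(binding);
 *
 *      err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *      if (err)
 *              net_devmem_unbind_dmabuf(binding);
 *      return err;
 */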

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
        struct net_iov_area *owner = net_iov_owner(niov);

        return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
        return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
        return net_devmem_iov_binding(niov)->id;
}
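
/* These accessors chase net_iov -> chunk owner -> binding, so a consumer
 * holding only a net_iov can recover its binding and the globally unique
 * binding ID, e.g. to report where a received frag lives (niov here is
 * assumed valid and devmem-backed):
 *
 *      struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
 *      u32 token = net_devmem_iov_binding_id(niov);
 */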

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
        struct net_iov_area *owner = net_iov_owner(niov);

        return owner->base_virtual +
               ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
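
/* Since net_iov_idx() steps by one per net_iov within an area, net_iovs of
 * the same owner map to PAGE_SIZE-spaced virtual addresses. For two net_iovs
 * a and b in one area (a property of the formula above, not a separate
 * guarantee):
 *
 *      net_iov_virtual_addr(b) - net_iov_virtual_addr(a) ==
 *              (net_iov_idx(b) - net_iov_idx(a)) * PAGE_SIZE
 */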

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
        refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
        if (!refcount_dec_and_test(&binding->ref))
                return;

        __net_devmem_dmabuf_binding_free(binding);
}
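
/* A sketch of the intended get/put pairing, assuming a consumer (a page
 * pool, say) that must keep the binding alive for its own lifetime:
 *
 *      net_devmem_dmabuf_binding_get(binding);  (take a ref before stashing)
 *      pool->binding = binding;
 *      ...
 *      net_devmem_dmabuf_binding_put(pool->binding);
 *
 * The final put tears the binding down and unmaps the dmabuf via
 * __net_devmem_dmabuf_binding_free().
 */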

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
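
/* net_is_devmem_iov() lets code that handles generic net_iovs check that an
 * iov is devmem-backed before using the accessors above; a sketch:
 *
 *      if (net_is_devmem_iov(niov))
 *              id = net_devmem_iov_binding_id(niov);
 */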

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
                       struct netlink_ext_ack *extack)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                                struct net_devmem_dmabuf_binding *binding,
                                struct netlink_ext_ack *extack)
{
        return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
        return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
        return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
        return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
        return false;
}
#endif

#endif /* _NET_DEVMEM_H */