/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * when the user drops the bind.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;

	/* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
	enum dma_data_direction direction;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array makes it convenient to map virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

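/*
 * Illustrative sketch (not an in-tree helper; example_niov_dma_addr is a
 * hypothetical name): the DMA address of a net_iov can be derived from the
 * owning chunk's base_dma_addr plus the iov's page-sized offset within the
 * chunk, mirroring how net_iov_virtual_addr() below derives the virtual
 * address:
 *
 *	static dma_addr_t example_niov_dma_addr(const struct net_iov *niov)
 *	{
 *		struct dmabuf_genpool_chunk_owner *owner =
 *			net_devmem_iov_to_chunk_owner(niov);
 *
 *		return owner->base_dma_addr +
 *		       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 *	}
 */
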
void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);

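/*
 * Illustrative control-path flow (a sketch loosely modeled on the netdev
 * netlink handlers; the local variable names are assumptions): bind the
 * dmabuf, attach the binding to an rx queue, and unwind on failure:
 *
 *	struct net_devmem_dmabuf_binding *binding;
 *	int err;
 *
 *	binding = net_devmem_bind_dmabuf(dev, DMA_FROM_DEVICE, dmabuf_fd,
 *					 priv, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	if (err)
 *		net_devmem_unbind_dmabuf(binding);
 */
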
static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

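/*
 * Worked example for net_iov_virtual_addr() (hypothetical values): with
 * 4 KiB pages, an owner whose base_virtual is 0x10000, and a niov at
 * index 3 within that owner, the result is 0x10000 + (3 << 12) = 0x13000.
 */
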
static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

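/*
 * Example usage (sketch; the error value is illustrative): code that keeps
 * a binding pointer for later, asynchronous use must take its own reference
 * first, since a failed get means the binding is already being torn down:
 *
 *	if (!net_devmem_dmabuf_binding_get(binding))
 *		return -ENODEV;
 *
 *	... use the binding, e.g. from deferred TX work ...
 *
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * The final put schedules unbind_w, which unmaps the underlying dmabuf.
 */
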
void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);

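/*
 * Illustrative TX-path lookup (a sketch; the exact semantics live in
 * devmem.c): net_devmem_get_niov_at() resolves a virtual address within the
 * binding to the covering net_iov, via the tx_vec array above:
 *
 *	size_t off, size;
 *	struct net_iov *niov;
 *
 *	niov = net_devmem_get_niov_at(binding, addr, &off, &size);
 *
 * where off is the offset of addr into the returned niov and size the
 * number of bytes available from that offset.
 */
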
#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */