1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Device memory TCP support
4 *
5 * Authors: Mina Almasry <almasrymina@google.com>
6 * Willem de Bruijn <willemb@google.com>
7 * Kaiyuan Zhang <kaiyuanz@google.com>
8 *
9 */
10 #ifndef _NET_DEVMEM_H
11 #define _NET_DEVMEM_H
12
13 #include <net/netmem.h>
14 #include <net/netdev_netlink.h>
15
16 struct netlink_ext_ack;
17
struct net_devmem_dmabuf_binding {
	/* The bound dma-buf, the device attachment created for it, and the
	 * resulting scatter-gather table. NOTE(review): presumably set up at
	 * bind time — confirm against net_devmem_bind_dmabuf().
	 */
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	/* genpool the dmabuf's scatterlist chunks are inserted into;
	 * allocations are carved from it (see dmabuf_genpool_chunk_owner).
	 */
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	struct percpu_ref ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;

	/* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
	enum dma_data_direction direction;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array is convenient to map the virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	/* Deferred teardown work; __net_devmem_dmabuf_binding_free() takes
	 * the matching work_struct as its argument.
	 */
	struct work_struct unbind_w;
};
70
71 #if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* net_iov bookkeeping for this chunk; net_devmem_iov_to_chunk_owner()
	 * recovers the owner from a net_iov via container_of() on this field.
	 */
	struct net_iov_area area;
	/* Back-pointer to the binding this chunk belongs to. */
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};
84
/* Work handler performing the final teardown of a binding (the argument is
 * the binding's unbind_w work_struct).
 */
void __net_devmem_dmabuf_binding_free(struct work_struct *wq);

/* Bind the dma-buf referred to by @dmabuf_fd to @dev for @direction.
 * Returns the new binding on success or an ERR_PTR() on failure (the
 * !CONFIG_NET_DEVMEM stub returns ERR_PTR(-EOPNOTSUPP)).
 */
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);

/* Look up an active binding by its globally-unique id; NULL if not found
 * (matches the !CONFIG_NET_DEVMEM stub's return).
 */
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);

/* Undo a binding (netlink unbind path). */
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);

/* Attach @binding to rx queue @rxq_idx of @dev; returns 0 or a negative
 * errno (-EOPNOTSUPP in the !CONFIG_NET_DEVMEM stub).
 */
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
97
98 static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov * niov)99 net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
100 {
101 struct net_iov_area *owner = net_iov_owner(niov);
102
103 return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
104 }
105
106 static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov * niov)107 net_devmem_iov_binding(const struct net_iov *niov)
108 {
109 return net_devmem_iov_to_chunk_owner(niov)->binding;
110 }
111
net_devmem_iov_binding_id(const struct net_iov * niov)112 static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
113 {
114 return net_devmem_iov_binding(niov)->id;
115 }
116
net_iov_virtual_addr(const struct net_iov * niov)117 static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
118 {
119 struct net_iov_area *owner = net_iov_owner(niov);
120
121 return owner->base_virtual +
122 ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
123 }
124
125 static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding * binding)126 net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
127 {
128 return percpu_ref_tryget(&binding->ref);
129 }
130
131 static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding * binding)132 net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
133 {
134 percpu_ref_put(&binding->ref);
135 }
136
/* Take/drop a binding reference on behalf of a net_iov (see the comment on
 * net_devmem_dmabuf_binding::ref).
 */
void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

/* Allocate one net_iov from @binding; NULL on failure (matches the
 * !CONFIG_NET_DEVMEM stub). net_devmem_free_dmabuf() releases it.
 */
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

/* Resolve @dmabuf_id to a binding usable by @sk; returns an ERR_PTR() on
 * failure (-EOPNOTSUPP in the !CONFIG_NET_DEVMEM stub).
 */
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);

/* Map virtual address @addr within @binding to its net_iov, reporting the
 * offset and usable size via @off/@size. NOTE(review): presumably returns
 * NULL when @addr is out of range — confirm against the definition.
 */
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);
150
151 #else
152 struct net_devmem_dmabuf_binding;
153
/* !CONFIG_NET_DEVMEM stub: nothing to release. */
static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}
158
/* !CONFIG_NET_DEVMEM stub: no-op. */
static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}
162
/* !CONFIG_NET_DEVMEM stub: no-op. */
static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}
166
/* !CONFIG_NET_DEVMEM stub: devmem binding is unsupported. */
static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}
177
/* !CONFIG_NET_DEVMEM stub: no bindings can exist. */
static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}
182
/* !CONFIG_NET_DEVMEM stub: no-op. */
static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}
187
/* !CONFIG_NET_DEVMEM stub: queue binding is unsupported. */
static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}
196
/* !CONFIG_NET_DEVMEM stub: allocation always fails. */
static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}
202
/* !CONFIG_NET_DEVMEM stub: no-op. */
static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}
206
/* !CONFIG_NET_DEVMEM stub: no devmem net_iovs, so no address. */
static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}
211
/* !CONFIG_NET_DEVMEM stub: no binding, so no id. */
static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
216
/* !CONFIG_NET_DEVMEM stub: binding lookup is unsupported. */
static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}
222
/* !CONFIG_NET_DEVMEM stub: no net_iov to resolve. */
static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}
229
/* !CONFIG_NET_DEVMEM stub: net_iovs have no binding. */
static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
235 #endif
236
237 #endif /* _NET_DEVMEM_H */
238