xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */

#include <net/xdp_sock_drv.h>
#include "pool.h"
#include "setup.h"
#include "en/params.h"

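/* DMA-map the pool's UMEM for the given core device. DMA_ATTR_SKIP_CPU_SYNC
 * skips CPU cache syncs at map/unmap time; the data path syncs individual
 * frames (e.g. xsk_buff_dma_sync_for_cpu()) when ownership changes instead.
 */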
static int mlx5e_xsk_map_pool(struct mlx5_core_dev *mdev,
			      struct xsk_buff_pool *pool)
{
	struct device *dev = mlx5_core_dma_dev(mdev);

	return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}

static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
				 struct xsk_buff_pool *pool)
{
	return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
}

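/* The pools array (one slot per channel) is allocated lazily on first use
 * and refcounted, so it is freed again once the last XSK pool is removed.
 * ever_used is never cleared: it records that XSK was enabled at least once
 * in this netdev's lifetime.
 */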
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
	if (!xsk->pools) {
		xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
				     sizeof(*xsk->pools), GFP_KERNEL);
		if (unlikely(!xsk->pools))
			return -ENOMEM;
	}

	xsk->refcnt++;
	xsk->ever_used = true;

	return 0;
}

static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
{
	if (!--xsk->refcnt) {
		kfree(xsk->pools);
		xsk->pools = NULL;
	}
}

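/* Publish or clear the pool in its per-channel slot; the slot table itself
 * is created and freed by the get/put helpers above.
 */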
static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
{
	int err;

	err = mlx5e_xsk_get_pools(xsk);
	if (unlikely(err))
		return err;

	xsk->pools[ix] = pool;
	return 0;
}

static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
{
	xsk->pools[ix] = NULL;

	mlx5e_xsk_put_pools(xsk);
}

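/* Headroom and chunk size are carried in 16-bit fields of
 * struct mlx5e_xsk_param, so reject pools whose values wouldn't fit.
 */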
static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_headroom(pool) <= 0xffff &&
		xsk_pool_get_chunk_size(pool) <= 0xffff;
}

void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
{
	xsk->headroom = xsk_pool_get_headroom(pool);
	xsk->chunk_size = xsk_pool_get_chunk_size(pool);
	xsk->unaligned = pool->unaligned;
}

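/* Enable an XSK pool on channel ix. The pool is DMA-mapped and published
 * first; then, if the channels are closed or no XDP program is attached,
 * only the parameters are validated, and the XSK queues are created later
 * (on open, or when an XDP program is set). On a live XDP-enabled channel,
 * the XSK RQ/SQ are created and activated immediately and RX steering for
 * this queue is switched over to the XSK RQ.
 */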
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
				   struct xsk_buff_pool *pool, u16 ix)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	int err;

	if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
		return -EBUSY;

	if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
		return -EINVAL;

	err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);
	if (unlikely(err))
		return err;

	err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
	if (unlikely(err))
		goto err_unmap_pool;

	mlx5e_build_xsk_param(pool, &xsk);

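	/* Striding RQ is only efficient when the XSK frame size matches a UMR
	 * page size the device supports; otherwise the oversized UMR mode is
	 * used, which needs extra per-frame mappings and is expected to be
	 * slower, hence the warning below.
	 */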
	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
		const char *recommendation = is_power_of_2(xsk.chunk_size) ?
			"Upgrade firmware" : "Disable striding RQ";

		mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n",
			       xsk.chunk_size, recommendation);
	}

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* XSK objects will be created on open. */
		goto validate_closed;
	}

	if (!params->xdp_prog) {
		/* XSK objects will be created when an XDP program is set,
		 * and the channels are reopened.
		 */
		goto validate_closed;
	}

	c = priv->channels.c[ix];

	err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
	if (unlikely(err))
		goto err_remove_pool;

	mlx5e_activate_xsk(c);
	mlx5e_trigger_napi_async_icosq(c);

	/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
	 * any Fill Ring entries at the setup stage.
	 */

	mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);

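	/* RX steering for this queue now points at the XSK RQ, so park the
	 * regular RQ: deactivate it and flush it back to the ready state,
	 * from which it can be reactivated when XSK is disabled.
	 */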
	mlx5e_deactivate_rq(&c->rq);
	mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);

	return 0;

err_remove_pool:
	mlx5e_xsk_remove_pool(&priv->xsk, ix);

err_unmap_pool:
	mlx5e_xsk_unmap_pool(priv, pool);

	return err;

validate_closed:
	/* Check the configuration in advance, rather than fail at a later stage
	 * (in mlx5e_xdp_set or on open) and end up with no channels.
	 */
	if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
		err = -EINVAL;
		goto err_remove_pool;
	}

	return 0;
}

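/* Disable an XSK pool on channel ix, reversing mlx5e_xsk_enable_locked():
 * reactivate the regular RQ and wait until it has enough WQEs posted to
 * receive, switch RX steering back to it, then tear down the XSK queues
 * and release the pool.
 */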
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
	struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
							&priv->xsk, ix);
	struct mlx5e_channel *c;

	if (unlikely(!pool))
		return -EINVAL;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto remove_pool;

	/* XSK RQ and SQ are only created if XDP program is set. */
	if (!priv->channels.params.xdp_prog)
		goto remove_pool;

	c = priv->channels.c[ix];

	mlx5e_activate_rq(&c->rq);
	mlx5e_trigger_napi_async_icosq(c);
	mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);

	mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);

	mlx5e_deactivate_xsk(c);
	mlx5e_close_xsk(c);

remove_pool:
	mlx5e_xsk_remove_pool(&priv->xsk, ix);
	mlx5e_xsk_unmap_pool(priv, pool);

	return 0;
}

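/* The _locked variants above rely on priv->state_lock; these wrappers take
 * it, serializing pool setup against other configuration changes (channel
 * reconfiguration, XDP program attach/detach).
 */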
static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
				 u16 ix)
{
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_xsk_enable_locked(priv, pool, ix);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
{
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_xsk_disable_locked(priv, ix);
	mutex_unlock(&priv->state_lock);

	return err;
}

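/* Entry point for the XDP_SETUP_XSK_POOL command of ndo_bpf: a non-NULL
 * pool attaches the pool to queue qid, a NULL pool detaches it.
 */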
int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_params *params = &priv->channels.params;

	if (unlikely(qid >= params->num_channels))
		return -EINVAL;

	return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
		      mlx5e_xsk_disable_pool(priv, qid);
}
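
/* Call-path sketch (from the xsk core, not this file): binding an AF_XDP
 * socket to one of this netdev's queues makes xp_assign_dev() issue
 * ndo_bpf(XDP_SETUP_XSK_POOL), which lands in mlx5e_xsk_setup_pool() above;
 * closing the socket repeats the call with pool == NULL.
 */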