// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#include <linux/mlx5/driver.h>
#include "eswitch.h"
#include "priv.h"
#include "sf/dev/dev.h"
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"
#include "ecpf.h"
#define CREATE_TRACE_POINTS
#include "diag/sf_tracepoint.h"

struct mlx5_sf {
	struct mlx5_devlink_port dl_port;
	unsigned int port_index;
	u32 controller;
	u16 id;
	u16 hw_fn_id;
	u16 hw_state;
};

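/* Recover the SF entry embedding the given devlink port. */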
static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
{
	struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);

	return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
}

struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray function_ids; /* function id based lookup. */
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
	struct notifier_block esw_nb;
	struct notifier_block vhca_nb;
	struct notifier_block mdev_nb;
};

static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
	return xa_load(&table->function_ids, fn_id);
}

static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
}

static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->function_ids, sf->hw_fn_id);
}

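/* Reserve a hardware SF id for (controller, sfnum), derive the hardware
 * function id and devlink port index from it, and track the new entry in
 * the function id xarray. Returns the SF on success or an ERR_PTR.
 */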
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_function_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose a different sfnum");
	return ERR_PTR(err);
}

static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
	trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
	kfree(sf);
}

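/* Collapse the firmware VHCA state into the two devlink function states:
 * ACTIVE and IN_USE report as active, all other states as inactive.
 */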
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_ACTIVE:
	case MLX5_VHCA_STATE_IN_USE:
		return DEVLINK_PORT_FN_STATE_ACTIVE;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
	default:
		return DEVLINK_PORT_FN_STATE_INACTIVE;
	}
}

static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_IN_USE:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_ACTIVE:
	default:
		return DEVLINK_PORT_FN_OPSTATE_DETACHED;
	}
}

static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
{
	return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
}

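/* Devlink port function state query. sf_state_lock keeps the reported
 * state and opstate pair consistent with the vhca event handler.
 */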
int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
				      enum devlink_port_fn_state *state,
				      enum devlink_port_fn_opstate *opstate,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);

	mutex_lock(&table->sf_state_lock);
	*state = mlx5_sf_to_devlink_state(sf->hw_state);
	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
	mutex_unlock(&table->sf_state_lock);
	return 0;
}

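/* Enable the SF's HCA. Activation is only valid from the ALLOCATED state;
 * an SF that was deactivated but is still attached to a driver is busy.
 */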
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
			    struct netlink_ext_ack *extack)
{
	int err;

	if (mlx5_sf_is_active(sf))
		return 0;
	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
		NL_SET_ERR_MSG_MOD(extack, "SF is deactivated but it is still attached");
		return -EBUSY;
	}

	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
	trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
{
	int err;

	if (!mlx5_sf_is_active(sf))
		return 0;

	err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
	trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

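/* Apply a user requested state change under sf_state_lock; a request that
 * matches the current state is a no-op.
 */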
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
			     struct mlx5_sf *sf,
			     enum devlink_port_fn_state state,
			     struct netlink_ext_ack *extack)
{
	int err = 0;

	mutex_lock(&table->sf_state_lock);
	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
		goto out;
	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
		err = mlx5_sf_activate(dev, sf, extack);
	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
		err = mlx5_sf_deactivate(dev, sf);
	else
		err = -EINVAL;
out:
	mutex_unlock(&table->sf_state_lock);
	return err;
}

int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
				      enum devlink_port_fn_state state,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);

	return mlx5_sf_state_set(dev, table, sf, state, extack);
}

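/* Create an SF port: allocate the SF and load its eswitch vport, which
 * registers the devlink port returned to the caller.
 */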
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
		       const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack,
		       struct devlink_port **dl_port)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf *sf;
	int err;

	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
	if (IS_ERR(sf))
		return PTR_ERR(sf);

	err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
					 &sf->dl_port, new_attr->controller, new_attr->sfnum);
	if (err)
		goto esw_err;
	*dl_port = &sf->dl_port.dl_port;
	trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
	return 0;

esw_err:
	mlx5_sf_free(table, sf);
	return err;
}

static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}

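/* SF ports require an eswitch manager function with SF hardware table
 * support.
 */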
static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
	       mlx5_sf_hw_table_supported(dev);
}

int mlx5_devlink_sf_port_new(struct devlink *devlink,
			     const struct devlink_port_new_attrs *new_attr,
			     struct netlink_ext_ack *extack,
			     struct devlink_port **dl_port)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	int err;

	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
	if (err)
		return err;

	if (!mlx5_sf_table_supported(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
		return -EOPNOTSUPP;
	}

	if (!is_mdev_switchdev_mode(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "SF ports are only supported in eswitch switchdev mode.");
		return -EOPNOTSUPP;
	}

	return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
}

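/* Release an SF. A never-activated SF is freed immediately; otherwise its
 * hardware id is recycled only after the firmware detach event arrives.
 */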
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mutex_lock(&table->sf_state_lock);

	mlx5_sf_function_id_erase(table, sf);

	if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_sf_free(table, sf);
	} else if (mlx5_sf_is_active(sf)) {
		/* Even if the SF is active, treat it as in-use: by the time it
		 * is disabled here it may already be getting used. So always
		 * wait for the vhca event to ensure the SF is recycled only
		 * after firmware confirms that the driver has detached from it.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	} else {
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	}

	mutex_unlock(&table->sf_state_lock);
}

static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	struct mlx5_eswitch *esw = table->dev->priv.eswitch;

	mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
	mlx5_sf_dealloc(table, sf);
}

int mlx5_devlink_sf_port_del(struct devlink *devlink,
			     struct devlink_port *dl_port,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);

	mlx5_sf_del(table, sf);
	return 0;
}

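/* Accept only driver attach/detach transitions reported by firmware; all
 * other state changes are driven locally by this driver.
 */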
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
	    new_state == MLX5_VHCA_STATE_ALLOCATED)
		return true;

	return false;
}

static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto unlock;

	/* When a driver is attached to or detached from a function, an event
	 * notifies of such a state change.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
	trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
				   sf->hw_fn_id, sf->hw_state);
unlock:
	mutex_unlock(&table->sf_state_lock);
	return 0;
}

static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
	unsigned long index;
	struct mlx5_sf *sf;

	xa_for_each(&table->function_ids, index, sf)
		mlx5_sf_del(table, sf);
}

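/* SF ports are supported only in switchdev mode, so switching the eswitch
 * back to legacy mode removes all SF ports.
 */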
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
	const struct mlx5_esw_event_info *mode = data;

	switch (mode->new_mode) {
	case MLX5_ESWITCH_LEGACY:
		mlx5_sf_del_all(table);
		break;
	default:
		break;
	}

	return 0;
}

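/* Associate the peer devlink instance reported for the SF's function id
 * with its devlink port.
 */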
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
	struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
	int ret = NOTIFY_DONE;
	struct mlx5_sf *sf;

	if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
		return NOTIFY_DONE;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
	if (!sf)
		goto out;

	event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port,
						  event_ctx->devlink);

	ret = NOTIFY_OK;
out:
	mutex_unlock(&table->sf_state_lock);
	return ret;
}

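/* Allocate the SF table and subscribe to eswitch mode, vhca state and mdev
 * events. dev->priv.sf_table stays NULL when SFs are unsupported.
 */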
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table;
	int err;

	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->sf_state_lock);
	table->dev = dev;
	xa_init(&table->function_ids);
	dev->priv.sf_table = table;
	table->esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
	if (err)
		goto reg_err;

	table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
	if (err)
		goto vhca_err;

	table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
	mlx5_blocking_notifier_register(dev, &table->mdev_nb);

	return 0;

vhca_err:
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
	mutex_destroy(&table->sf_state_lock);
	kfree(table);
	dev->priv.sf_table = NULL;
	return err;
}

void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return;

	mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
	mutex_destroy(&table->sf_state_lock);
	WARN_ON(!xa_empty(&table->function_ids));
	kfree(table);
}