xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 
4 #include "fs_core.h"
5 #include "eswitch.h"
6 
/* Connect or disconnect an adjacent vport from the eswitch datapath.
 *
 * Issues MODIFY_VPORT_STATE (ESW_VPORT op_mod) against @vport on behalf of
 * the eswitch manager, toggling ingress and egress connectivity together
 * and mirroring @connect into the admin state.
 *
 * Returns 0 on success or a negative errno from the command interface.
 */
int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
			      bool connect)
{
	u32 cmd_in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};

	/* Address the command at the other (adjacent) vport. */
	MLX5_SET(modify_vport_state_in, cmd_in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, cmd_in, op_mod,
		 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT);
	MLX5_SET(modify_vport_state_in, cmd_in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, cmd_in, other_vport, 1);

	/* Both directions are (dis)connected as one unit; the valid bits
	 * tell firmware each connect field carries a meaningful value.
	 */
	MLX5_SET(modify_vport_state_in, cmd_in, ingress_connect, connect);
	MLX5_SET(modify_vport_state_in, cmd_in, ingress_connect_valid, 1);
	MLX5_SET(modify_vport_state_in, cmd_in, egress_connect, connect);
	MLX5_SET(modify_vport_state_in, cmd_in, egress_connect_valid, 1);
	MLX5_SET(modify_vport_state_in, cmd_in, admin_state, connect);

	return mlx5_cmd_exec_in(dev, modify_vport_state, cmd_in);
}
25 
/* Tear down a firmware-side eswitch vport previously created with
 * DESTROY_ESW_VPORT's counterpart command.  Best-effort: the command
 * status is intentionally ignored on this teardown path.
 */
static void mlx5_esw_destroy_esw_vport(struct mlx5_core_dev *dev, u16 vport)
{
	u32 cmd_in[MLX5_ST_SZ_DW(destroy_esw_vport_in)] = {};

	MLX5_SET(destroy_esw_vport_in, cmd_in, opcode,
		 MLX5_CMD_OPCODE_DESTROY_ESW_VPORT);
	MLX5_SET(destroy_esw_vport_in, cmd_in, vport_num, vport);
	mlx5_cmd_exec_in(dev, destroy_esw_vport, cmd_in);
}
36 
/* Ask firmware to create an eswitch vport managing @vhca_id.
 *
 * On success the firmware-assigned vport number is stored in @vport_num
 * and 0 is returned; on failure a negative errno is returned and
 * @vport_num is left untouched.
 */
static int mlx5_esw_create_esw_vport(struct mlx5_core_dev *dev, u16 vhca_id,
				     u16 *vport_num)
{
	u32 cmd_out[MLX5_ST_SZ_DW(create_esw_vport_out)] = {};
	u32 cmd_in[MLX5_ST_SZ_DW(create_esw_vport_in)] = {};
	int err;

	MLX5_SET(create_esw_vport_in, cmd_in, opcode,
		 MLX5_CMD_OPCODE_CREATE_ESW_VPORT);
	MLX5_SET(create_esw_vport_in, cmd_in, managed_vhca_id, vhca_id);

	err = mlx5_cmd_exec(dev, cmd_in, sizeof(cmd_in), cmd_out,
			    sizeof(cmd_out));
	if (err)
		return err;

	*vport_num = MLX5_GET(create_esw_vport_out, cmd_out, vport_num);
	return 0;
}
54 
/* Create the firmware eswitch vport for an adjacent (delegated) vHCA,
 * allocate the matching software vport at the next free index, register
 * its ACL namespaces and its representor.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources (including the consumed vport index) are released,
 * keeping this path symmetric with mlx5_esw_adj_vport_destroy().
 */
static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id,
				     const void *rid_info_reg)
{
	struct mlx5_vport *vport;
	u16 vport_num;
	int err;

	err = mlx5_esw_create_esw_vport(esw->dev, vhca_id, &vport_num);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to create adjacent vport for vhca_id %d, err %d\n",
			 vhca_id, err);
		return err;
	}

	esw_debug(esw->dev, "Created adjacent vport[%d] %d for vhca_id 0x%x\n",
		  esw->last_vport_idx, vport_num, vhca_id);

	err = mlx5_esw_vport_alloc(esw, esw->last_vport_idx, vport_num);
	if (err)
		goto destroy_esw_vport;
	/* Consume the index only once the vport is actually allocated, so a
	 * failure below can hand it back and the destroy path's decrement
	 * stays balanced.
	 */
	esw->last_vport_idx++;

	xa_set_mark(&esw->vports, vport_num, MLX5_ESW_VPT_VF);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	vport->adjacent = true;
	vport->vhca_id = vhca_id;

	/* Cache the PCI routing identity of the delegated function. */
	vport->adj_info.parent_pci_devfn =
		MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
			 parent_pci_device_function);
	vport->adj_info.function_id =
		MLX5_GET(function_vhca_rid_info_reg, rid_info_reg, function_id);

	mlx5_fs_vport_egress_acl_ns_add(esw->dev->priv.steering, vport->index);
	mlx5_fs_vport_ingress_acl_ns_add(esw->dev->priv.steering, vport->index);
	err = mlx5_esw_offloads_rep_add(esw, vport);
	if (err)
		goto acl_ns_remove;

	return 0;

acl_ns_remove:
	mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
					    vport->index);
	mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
					   vport->index);
	mlx5_esw_vport_free(esw, vport);
	/* Release the index consumed above. */
	esw->last_vport_idx--;
destroy_esw_vport:
	mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
	return err;
}
106 
mlx5_esw_adj_vport_destroy(struct mlx5_eswitch * esw,struct mlx5_vport * vport)107 static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw,
108 				       struct mlx5_vport *vport)
109 {
110 	u16 vport_num = vport->vport;
111 
112 	esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n",
113 		  vport_num, vport->vhca_id);
114 
115 	mlx5_esw_offloads_rep_remove(esw, vport);
116 	mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
117 					   vport->index);
118 	mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
119 					    vport->index);
120 	mlx5_esw_vport_free(esw, vport);
121 	/* Reset the vport index back so new adj vports can use this index.
122 	 * When vport count can incrementally change, this needs to be modified.
123 	 */
124 	esw->last_vport_idx--;
125 	mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
126 }
127 
mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch * esw)128 void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw)
129 {
130 	struct mlx5_vport *vport;
131 	unsigned long i;
132 
133 	if (!MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max))
134 		return;
135 
136 	mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
137 		if (!vport->adjacent)
138 			continue;
139 		mlx5_esw_adj_vport_destroy(esw, vport);
140 	}
141 }
142 
mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch * esw)143 void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw)
144 {
145 	u32 delegated_vhca_max = MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max);
146 	u32 in[MLX5_ST_SZ_DW(query_delegated_vhca_in)] = {};
147 	int outlen, err, i = 0;
148 	u8 *out;
149 	u32 count;
150 
151 	if (!delegated_vhca_max)
152 		return;
153 
154 	outlen = MLX5_ST_SZ_BYTES(query_delegated_vhca_out) +
155 		 delegated_vhca_max *
156 		 MLX5_ST_SZ_BYTES(delegated_function_vhca_rid_info);
157 
158 	esw_debug(esw->dev, "delegated_vhca_max=%d\n", delegated_vhca_max);
159 
160 	out = kvzalloc(outlen, GFP_KERNEL);
161 	if (!out)
162 		return;
163 
164 	MLX5_SET(query_delegated_vhca_in, in, opcode,
165 		 MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA);
166 
167 	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
168 	if (err) {
169 		kvfree(out);
170 		esw_warn(esw->dev, "Failed to query delegated vhca, err %d\n",
171 			 err);
172 		return;
173 	}
174 
175 	count = MLX5_GET(query_delegated_vhca_out, out, functions_count);
176 	esw_debug(esw->dev, "Delegated vhca functions count %d\n", count);
177 
178 	for (i = 0; i < count; i++) {
179 		const void *rid_info, *rid_info_reg;
180 		u16 vhca_id;
181 
182 		rid_info = MLX5_ADDR_OF(query_delegated_vhca_out, out,
183 					delegated_function_vhca_rid_info[i]);
184 
185 		rid_info_reg = MLX5_ADDR_OF(delegated_function_vhca_rid_info,
186 					    rid_info, function_vhca_rid_info);
187 
188 		vhca_id = MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
189 				   vhca_id);
190 		esw_debug(esw->dev, "Delegating vhca_id 0x%x\n", vhca_id);
191 
192 		err = mlx5_esw_adj_vport_create(esw, vhca_id, rid_info_reg);
193 		if (err) {
194 			esw_warn(esw->dev,
195 				 "Failed to init adjacent vhca 0x%x, err %d\n",
196 				 vhca_id, err);
197 			break;
198 		}
199 	}
200 
201 	kvfree(out);
202 }
203