1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2020, Mellanox Technologies inc.  All rights reserved.
4  */
5 
6 #include <rdma/uverbs_ioctl.h>
7 #include <rdma/mlx5_user_ioctl_cmds.h>
8 #include <rdma/mlx5_user_ioctl_verbs.h>
9 #include <linux/mlx5/driver.h>
10 #include <linux/mlx5/eswitch.h>
11 #include <linux/mlx5/vport.h>
12 #include "mlx5_ib.h"
13 #include "data_direct.h"
14 
15 #define UVERBS_MODULE_NAME mlx5_ib
16 #include <rdma/uverbs_named_ioctl.h>
17 
UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)18 static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
19 	struct uverbs_attr_bundle *attrs)
20 {
21 	struct ib_pd *pd =
22 		uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_PD_HANDLE);
23 	struct mlx5_ib_pd *mpd = to_mpd(pd);
24 
25 	return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
26 			      &mpd->pdn, sizeof(mpd->pdn));
27 }
28 
/*
 * Report the SW-steering ICM RX/TX base addresses of @vport in @info.
 *
 * Uplink addresses come straight from the device capabilities; any other
 * vport requires a QUERY_ESW_VPORT_CONTEXT command. An address is only
 * reported (and its flag set) when SW steering is supported (sw_owner or
 * sw_owner_v2) and the address itself is non-zero.
 *
 * Returns 0 on success or a negative errno from the FW command.
 */
static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
			       struct mlx5_ib_uapi_query_port *info)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	bool sw_owner_supp;
	u64 rx_addr;
	u64 tx_addr;
	int ret;

	sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
			MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);

	if (vport == MLX5_VPORT_UPLINK) {
		/* Uplink ICM addresses are exposed directly as caps. */
		rx_addr = MLX5_CAP64_ESW_FLOWTABLE(mdev,
			sw_steering_uplink_icm_address_rx);
		tx_addr = MLX5_CAP64_ESW_FLOWTABLE(mdev,
			sw_steering_uplink_icm_address_tx);
	} else {
		/* Other vports must be queried through the e-switch. */
		MLX5_SET(query_esw_vport_context_in, in, opcode,
			 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
		MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
		MLX5_SET(query_esw_vport_context_in, in, other_vport, true);

		ret = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
					  out);
		if (ret)
			return ret;

		rx_addr = MLX5_GET64(
			query_esw_vport_context_out, out,
			esw_vport_context.sw_steering_vport_icm_address_rx);

		tx_addr = MLX5_GET64(
			query_esw_vport_context_out, out,
			esw_vport_context.sw_steering_vport_icm_address_tx);
	}

	if (sw_owner_supp) {
		if (rx_addr) {
			info->vport_steering_icm_rx = rx_addr;
			info->flags |=
				MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
		}

		if (tx_addr) {
			info->vport_steering_icm_tx = tx_addr;
			info->flags |=
				MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
		}
	}

	return 0;
}
82 
/*
 * Query the vhca_id of @vport via QUERY_HCA_CAP (other_function) and
 * record it in @info, setting the VPORT_VHCA_ID flag on success.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * errno from the FW command.
 */
static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
			      struct mlx5_ib_uapi_query_port *info)
{
	size_t query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	void *query_out;
	int ret;

	/* The caps output is too large for the stack; allocate it. */
	query_out = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	MLX5_SET(query_hca_cap_in, in, function_id, vport);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), query_out, query_out_sz);
	if (ret)
		goto free;

	info->vport_vhca_id = MLX5_GET(query_hca_cap_out, query_out,
				       capability.cmd_hca_cap.vhca_id);

	info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
free:
	kfree(query_out);
	return ret;
}
114 
/*
 * Fill @info for a multi-port (MPV) setup: report the vhca_id of the
 * native mdev that backs @port_num.
 *
 * Returns 0 on success or -EINVAL when no native mdev backs the port.
 */
static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
			       struct mlx5_ib_uapi_query_port *info)
{
	struct mlx5_core_dev *native_mdev;

	native_mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
	if (!native_mdev)
		return -EINVAL;

	info->vport_vhca_id = MLX5_CAP_GEN(native_mdev, vhca_id);
	info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;

	/* Drop the reference taken by mlx5_ib_get_native_port_mdev(). */
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return 0;
}
131 
/*
 * Fill @info for a port in switchdev (eswitch offloads) mode: the rep's
 * vport number, its vhca_id (non-uplink only), the eswitch owner's
 * vhca_id, the SW-steering ICM addresses and, when vport match metadata
 * is enabled, the reg_c0 match value/mask.
 *
 * Returns 0 on success, -EOPNOTSUPP when the port has no rep, -EINVAL
 * when the eswitch has no core device, or an errno from the FW queries.
 */
static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
			       struct mlx5_ib_uapi_query_port *info)
{
	struct mlx5_eswitch_rep *rep;
	struct mlx5_core_dev *mdev;
	int ret;

	/* Port numbers are 1-based; the port array is 0-based. */
	rep = dev->port[port_num - 1].rep;
	if (!rep)
		return -EOPNOTSUPP;

	mdev = mlx5_eswitch_get_core_dev(rep->esw);
	if (!mdev)
		return -EINVAL;

	info->vport = rep->vport;
	info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;

	/* The uplink has no per-vport vhca_id to report. */
	if (rep->vport != MLX5_VPORT_UPLINK) {
		ret = fill_vport_vhca_id(mdev, rep->vport, info);
		if (ret)
			return ret;
	}

	info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
	info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;

	ret = fill_vport_icm_addr(mdev, rep->vport, info);
	if (ret)
		return ret;

	if (mlx5_eswitch_vport_match_metadata_enabled(rep->esw)) {
		info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
			rep->esw, rep->vport);
		info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
		info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
	}

	return 0;
}
172 
UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)173 static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
174 	struct uverbs_attr_bundle *attrs)
175 {
176 	struct mlx5_ib_uapi_query_port info = {};
177 	struct mlx5_ib_ucontext *c;
178 	struct mlx5_ib_dev *dev;
179 	u32 port_num;
180 	int ret;
181 
182 	if (uverbs_copy_from(&port_num, attrs,
183 			     MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
184 		return -EFAULT;
185 
186 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
187 	if (IS_ERR(c))
188 		return PTR_ERR(c);
189 	dev = to_mdev(c->ibucontext.device);
190 
191 	if (!rdma_is_port_valid(&dev->ib_dev, port_num))
192 		return -EINVAL;
193 
194 	if (mlx5_eswitch_mode(dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
195 		ret = fill_switchdev_info(dev, port_num, &info);
196 		if (ret)
197 			return ret;
198 	} else if (mlx5_core_mp_enabled(dev->mdev)) {
199 		ret = fill_multiport_info(dev, port_num, &info);
200 		if (ret)
201 			return ret;
202 	}
203 
204 	return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
205 					     sizeof(info));
206 }
207 
UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)208 static int UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)(
209 	struct uverbs_attr_bundle *attrs)
210 {
211 	struct mlx5_data_direct_dev *data_direct_dev;
212 	struct mlx5_ib_ucontext *c;
213 	struct mlx5_ib_dev *dev;
214 	int out_len = uverbs_attr_get_len(attrs,
215 			MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH);
216 	u32 dev_path_len;
217 	char *dev_path;
218 	int ret;
219 
220 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
221 	if (IS_ERR(c))
222 		return PTR_ERR(c);
223 	dev = to_mdev(c->ibucontext.device);
224 	mutex_lock(&dev->data_direct_lock);
225 	data_direct_dev = dev->data_direct_dev;
226 	if (!data_direct_dev) {
227 		ret = -ENODEV;
228 		goto end;
229 	}
230 
231 	dev_path = kobject_get_path(&data_direct_dev->device->kobj, GFP_KERNEL);
232 	if (!dev_path) {
233 		ret = -ENOMEM;
234 		goto end;
235 	}
236 
237 	dev_path_len = strlen(dev_path) + 1;
238 	if (dev_path_len > out_len) {
239 		ret = -ENOSPC;
240 		goto end;
241 	}
242 
243 	ret = uverbs_copy_to(attrs, MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, dev_path,
244 			     dev_path_len);
245 	kfree(dev_path);
246 
247 end:
248 	mutex_unlock(&dev->data_direct_lock);
249 	return ret;
250 }
251 
/* QUERY_PORT: port number in, mlx5_ib_uapi_query_port struct out. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_QUERY_PORT,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
			   UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_QUERY_PORT,
		/* 'reg_c0' marks the last field known to this kernel. */
		UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
				   reg_c0),
		UA_MANDATORY));

/* GET_DATA_DIRECT_SYSFS_PATH: variable-length string path out. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH,
		UVERBS_ATTR_MIN_SIZE(0),
		UA_MANDATORY));

/* Attach the device-scoped methods to the generic DEVICE object. */
ADD_UVERBS_METHODS(mlx5_ib_device,
		   UVERBS_OBJECT_DEVICE,
		   &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT),
		   &UVERBS_METHOD(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH));

/* PD_QUERY: PD handle (read access) in, HW pdn out. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_PD_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY));

/* Attach the PD-scoped method to the generic PD object. */
ADD_UVERBS_METHODS(mlx5_ib_pd,
		   UVERBS_OBJECT_PD,
		   &UVERBS_METHOD(MLX5_IB_METHOD_PD_QUERY));

/* uapi definition chain registered by the mlx5_ib driver core. */
const struct uapi_definition mlx5_ib_std_types_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_PD,
		&mlx5_ib_pd),
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_DEVICE,
		&mlx5_ib_device),
	{},
};
297