1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /*
3 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
4 */
5 #include <linux/fwctl.h>
6 #include <linux/auxiliary_bus.h>
7 #include <linux/mlx5/device.h>
8 #include <linux/mlx5/driver.h>
9 #include <uapi/fwctl/mlx5.h>
10
/* Log an error message against the fwctl character device. */
#define mlx5ctl_err(mcdev, format, ...) \
	dev_err(&mcdev->fwctl.dev, format, ##__VA_ARGS__)

/* Debug logging, prefixed with the PID of the process issuing the request. */
#define mlx5ctl_dbg(mcdev, format, ...) \
	dev_dbg(&mcdev->fwctl.dev, "PID %u: " format, current->pid, \
		##__VA_ARGS__)
17
/* Per-open-FD state, embedded in (and recovered from) the core fwctl_uctx. */
struct mlx5ctl_uctx {
	struct fwctl_uctx uctx;
	u32 uctx_caps;	/* MLX5_UCTX_OBJECT_CAP_* bits granted to this context */
	u32 uctx_uid;	/* FW user context ID allocated in mlx5ctl_open_uctx() */
};
23
/* Per-device state, embedded in the core fwctl_device. */
struct mlx5ctl_dev {
	struct fwctl_device fwctl;
	struct mlx5_core_dev *mdev;	/* underlying mlx5 core device */
};
/* Scope-based cleanup helper: drops the fwctl refcount when an __free(mlx5ctl)
 * pointer goes out of scope without being disarmed via no_free_ptr().
 */
DEFINE_FREE(mlx5ctl, struct mlx5ctl_dev *, if (_T) fwctl_put(&_T->fwctl));
29
/*
 * Layout of the header common to all mailbox commands, used to peek at the
 * opcode/op_mod of a user RPC and to stamp the uid onto it.
 */
struct mlx5_ifc_mbox_in_hdr_bits {
	u8 opcode[0x10];
	u8 uid[0x10];

	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

	u8 reserved_at_40[0x40];
};

/* Header common to all mailbox responses: FW status code and error syndrome. */
struct mlx5_ifc_mbox_out_hdr_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];

	u8 syndrome[0x20];

	u8 reserved_at_40[0x40];
};
48
/* uctx capability bit that grants access to the global "tools resources". */
enum {
	MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES = 0x4,
};
52
/*
 * Command opcodes referenced by mlx5ctl_validate_rpc() that are not declared
 * in the shared mlx5 headers. Values come from the device command interface.
 */
enum {
	MLX5_CMD_OP_QUERY_DRIVER_VERSION = 0x10c,
	MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e,
	MLX5_CMD_OP_QUERY_RDB = 0x512,
	MLX5_CMD_OP_QUERY_PSV = 0x602,
	MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
	MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
	MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
	MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
	MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
	MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
	MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS = 0x821,
	MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911,
	MLX5_CMD_OP_QUERY_AFU = 0x971,
	MLX5_CMD_OP_QUERY_CAPI_PEC = 0x981,
	MLX5_CMD_OP_QUERY_UCTX = 0xa05,
	MLX5_CMD_OP_QUERY_UMEM = 0xa09,
	MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE = 0xb02,
	MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO = 0xb03,
	MLX5_CMD_OP_QUERY_REGEXP_PARAMS = 0xb05,
	MLX5_CMD_OP_QUERY_REGEXP_REGISTER = 0xb07,
	MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY = 0xb08,
	MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS = 0xb0a,
	MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c,
	MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING = 0xb0f,
	MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO = 0xb13,
	MLX5_CMD_OP_QUERY_CRYPTO_STATE = 0xb14,
	MLX5_CMD_OP_QUERY_VUID = 0xb22,
	MLX5_CMD_OP_QUERY_DPA_PARTITION = 0xb28,
	MLX5_CMD_OP_QUERY_DPA_PARTITIONS = 0xb2a,
	MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT = 0xb2e,
	MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO = 0xb2f,
	MLX5_CMD_OP_QUERY_RSV_RESOURCES = 0x8000,
	MLX5_CMD_OP_QUERY_MTT = 0x8001,
	MLX5_CMD_OP_QUERY_SCHED_QUEUE = 0x8006,
};
89
/*
 * Create a FW user context (UID) carrying the requested capability bits.
 * Returns the non-negative UID on success or a negative errno on failure.
 */
static int mlx5ctl_alloc_uid(struct mlx5ctl_dev *mcdev, u32 cap)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
	void *uctx_in;
	u16 new_uid;
	int err;

	uctx_in = MLX5_ADDR_OF(create_uctx_in, in, uctx);

	mlx5ctl_dbg(mcdev, "%s: caps 0x%x\n", __func__, cap);
	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx_in, cap, cap);

	err = mlx5_cmd_exec(mcdev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	new_uid = MLX5_GET(create_uctx_out, out, uid);
	mlx5ctl_dbg(mcdev, "allocated uid %u with caps 0x%x\n", new_uid, cap);
	return new_uid;
}
112
/* Destroy the FW user context @uid; a failure is only logged, not propagated. */
static void mlx5ctl_release_uid(struct mlx5ctl_dev *mcdev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
	int err;

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	err = mlx5_cmd_exec_in(mcdev->mdev, destroy_uctx, in);
	mlx5ctl_dbg(mcdev, "released uid %u %pe\n", uid, ERR_PTR(err));
}
125
mlx5ctl_open_uctx(struct fwctl_uctx * uctx)126 static int mlx5ctl_open_uctx(struct fwctl_uctx *uctx)
127 {
128 struct mlx5ctl_uctx *mfd =
129 container_of(uctx, struct mlx5ctl_uctx, uctx);
130 struct mlx5ctl_dev *mcdev =
131 container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
132 int uid;
133
134 /*
135 * New FW supports the TOOLS_RESOURCES uid security label
136 * which allows commands to manipulate the global device state.
137 * Otherwise only basic existing RDMA devx privilege are allowed.
138 */
139 if (MLX5_CAP_GEN(mcdev->mdev, uctx_cap) &
140 MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES)
141 mfd->uctx_caps |= MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES;
142
143 uid = mlx5ctl_alloc_uid(mcdev, mfd->uctx_caps);
144 if (uid < 0)
145 return uid;
146
147 mfd->uctx_uid = uid;
148 return 0;
149 }
150
mlx5ctl_close_uctx(struct fwctl_uctx * uctx)151 static void mlx5ctl_close_uctx(struct fwctl_uctx *uctx)
152 {
153 struct mlx5ctl_dev *mcdev =
154 container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
155 struct mlx5ctl_uctx *mfd =
156 container_of(uctx, struct mlx5ctl_uctx, uctx);
157
158 mlx5ctl_release_uid(mcdev, mfd->uctx_uid);
159 }
160
/*
 * fwctl_ops::info: report the UID and capability bits of this context to
 * userspace. Returns a kzalloc'd fwctl_info_mlx5 (freed by the fwctl core)
 * or an ERR_PTR on allocation failure.
 */
static void *mlx5ctl_info(struct fwctl_uctx *uctx, size_t *length)
{
	struct mlx5ctl_uctx *mfd =
		container_of(uctx, struct mlx5ctl_uctx, uctx);
	struct fwctl_info_mlx5 *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	*length = sizeof(*out);
	out->uid = mfd->uctx_uid;
	out->uctx_caps = mfd->uctx_caps;
	return out;
}
176
/*
 * Filter a userspace RPC by its opcode against the requested fwctl scope.
 * Only a curated allow-list of query/set commands is accepted; everything
 * else is rejected. Returns true when the command may run at @scope.
 */
static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
{
	u16 opcode = MLX5_GET(mbox_in_hdr, in, opcode);
	u16 op_mod = MLX5_GET(mbox_in_hdr, in, op_mod);

	/*
	 * Currently the driver can't keep track of commands that allocate
	 * objects in the FW, these commands are safe from a security
	 * perspective but nothing will free the memory when the FD is closed.
	 * For now permit only query commands and set commands that don't alter
	 * objects. Also the caps for the scope have not been defined yet,
	 * filter commands manually for now.
	 */
	/* Commands safe at FWCTL_RPC_CONFIGURATION (any scope permits them). */
	switch (opcode) {
	case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	/* NOTE(review): uses the mlx5_ifc MLX5_CMD_OPCODE_QUERY_VUID constant,
	 * not the local MLX5_CMD_OP_QUERY_VUID above — presumably the same
	 * value (0xb22); confirm against mlx5_ifc.h.
	 */
	case MLX5_CMD_OPCODE_QUERY_VUID:
	/*
	 * FW limits SET_HCA_CAP on the tools UID to only the other function
	 * mode which is used for function pre-configuration
	 */
	case MLX5_CMD_OP_SET_HCA_CAP:
		return true; /* scope >= FWCTL_RPC_CONFIGURATION; */

	/* Read-only queries: need at least debug-read scope. */
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_AFU:
	case MLX5_CMD_OP_QUERY_BURST_SIZE:
	case MLX5_CMD_OP_QUERY_CAPI_PEC:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_CRYPTO_STATE:
	case MLX5_CMD_OP_QUERY_DC_CNAK_TRACE:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS:
	case MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS:
	case MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS:
	case MLX5_CMD_OP_QUERY_DPA_PARTITION:
	case MLX5_CMD_OP_QUERY_DPA_PARTITIONS:
	case MLX5_CMD_OP_QUERY_DRIVER_VERSION:
	case MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO:
	case MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO:
	case MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_MTT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER:
	case MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE:
	case MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_QUERY_PSV:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RDB:
	case MLX5_CMD_OP_QUERY_REGEXP_PARAMS:
	case MLX5_CMD_OP_QUERY_REGEXP_REGISTER:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_RSV_RESOURCES:
	case MLX5_CMD_OP_QUERY_SCHED_QUEUE:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SF_PARTITION:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_UCTX:
	case MLX5_CMD_OP_QUERY_UMEM:
	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_WOL_ROL:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
		return scope >= FWCTL_RPC_DEBUG_READ_ONLY;

	/* Mutating diagnostic command: needs debug-write scope. */
	case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
		return scope >= FWCTL_RPC_DEBUG_WRITE;

	/* Register access: op_mod distinguishes write (0) from read. */
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ACCESS_REGISTER_USER:
		if (op_mod == 0) /* write */
			return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
		return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
	default:
		return false;
	}
}
298
/*
 * fwctl_ops::fw_rpc: execute a validated userspace mailbox command against
 * the device on behalf of this context's FW UID.
 *
 * @rpc_in/@in_len: the user-supplied command; the uid field is overwritten
 *	with the context's own UID so the FW enforces its privileges.
 * @out_len: in/out; on entry the caller's maximum response size.
 *
 * Returns the response buffer (possibly @rpc_in itself, reused) which the
 * fwctl core owns and frees, or an ERR_PTR when the command never reached
 * the device. -EREMOTEIO means the command executed but FW reported an
 * error inside the response header, which is still returned to userspace.
 */
static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			    void *rpc_in, size_t in_len, size_t *out_len)
{
	struct mlx5ctl_dev *mcdev =
		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
	struct mlx5ctl_uctx *mfd =
		container_of(uctx, struct mlx5ctl_uctx, uctx);
	void *rpc_out;
	int ret;

	/* Both directions must at least hold the common mailbox header. */
	if (in_len < MLX5_ST_SZ_BYTES(mbox_in_hdr) ||
	    *out_len < MLX5_ST_SZ_BYTES(mbox_out_hdr))
		return ERR_PTR(-EMSGSIZE);

	mlx5ctl_dbg(mcdev, "[UID %d] cmdif: opcode 0x%x inlen %zu outlen %zu\n",
		    mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
		    in_len, *out_len);

	if (!mlx5ctl_validate_rpc(rpc_in, scope))
		return ERR_PTR(-EBADMSG);

	/*
	 * mlx5_cmd_do() copies the input message to its own buffer before
	 * executing it, so we can reuse the allocation for the output.
	 */
	if (*out_len <= in_len) {
		rpc_out = rpc_in;
	} else {
		rpc_out = kvzalloc(*out_len, GFP_KERNEL);
		if (!rpc_out)
			return ERR_PTR(-ENOMEM);
	}

	/* Enforce the user context for the command */
	MLX5_SET(mbox_in_hdr, rpc_in, uid, mfd->uctx_uid);
	ret = mlx5_cmd_do(mcdev->mdev, rpc_in, in_len, rpc_out, *out_len);

	mlx5ctl_dbg(mcdev,
		    "[UID %d] cmdif: opcode 0x%x status 0x%x retval %pe\n",
		    mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
		    MLX5_GET(mbox_out_hdr, rpc_out, status), ERR_PTR(ret));

	/*
	 * -EREMOTEIO means execution succeeded and the out is valid,
	 * but an error code was returned inside out. Everything else
	 * means the RPC did not make it to the device.
	 */
	if (ret && ret != -EREMOTEIO) {
		if (rpc_out != rpc_in)
			/* kvzalloc() pairs with kvfree(), not kfree() */
			kvfree(rpc_out);
		return ERR_PTR(ret);
	}
	return rpc_out;
}
353
/* fwctl core callbacks implementing the mlx5 fwctl device. */
static const struct fwctl_ops mlx5ctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_MLX5,
	.uctx_size = sizeof(struct mlx5ctl_uctx),
	.open_uctx = mlx5ctl_open_uctx,
	.close_uctx = mlx5ctl_close_uctx,
	.info = mlx5ctl_info,
	.fw_rpc = mlx5ctl_fw_rpc,
};
362
/*
 * Bind to the mlx5 core ".fwctl" auxiliary device: allocate a fwctl device,
 * register it with the fwctl subsystem and stash it as driver data.
 * Returns 0 on success or a negative errno.
 */
static int mlx5ctl_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)

{
	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = madev->mdev;
	/* __free(mlx5ctl) drops the reference automatically on error returns */
	struct mlx5ctl_dev *mcdev __free(mlx5ctl) = fwctl_alloc_device(
		&mdev->pdev->dev, &mlx5ctl_ops, struct mlx5ctl_dev, fwctl);
	int ret;

	if (!mcdev)
		return -ENOMEM;

	mcdev->mdev = mdev;

	ret = fwctl_register(&mcdev->fwctl);
	if (ret)
		return ret;
	/* no_free_ptr() disarms the cleanup; the reference now lives in drvdata */
	auxiliary_set_drvdata(adev, no_free_ptr(mcdev));
	return 0;
}
384
/* Unbind: unregister from the fwctl core, then drop the probe's reference. */
static void mlx5ctl_remove(struct auxiliary_device *adev)
{
	struct mlx5ctl_dev *mcdev = auxiliary_get_drvdata(adev);

	fwctl_unregister(&mcdev->fwctl);
	fwctl_put(&mcdev->fwctl);
}
392
/* Match the ".fwctl" auxiliary device published by the mlx5 core driver. */
static const struct auxiliary_device_id mlx5ctl_id_table[] = {
	{.name = MLX5_ADEV_NAME ".fwctl",},
	{}
};
MODULE_DEVICE_TABLE(auxiliary, mlx5ctl_id_table);
398
/* Auxiliary-bus driver glue. */
static struct auxiliary_driver mlx5ctl_driver = {
	.name = "mlx5_fwctl",
	.probe = mlx5ctl_probe,
	.remove = mlx5ctl_remove,
	.id_table = mlx5ctl_id_table,
};
405
/* Standard module init/exit boilerplate for an auxiliary-bus driver. */
module_auxiliary_driver(mlx5ctl_driver);

MODULE_IMPORT_NS("FWCTL");
MODULE_DESCRIPTION("mlx5 ConnectX fwctl driver");
MODULE_AUTHOR("Saeed Mahameed <saeedm@nvidia.com>");
MODULE_LICENSE("Dual BSD/GPL");
412