xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include "nv_param.h"
5 #include "mlx5_core.h"
6 
/* MNVDA configuration item identifiers (parameter_index values), grouped by
 * the type_class they are read/written under: class 0 items are global,
 * class 3 items are per host PF.
 */
enum {
	MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF               = 0x80,
	MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP                = 0x81,
	MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG             = 0x10a,
	MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP                = 0x10b,
	MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF            = 0x11d,

	MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF                   = 0x80,
};
16 
/* Item-header "type" dword layout for class-0 (global) configuration items. */
struct mlx5_ifc_configuration_item_type_class_global_bits {
	u8         type_class[0x8];
	u8         parameter_index[0x18];
};
21 
/* Item-header "type" dword layout for class-3 (per host PF) items: addressed
 * by PF index and PCI bus index in addition to the parameter index.
 */
struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
	u8         type_class[0x8];
	u8         pf_index[0x6];
	u8         pci_bus_index[0x8];
	u8         parameter_index[0xa];
};
28 
/* The header "type" dword, interpreted according to its type_class field. */
union mlx5_ifc_config_item_type_auto_bits {
	struct mlx5_ifc_configuration_item_type_class_global_bits
				configuration_item_type_class_global;
	struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
				configuration_item_type_class_per_host_pf;
	u8 reserved_at_0[0x20];
};
36 
/* Values for the item header's access_mode field.  From usage in this file:
 * NEXT is the value applied after the next reset (read-modify-write target),
 * DEFAULT is the device default (used to restore settings).  CURRENT is
 * unused here — presumably the currently running value; confirm via PRM.
 */
enum {
	MLX5_ACCESS_MODE_NEXT = 0,
	MLX5_ACCESS_MODE_CURRENT,
	MLX5_ACCESS_MODE_DEFAULT,
};
42 
/* MNVDA configuration item header: addressing (type), access mode, payload
 * length in bytes, and integrity/ownership metadata.
 */
struct mlx5_ifc_config_item_bits {
	u8         valid[0x2];
	u8         priority[0x2];
	u8         header_type[0x2];
	u8         ovr_en[0x1];
	u8         rd_en[0x1];
	u8         access_mode[0x2];
	u8         reserved_at_a[0x1];
	u8         writer_id[0x5];
	u8         version[0x4];
	u8         reserved_at_14[0x2];
	u8         host_id_valid[0x1];
	u8         length[0x9];

	union mlx5_ifc_config_item_type_auto_bits type;

	u8         reserved_at_40[0x10];
	u8         crc16[0x10];
};
62 
/* MNVDA access register layout: one item header followed by up to 64 dwords
 * of configuration item payload.
 */
struct mlx5_ifc_mnvda_reg_bits {
	struct mlx5_ifc_config_item_bits configuration_item_header;

	u8         configuration_item_data[64][0x20];
};
68 
/* NV_GLOBAL_PCI_CONF item payload (class 0): global SRIOV configuration.
 * per_pf_total_vf selects per-PF VF provisioning instead of the global
 * total_vfs count.
 */
struct mlx5_ifc_nv_global_pci_conf_bits {
	u8         sriov_valid[0x1];
	u8         reserved_at_1[0x10];
	u8         per_pf_total_vf[0x1];
	u8         reserved_at_12[0xe];

	u8         sriov_en[0x1];
	u8         reserved_at_21[0xf];
	u8         total_vfs[0x10];

	u8         reserved_at_40[0x20];
};
81 
/* NV_GLOBAL_PCI_CAP item payload (class 0): device SRIOV capabilities,
 * including whether per-PF VF provisioning is supported and the per-PF VF
 * limit (valid only when max_vfs_per_pf_valid is set).
 */
struct mlx5_ifc_nv_global_pci_cap_bits {
	u8         max_vfs_per_pf_valid[0x1];
	u8         reserved_at_1[0x13];
	u8         per_pf_total_vf_supported[0x1];
	u8         reserved_at_15[0xb];

	u8         sriov_support[0x1];
	u8         reserved_at_21[0xf];
	u8         max_vfs_per_pf[0x10];

	u8         reserved_at_40[0x60];
};
94 
/* NV_PF_PCI_CONF item payload (class 3, per host PF): this PF's SRIOV
 * enable bit and VF count, used when the device is in per-PF mode.
 */
struct mlx5_ifc_nv_pf_pci_conf_bits {
	u8         reserved_at_0[0x9];
	u8         pf_total_vf_en[0x1];
	u8         reserved_at_a[0x16];

	u8         reserved_at_20[0x20];

	u8         reserved_at_40[0x10];
	u8         total_vf[0x10];

	u8         reserved_at_60[0x20];
};
107 
/* NV_SW_OFFLOAD_CONFIG item payload (class 0).  Only cqe_compression is
 * consumed by this file; the remaining fields document the full item layout.
 */
struct mlx5_ifc_nv_sw_offload_conf_bits {
	u8         ip_over_vxlan_port[0x10];
	u8         tunnel_ecn_copy_offload_disable[0x1];
	u8         pci_atomic_mode[0x3];
	u8         sr_enable[0x1];
	u8         ptp_cyc2realtime[0x1];
	u8         vector_calc_disable[0x1];
	u8         uctx_en[0x1];
	u8         prio_tag_required_en[0x1];
	u8         esw_fdb_ipv4_ttl_modify_enable[0x1];
	u8         mkey_by_name[0x1];
	u8         ip_over_vxlan_en[0x1];
	u8         one_qp_per_recovery[0x1];
	u8         cqe_compression[0x3];
	u8         tunnel_udp_entropy_proto_disable[0x1];
	u8         reserved_at_21[0x1];
	u8         ar_enable[0x1];
	u8         log_max_outstanding_wqe[0x5];
	u8         vf_migration[0x2];
	u8         log_tx_psn_win[0x6];
	u8         lro_log_timeout3[0x4];
	u8         lro_log_timeout2[0x4];
	u8         lro_log_timeout1[0x4];
	u8         lro_log_timeout0[0x4];
};
133 
/* NV_SW_OFFLOAD_CAP item payload (class 0): capability bit gating the
 * "l4_only" swp_l4_csum_mode setting.
 */
struct mlx5_ifc_nv_sw_offload_cap_bits {
	u8         reserved_at_0[0x19];
	u8         swp_l4_csum_mode_l4_only[0x1];
	u8         reserved_at_1a[0x6];
};
139 
/* NV_SW_ACCELERATE_CONF item payload (class 0): the swp_l4_csum_mode knob. */
struct mlx5_ifc_nv_sw_accelerate_conf_bits {
	u8         swp_l4_csum_mode[0x2];
	u8         reserved_at_2[0x3e];
};
144 
/*
 * NOTE(review): this evaluates to the total register size minus the byte
 * offset of configuration_item_data, i.e. the size of the *data* area, not
 * of the header the name suggests.  All uses treat it only as a minimum
 * buffer length, and every caller passes full-register buffers, so behavior
 * is unaffected — confirm intent against the PRM before relying on the name.
 */
#define MNVDA_HDR_SZ \
	(MLX5_ST_SZ_BYTES(mnvda_reg) - \
	 MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))

/* Set a field of the typed item-header "type" union for class _cls_name. */
#define MLX5_SET_CFG_ITEM_TYPE(_cls_name, _mnvda_ptr, _field, _val) \
	MLX5_SET(mnvda_reg, _mnvda_ptr, \
		 configuration_item_header.type.configuration_item_type_class_##_cls_name._field, \
		 _val)

/* Record the payload length (bytes) of configuration item _cls_name. */
#define MLX5_SET_CFG_HDR_LEN(_mnvda_ptr, _cls_name) \
	MLX5_SET(mnvda_reg, _mnvda_ptr, configuration_item_header.length, \
		 MLX5_ST_SZ_BYTES(_cls_name))

/* Read back the configuration item length from an MNVDA buffer. */
#define MLX5_GET_CFG_HDR_LEN(_mnvda_ptr) \
	MLX5_GET(mnvda_reg, _mnvda_ptr, configuration_item_header.length)
160 
/*
 * Query the configuration item whose header the caller prepared in @mnvda.
 * The same buffer is used as query input and output.  Returns 0 on success,
 * -EINVAL for a malformed caller buffer, -EOPNOTSUPP (with a warning that
 * identifies the item) if firmware rejects the query.
 */
static int mlx5_nv_param_read(struct mlx5_core_dev *dev, void *mnvda,
			      size_t len)
{
	u32 tclass, pidx, hdr_len;
	void *global_cls;
	int ret;

	if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
		return -EINVAL; /* A caller bug */

	ret = mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
				   0, 0);
	if (ret) {
		/* Decode the header through the global-class view just to
		 * identify the failing item in the log.
		 */
		global_cls = MLX5_ADDR_OF(mnvda_reg, mnvda,
					  configuration_item_header.type.configuration_item_type_class_global);
		tclass = MLX5_GET(configuration_item_type_class_global,
				  global_cls, type_class);
		pidx = MLX5_GET(configuration_item_type_class_global,
				global_cls, parameter_index);
		hdr_len = MLX5_GET_CFG_HDR_LEN(mnvda);

		mlx5_core_warn(dev, "Failed to read mnvda reg: type_class 0x%x, param_idx 0x%x, header_len %u, err %d\n",
			       tclass, pidx, hdr_len, ret);
		return -EOPNOTSUPP;
	}

	return 0;
}
191 
/*
 * Write back a fully populated MNVDA item.  The caller must have filled the
 * item header (including a non-zero length) and payload in @mnvda.
 */
static int mlx5_nv_param_write(struct mlx5_core_dev *dev, void *mnvda,
			       size_t len)
{
	/* Caller bugs: buffer must fit the register layout. */
	if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
		return -EINVAL;

	/* The item length must have been filled in before writing. */
	if (WARN_ON(!MLX5_GET_CFG_HDR_LEN(mnvda)))
		return -EINVAL;

	return mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
				    0, 1);
}
204 
/*
 * Fill the MNVDA header to address the NV_SW_OFFLOAD_CONFIG item
 * (type class 0, global) and query it from firmware into @mnvda.
 */
static int
mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
				   size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_conf);

	return mlx5_nv_param_read(dev, mnvda, len);
}
216 
/*
 * Fill the MNVDA header to address the NV_SW_OFFLOAD_CAP item
 * (type class 0, global) and query it from firmware into @mnvda.
 */
static int
mlx5_nv_param_read_sw_offload_cap(struct mlx5_core_dev *dev, void *mnvda,
				  size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_cap);

	return mlx5_nv_param_read(dev, mnvda, len);
}
228 
/*
 * Fill the MNVDA header to address the NV_SW_ACCELERATE_CONF item
 * (type class 0, global) with the requested @access_mode
 * (next/current/default) and query it from firmware into @mnvda.
 */
static int
mlx5_nv_param_read_sw_accelerate_conf(struct mlx5_core_dev *dev, void *mnvda,
				      size_t len, int access_mode)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_accelerate_conf);
	MLX5_SET(mnvda_reg, mnvda, configuration_item_header.access_mode,
		 access_mode);

	return mlx5_nv_param_read(dev, mnvda, len);
}
242 
/* Devlink strings for the cqe_compression field; index == field value. */
static const char *const
	cqe_compress_str[] = { "balanced", "aggressive" };
245 
246 static int
mlx5_nv_param_devlink_cqe_compress_get(struct devlink * devlink,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)247 mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
248 				       struct devlink_param_gset_ctx *ctx,
249 				       struct netlink_ext_ack *extack)
250 {
251 	struct mlx5_core_dev *dev = devlink_priv(devlink);
252 	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
253 	u8 value = U8_MAX;
254 	void *data;
255 	int err;
256 
257 	err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
258 	if (err)
259 		return err;
260 
261 	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
262 	value = MLX5_GET(nv_sw_offload_conf, data, cqe_compression);
263 
264 	if (value >= ARRAY_SIZE(cqe_compress_str))
265 		return -EOPNOTSUPP;
266 
267 	strscpy(ctx->val.vstr, cqe_compress_str[value], sizeof(ctx->val.vstr));
268 	return 0;
269 }
270 
271 static int
mlx5_nv_param_devlink_cqe_compress_validate(struct devlink * devlink,u32 id,union devlink_param_value val,struct netlink_ext_ack * extack)272 mlx5_nv_param_devlink_cqe_compress_validate(struct devlink *devlink, u32 id,
273 					    union devlink_param_value val,
274 					    struct netlink_ext_ack *extack)
275 {
276 	int i;
277 
278 	for (i = 0; i < ARRAY_SIZE(cqe_compress_str); i++) {
279 		if (!strcmp(val.vstr, cqe_compress_str[i]))
280 			return 0;
281 	}
282 
283 	NL_SET_ERR_MSG_MOD(extack,
284 			   "Invalid value, supported values are balanced/aggressive");
285 	return -EOPNOTSUPP;
286 }
287 
288 static int
mlx5_nv_param_devlink_cqe_compress_set(struct devlink * devlink,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)289 mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
290 				       struct devlink_param_gset_ctx *ctx,
291 				       struct netlink_ext_ack *extack)
292 {
293 	struct mlx5_core_dev *dev = devlink_priv(devlink);
294 	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
295 	int err = 0;
296 	void *data;
297 	u8 value;
298 
299 	if (!strcmp(ctx->val.vstr, "aggressive"))
300 		value = 1;
301 	else /* balanced: can't be anything else already validated above */
302 		value = 0;
303 
304 	err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
305 	if (err) {
306 		NL_SET_ERR_MSG_MOD(extack,
307 				   "Failed to read sw_offload_conf mnvda reg");
308 		return err;
309 	}
310 
311 	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
312 	MLX5_SET(nv_sw_offload_conf, data, cqe_compression, value);
313 
314 	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
315 }
316 
/* Values of the swp_l4_csum_mode NV field (see nv_sw_accelerate_conf). */
enum swp_l4_csum_mode {
	SWP_L4_CSUM_MODE_DEFAULT = 0,
	SWP_L4_CSUM_MODE_FULL_CSUM = 1,
	SWP_L4_CSUM_MODE_L4_ONLY = 2,
};
322 
/* Devlink strings for swp_l4_csum_mode; index == enum swp_l4_csum_mode. */
static const char *const
	swp_l4_csum_mode_str[] = { "default", "full_csum", "l4_only" };
325 
326 static int
mlx5_swp_l4_csum_mode_get(struct devlink * devlink,u32 id,int access_mode,u8 * value,struct netlink_ext_ack * extack)327 mlx5_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
328 			  int access_mode, u8 *value,
329 			  struct netlink_ext_ack *extack)
330 {
331 	struct mlx5_core_dev *dev = devlink_priv(devlink);
332 	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
333 	void *data;
334 	int err;
335 
336 	err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
337 						    access_mode);
338 	if (err) {
339 		NL_SET_ERR_MSG_MOD(extack,
340 				   "Failed to read sw_accelerate_conf mnvda reg");
341 		return err;
342 	}
343 
344 	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
345 	*value = MLX5_GET(nv_sw_accelerate_conf, data, swp_l4_csum_mode);
346 
347 	if (*value >= ARRAY_SIZE(swp_l4_csum_mode_str)) {
348 		NL_SET_ERR_MSG_FMT_MOD(extack,
349 				       "Invalid swp_l4_csum_mode value %u read from device",
350 				       *value);
351 		return -EINVAL;
352 	}
353 
354 	return 0;
355 }
356 
357 static int
mlx5_devlink_swp_l4_csum_mode_get(struct devlink * devlink,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)358 mlx5_devlink_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
359 				  struct devlink_param_gset_ctx *ctx,
360 				  struct netlink_ext_ack *extack)
361 {
362 	u8 value;
363 	int err;
364 
365 	err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_NEXT,
366 					&value, extack);
367 	if (err)
368 		return err;
369 
370 	strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
371 		sizeof(ctx->val.vstr));
372 	return 0;
373 }
374 
375 static int
mlx5_devlink_swp_l4_csum_mode_validate(struct devlink * devlink,u32 id,union devlink_param_value val,struct netlink_ext_ack * extack)376 mlx5_devlink_swp_l4_csum_mode_validate(struct devlink *devlink, u32 id,
377 				       union devlink_param_value val,
378 				       struct netlink_ext_ack *extack)
379 {
380 	struct mlx5_core_dev *dev = devlink_priv(devlink);
381 	u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
382 	void *data;
383 	int err, i;
384 
385 	for (i = 0; i < ARRAY_SIZE(swp_l4_csum_mode_str); i++) {
386 		if (!strcmp(val.vstr, swp_l4_csum_mode_str[i]))
387 			break;
388 	}
389 
390 	if (i >= ARRAY_SIZE(swp_l4_csum_mode_str) ||
391 	    i == SWP_L4_CSUM_MODE_DEFAULT) {
392 		NL_SET_ERR_MSG_MOD(extack,
393 				   "Invalid value, supported values are full_csum/l4_only");
394 		return -EINVAL;
395 	}
396 
397 	if (i == SWP_L4_CSUM_MODE_L4_ONLY) {
398 		err = mlx5_nv_param_read_sw_offload_cap(dev, cap, sizeof(cap));
399 		if (err) {
400 			NL_SET_ERR_MSG_MOD(extack,
401 					   "Failed to read sw_offload_cap");
402 			return err;
403 		}
404 
405 		data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
406 		if (!MLX5_GET(nv_sw_offload_cap, data, swp_l4_csum_mode_l4_only)) {
407 			NL_SET_ERR_MSG_MOD(extack,
408 					   "l4_only mode is not supported on this device");
409 			return -EOPNOTSUPP;
410 		}
411 	}
412 
413 	return 0;
414 }
415 
416 static int
mlx5_swp_l4_csum_mode_set(struct devlink * devlink,u32 id,u8 value,struct netlink_ext_ack * extack)417 mlx5_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, u8 value,
418 			  struct netlink_ext_ack *extack)
419 {
420 	struct mlx5_core_dev *dev = devlink_priv(devlink);
421 	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
422 	void *data;
423 	int err;
424 
425 	err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
426 						    MLX5_ACCESS_MODE_NEXT);
427 	if (err) {
428 		NL_SET_ERR_MSG_MOD(extack,
429 				   "Failed to read sw_accelerate_conf mnvda reg");
430 		return err;
431 	}
432 
433 	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
434 	MLX5_SET(nv_sw_accelerate_conf, data, swp_l4_csum_mode, value);
435 
436 	err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
437 	if (err)
438 		NL_SET_ERR_MSG_MOD(extack,
439 				   "Failed to write sw_accelerate_conf mnvda reg");
440 
441 	return err;
442 }
443 
444 static int
mlx5_devlink_swp_l4_csum_mode_set(struct devlink * devlink,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)445 mlx5_devlink_swp_l4_csum_mode_set(struct devlink *devlink, u32 id,
446 				  struct devlink_param_gset_ctx *ctx,
447 				  struct netlink_ext_ack *extack)
448 {
449 	u8 value;
450 
451 	if (!strcmp(ctx->val.vstr, "full_csum"))
452 		value = SWP_L4_CSUM_MODE_FULL_CSUM;
453 	else
454 		value = SWP_L4_CSUM_MODE_L4_ONLY;
455 
456 	return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
457 }
458 
459 static int
mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink * devlink,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)460 mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink *devlink, u32 id,
461 					  struct devlink_param_gset_ctx *ctx,
462 					  struct netlink_ext_ack *extack)
463 {
464 	u8 value;
465 	int err;
466 
467 	err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
468 					&value, extack);
469 	if (err)
470 		return err;
471 
472 	strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
473 		sizeof(ctx->val.vstr));
474 	return 0;
475 }
476 
477 static int
mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink * devlink,u32 id,enum devlink_param_cmode cmode,struct netlink_ext_ack * extack)478 mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink *devlink, u32 id,
479 					  enum devlink_param_cmode cmode,
480 					  struct netlink_ext_ack *extack)
481 {
482 	u8 value;
483 	int err;
484 
485 	err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
486 					&value, extack);
487 	if (err)
488 		return err;
489 
490 	return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
491 }
492 
/*
 * Fill the MNVDA header to address the NV_GLOBAL_PCI_CONF item
 * (type class 0, global) and query it from firmware into @mnvda.
 */
static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
					      void *mnvda, size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_conf);

	return mlx5_nv_param_read(dev, mnvda, len);
}
503 
/*
 * Fill the MNVDA header to address the NV_GLOBAL_PCI_CAP item
 * (type class 0, global) and query it from firmware into @mnvda.
 */
static int mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev,
					     void *mnvda, size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
	MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
			       MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_cap);

	return mlx5_nv_param_read(dev, mnvda, len);
}
514 
/*
 * Fill the MNVDA header to address this PF's NV_PF_PCI_CONF item
 * (type class 3, per host PF) and query it from firmware into @mnvda.
 * pf_index/pci_bus_index are left at zero, i.e. the item of the issuing PF.
 */
static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
					       void *mnvda, size_t len)
{
	MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
	MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
			       MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
	MLX5_SET_CFG_HDR_LEN(mnvda, nv_pf_pci_conf);

	return mlx5_nv_param_read(dev, mnvda, len);
}
525 
/*
 * Devlink enable_sriov get (permanent cmode).  Reports false when SRIOV is
 * unsupported; otherwise the global sriov_en bit, further qualified by this
 * PF's pf_total_vf_en when the device is in per-PF mode.
 */
static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
					 struct devlink_param_gset_ctx *ctx,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	bool sriov_en;
	void *item;
	int ret;

	/* No SRIOV capability -> report disabled. */
	ret = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
	if (ret)
		return ret;

	item = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	if (!MLX5_GET(nv_global_pci_cap, item, sriov_support)) {
		ctx->val.vbool = false;
		return 0;
	}

	memset(mnvda, 0, sizeof(mnvda));
	ret = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
	if (ret)
		return ret;

	item = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	sriov_en = MLX5_GET(nv_global_pci_conf, item, sriov_en);
	if (!MLX5_GET(nv_global_pci_conf, item, per_pf_total_vf)) {
		ctx->val.vbool = sriov_en;
		return 0;
	}

	/* Per-PF mode: both the global and the per-PF knobs must be on. */
	memset(mnvda, 0, sizeof(mnvda));
	ret = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
	if (ret)
		return ret;

	item = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	ctx->val.vbool = sriov_en &&
			 MLX5_GET(nv_pf_pci_conf, item, pf_total_vf_en);
	return 0;
}
569 
/*
 * Devlink enable_sriov set (permanent cmode).
 *
 * Only supported on devices with per-PF SRIOV provisioning: first switch the
 * global PCI configuration to per-PF mode with SRIOV enabled, then toggle
 * this PF's pf_total_vf_en according to the requested value.
 */
static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
					 struct devlink_param_gset_ctx *ctx,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	bool per_pf_support;
	void *cap, *data;
	int err;

	err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to read global PCI capability");
		return err;
	}

	cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	per_pf_support = MLX5_GET(nv_global_pci_cap, cap,
				  per_pf_total_vf_supported);

	if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "SRIOV is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (!per_pf_support) {
		/* We don't allow global SRIOV setting on per PF devlink */
		NL_SET_ERR_MSG_MOD(extack,
				   "SRIOV is not per PF on this device");
		return -EOPNOTSUPP;
	}

	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to read global PCI configuration");
		return err;
	}

	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);

	/* setup per PF sriov mode */
	MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
	MLX5_SET(nv_global_pci_conf, data, sriov_en, 1);
	MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);

	err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to write global PCI configuration");
		return err;
	}

	/* enable/disable sriov on this PF */
	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unable to read per host PF configuration");
		return err;
	}
	/* 'data' still points at configuration_item_data inside mnvda, which
	 * now holds the per-host PF item just read back.
	 */
	MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
}
637 
/*
 * Devlink total_vfs get (permanent cmode).  Reports 0 without SRIOV support,
 * the global total_vfs in global mode, or this PF's total_vf in per-PF mode.
 */
static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
				      struct devlink_param_gset_ctx *ctx,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	void *data;
	int err;

	/* 'data' points at configuration_item_data inside mnvda and remains
	 * valid across the memset + re-read cycles below, since the same
	 * buffer is reused for each successive item.
	 */
	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);

	err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
		ctx->val.vu32 = 0;
		return 0;
	}

	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
		ctx->val.vu32 = MLX5_GET(nv_global_pci_conf, data, total_vfs);
		return 0;
	}

	/* SRIOV is per PF */
	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	ctx->val.vu32 = MLX5_GET(nv_pf_pci_conf, data, total_vf);

	return 0;
}
678 
/*
 * Devlink total_vfs set (permanent cmode).
 *
 * Requires per-PF SRIOV support: marks the global PCI configuration as
 * per-PF (sriov_valid + per_pf_total_vf), then writes total_vf into this
 * PF's NV_PF_PCI_CONF item.
 */
static int mlx5_devlink_total_vfs_set(struct devlink *devlink, u32 id,
				      struct devlink_param_gset_ctx *ctx,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	/* Zero-initialize: the buffer is sent to firmware as the query input
	 * and must not carry stack garbage (every other caller in this file
	 * does the same).
	 */
	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	void *data;
	int err;

	err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to read global pci cap");
		return err;
	}

	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
		NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
		return -EOPNOTSUPP;
	}

	if (!MLX5_GET(nv_global_pci_cap, data, per_pf_total_vf_supported)) {
		/* We don't allow global SRIOV setting on per PF devlink */
		NL_SET_ERR_MSG_MOD(extack,
				   "SRIOV is not per PF on this device");
		return -EOPNOTSUPP;
	}

	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	/* 'data' still points at configuration_item_data inside mnvda. */
	MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
	MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);

	err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	memset(mnvda, 0, sizeof(mnvda));
	err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	MLX5_SET(nv_pf_pci_conf, data, total_vf, ctx->val.vu32);
	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
}
728 
/*
 * Validate a requested total_vfs value against the device capability.
 *
 * TOTAL_VFS is a u32 devlink param and the set handler consumes
 * ctx->val.vu32, so read the same union member here; the previous code read
 * val.vu16, which silently ignored the upper 16 bits of the request.
 */
static int mlx5_devlink_total_vfs_validate(struct devlink *devlink, u32 id,
					   union devlink_param_value val,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	/* Zeroed: the buffer doubles as the firmware query input. */
	u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	void *data;
	u16 max;
	int err;

	data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);

	err = mlx5_nv_param_read_global_pci_cap(dev, cap, sizeof(cap));
	if (err)
		return err;

	if (!MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf_valid))
		return 0; /* optimistic, but set might fail later */

	max = MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf);
	if (val.vu32 > max) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Max allowed by device is %u", max);
		return -EINVAL;
	}

	return 0;
}
757 
/* Permanent (NV) devlink parameters exposed on the PF devlink instance. */
static const struct devlink_param mlx5_nv_param_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      mlx5_devlink_enable_sriov_get,
			      mlx5_devlink_enable_sriov_set, NULL),
	DEVLINK_PARAM_GENERIC(TOTAL_VFS, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      mlx5_devlink_total_vfs_get,
			      mlx5_devlink_total_vfs_set,
			      mlx5_devlink_total_vfs_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
			     "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     mlx5_nv_param_devlink_cqe_compress_get,
			     mlx5_nv_param_devlink_cqe_compress_set,
			     mlx5_nv_param_devlink_cqe_compress_validate),
	DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
					   "swp_l4_csum_mode", DEVLINK_PARAM_TYPE_STRING,
					   BIT(DEVLINK_PARAM_CMODE_PERMANENT),
					   mlx5_devlink_swp_l4_csum_mode_get,
					   mlx5_devlink_swp_l4_csum_mode_set,
					   mlx5_devlink_swp_l4_csum_mode_validate,
					   mlx5_devlink_swp_l4_csum_mode_get_default,
					   mlx5_devlink_swp_l4_csum_mode_set_default),
};
781 
mlx5_nv_param_register_dl_params(struct devlink * devlink)782 int mlx5_nv_param_register_dl_params(struct devlink *devlink)
783 {
784 	if (!mlx5_core_is_pf(devlink_priv(devlink)))
785 		return 0;
786 
787 	return devl_params_register(devlink, mlx5_nv_param_devlink_params,
788 				    ARRAY_SIZE(mlx5_nv_param_devlink_params));
789 }
790 
mlx5_nv_param_unregister_dl_params(struct devlink * devlink)791 void mlx5_nv_param_unregister_dl_params(struct devlink *devlink)
792 {
793 	if (!mlx5_core_is_pf(devlink_priv(devlink)))
794 		return;
795 
796 	devl_params_unregister(devlink, mlx5_nv_param_devlink_params,
797 			       ARRAY_SIZE(mlx5_nv_param_devlink_params));
798 }
799 
800