xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h (revision 7ce4de1cdaf11c39b507008dfb5a4e59079d4e8a)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #ifndef __MLX5_CORE_H__
34 #define __MLX5_CORE_H__
35 
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/if_link.h>
40 #include <linux/firmware.h>
41 #include <linux/mlx5/cq.h>
42 #include <linux/mlx5/fs.h>
43 #include <linux/mlx5/driver.h>
44 #include "lib/devcom.h"
45 
46 extern uint mlx5_core_debug_mask;
47 
48 #define mlx5_core_dbg(__dev, format, ...)				\
49 	dev_dbg((__dev)->device, "%s:%d:(pid %d): " format,		\
50 		 __func__, __LINE__, current->pid,			\
51 		 ##__VA_ARGS__)
52 
53 #define mlx5_core_dbg_once(__dev, format, ...)		\
54 	dev_dbg_once((__dev)->device,		\
55 		     "%s:%d:(pid %d): " format,		\
56 		     __func__, __LINE__, current->pid,	\
57 		     ##__VA_ARGS__)
58 
59 #define mlx5_core_dbg_mask(__dev, mask, format, ...)		\
60 do {								\
61 	if ((mask) & mlx5_core_debug_mask)			\
62 		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);	\
63 } while (0)
64 
65 #define mlx5_core_err(__dev, format, ...)			\
66 	dev_err((__dev)->device, "%s:%d:(pid %d): " format,	\
67 		__func__, __LINE__, current->pid,		\
68 	       ##__VA_ARGS__)
69 
70 #define mlx5_core_err_rl(__dev, format, ...)			\
71 	dev_err_ratelimited((__dev)->device,			\
72 			    "%s:%d:(pid %d): " format,		\
73 			    __func__, __LINE__, current->pid,	\
74 			    ##__VA_ARGS__)
75 
76 #define mlx5_core_warn(__dev, format, ...)			\
77 	dev_warn((__dev)->device, "%s:%d:(pid %d): " format,	\
78 		 __func__, __LINE__, current->pid,		\
79 		 ##__VA_ARGS__)
80 
81 #define mlx5_core_warn_once(__dev, format, ...)				\
82 	dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format,	\
83 		      __func__, __LINE__, current->pid,			\
84 		      ##__VA_ARGS__)
85 
86 #define mlx5_core_warn_rl(__dev, format, ...)			\
87 	dev_warn_ratelimited((__dev)->device,			\
88 			     "%s:%d:(pid %d): " format,		\
89 			     __func__, __LINE__, current->pid,	\
90 			     ##__VA_ARGS__)
91 
92 #define mlx5_core_info(__dev, format, ...)		\
93 	dev_info((__dev)->device, format, ##__VA_ARGS__)
94 
95 #define mlx5_core_info_rl(__dev, format, ...)			\
96 	dev_info_ratelimited((__dev)->device,			\
97 			     "%s:%d:(pid %d): " format,		\
98 			     __func__, __LINE__, current->pid,	\
99 			     ##__VA_ARGS__)
100 
101 #define ACCESS_KEY_LEN  32
102 #define FT_ID_FT_TYPE_OFFSET 24
103 
/* Attributes for mlx5_cmd_allow_other_vhca_access(): identifies a local
 * object and the access key a peer function must present to use it.
 */
struct mlx5_cmd_allow_other_vhca_access_attr {
	u16 obj_type;	/* type of the object being exposed */
	u32 obj_id;	/* id of the object being exposed */
	u8 access_key[ACCESS_KEY_LEN];	/* shared secret gating access */
};
109 
/* Attributes for mlx5_cmd_alias_obj_create(): describes the remote object
 * (on @vhca_id) that the new alias object will refer to.
 */
struct mlx5_cmd_alias_obj_create_attr {
	u32 obj_id;	/* id of the target object on the remote function */
	u16 vhca_id;	/* vhca owning the target object */
	u16 obj_type;	/* type of the target object */
	u8 access_key[ACCESS_KEY_LEN];	/* key matching the target's access key */
};
116 
/* Ethernet protocol masks as returned by mlx5_port_query_eth_proto(). */
struct mlx5_port_eth_proto {
	u32 cap;	/* protocols supported by the port */
	u32 admin;	/* protocols administratively enabled */
	u32 oper;	/* protocol currently operational */
};
122 
/* Parameters for mlx5_query_module_eeprom_by_page(): addresses a window
 * of a pluggable module's EEPROM.
 */
struct mlx5_module_eeprom_query_params {
	u16 size;	/* number of bytes to read */
	u16 offset;	/* byte offset within the page */
	u16 i2c_address;
	u32 page;
	u32 bank;
	u32 module_number;
};
131 
/* Link speed/lane description used by the PTYS translation helpers
 * (mlx5_port_ptys2info() / mlx5_port_info2linkmodes()).
 */
struct mlx5_link_info {
	u32 speed;	/* link speed; units per caller convention — TODO confirm (likely Mb/s) */
	u32 lanes;	/* number of lanes */
};
136 
/* Emit a device-prefixed log message at the given printk @level.
 * The caller's format/args are expanded inside a single printk call via
 * the %pV / struct va_format mechanism, so the output is not interleaved
 * with other messages.
 */
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
	struct device *device = dev->device;
	struct va_format vaf;
	va_list args;

	/* Clamp out-of-range levels (warn once) rather than passing them on. */
	if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
		      "Level %d is out of range, set to default level\n", level))
		level = LOGLEVEL_DEFAULT;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device),
			&vaf);
	va_end(args);
}
155 
156 #define mlx5_log(__dev, level, format, ...)			\
157 	mlx5_printk(__dev, level, "%s:%d:(pid %d): " format,	\
158 		    __func__, __LINE__, current->pid,		\
159 		    ##__VA_ARGS__)
160 
mlx5_core_dma_dev(struct mlx5_core_dev * dev)161 static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
162 {
163 	return &dev->pdev->dev;
164 }
165 
166 enum {
167 	MLX5_CMD_DATA, /* print command payload only */
168 	MLX5_CMD_TIME, /* print command execution time */
169 };
170 
171 enum {
172 	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
173 	MLX5_DRIVER_SYND = 0xbadd00de,
174 };
175 
176 enum mlx5_semaphore_space_address {
177 	MLX5_SEMAPHORE_SPACE_DOMAIN     = 0xA,
178 	MLX5_SEMAPHORE_SW_RESET         = 0x20,
179 };
180 
181 #define MLX5_DEFAULT_PROF       2
182 #define MLX5_SF_PROF		3
183 #define MLX5_NUM_FW_CMD_THREADS 8
184 #define MLX5_DEV_MAX_WQS	MLX5_NUM_FW_CMD_THREADS
185 
/* Compute "fixed + item_size * num_items" as an int command-buffer length,
 * logging and returning -ENOMEM on any overflow.  @func/@line identify the
 * real call site (see the MLX5_FLEXIBLE_INLEN() wrapper) so the error is
 * attributed correctly in the log.
 */
static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
				      size_t item_size, size_t num_items,
				      const char *func, int line)
{
	int inlen;

	/* Reject values that don't fit in int before the casts below. */
	if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
		mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
		mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	if (check_add_overflow((int)fixed, inlen, &inlen)) {
		mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	return inlen;
}
212 
213 #define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
214 	mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
215 
216 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
217 int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
218 			    enum mlx5_cap_mode cap_mode);
219 int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
220 int mlx5_query_board_id(struct mlx5_core_dev *dev);
221 int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
222 int mlx5_cmd_init(struct mlx5_core_dev *dev);
223 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
224 int mlx5_cmd_enable(struct mlx5_core_dev *dev);
225 void mlx5_cmd_disable(struct mlx5_core_dev *dev);
226 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
227 			enum mlx5_cmdif_state cmdif_state);
228 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
229 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
230 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
231 int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
232 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
233 void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
234 u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
235 int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
236 void mlx5_disable_device(struct mlx5_core_dev *dev);
237 int mlx5_recover_device(struct mlx5_core_dev *dev);
238 int mlx5_sriov_init(struct mlx5_core_dev *dev);
239 void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
240 int mlx5_sriov_attach(struct mlx5_core_dev *dev);
241 void mlx5_sriov_detach(struct mlx5_core_dev *dev);
242 int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
243 void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
244 int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
245 int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
246 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
247 bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
248 bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
249 int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
250 				       void *context, u32 *element_id);
251 int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
252 				       void *context, u32 element_id,
253 				       u32 modify_bitmask);
254 int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
255 					u32 element_id);
256 int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
257 
258 void mlx5_cmd_flush(struct mlx5_core_dev *dev);
259 void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
260 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
261 
262 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
263 			u8 access_reg_group);
264 int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
265 			u8 access_reg_group);
266 int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
267 			u8 feature_group, u8 access_reg_group);
268 int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);
269 
270 void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
271 void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
272 void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
273 void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
274 void mlx5_lag_disable_change(struct mlx5_core_dev *dev);
275 void mlx5_lag_enable_change(struct mlx5_core_dev *dev);
276 
277 int mlx5_events_init(struct mlx5_core_dev *dev);
278 void mlx5_events_cleanup(struct mlx5_core_dev *dev);
279 void mlx5_events_start(struct mlx5_core_dev *dev);
280 void mlx5_events_stop(struct mlx5_core_dev *dev);
281 
282 int mlx5_adev_idx_alloc(void);
283 void mlx5_adev_idx_free(int idx);
284 void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
285 int mlx5_adev_init(struct mlx5_core_dev *dev);
286 
287 int mlx5_attach_device(struct mlx5_core_dev *dev);
288 void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
289 int mlx5_register_device(struct mlx5_core_dev *dev);
290 void mlx5_unregister_device(struct mlx5_core_dev *dev);
291 void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
292 bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
293 
294 void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
295 int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
296 int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
297 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
298 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
299 
300 struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
301 void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
302 
303 #ifdef CONFIG_PCIE_TPH
304 struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev);
305 void mlx5_st_destroy(struct mlx5_core_dev *dev);
306 #else
307 static inline struct mlx5_st *
mlx5_st_create(struct mlx5_core_dev * dev)308 mlx5_st_create(struct mlx5_core_dev *dev) { return NULL; }
mlx5_st_destroy(struct mlx5_core_dev * dev)309 static inline void mlx5_st_destroy(struct mlx5_core_dev *dev) { return; }
310 #endif
311 
312 void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
313 int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
314 			       enum mlx5_port_status status);
315 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
316 				 enum mlx5_port_status *status);
317 int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
318 
319 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
320 int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
321 int mlx5_query_port_pause(struct mlx5_core_dev *dev,
322 			  u32 *rx_pause, u32 *tx_pause);
323 
324 int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
325 int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
326 			u8 *pfc_en_rx);
327 
328 int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
329 				  u16 stall_critical_watermark,
330 				  u16 stall_minor_watermark);
331 int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
332 				    u16 *stall_critical_watermark,
333 				    u16 *stall_minor_watermark);
334 
335 int mlx5_max_tc(struct mlx5_core_dev *mdev);
336 int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
337 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
338 			    u8 prio, u8 *tc);
339 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
340 int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
341 			     u8 tc, u8 *tc_group);
342 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
343 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
344 				u8 tc, u8 *bw_pct);
345 int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
346 				    u8 *max_bw_value,
347 				    u8 *max_bw_unit);
348 int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
349 				   u8 *max_bw_value,
350 				   u8 *max_bw_unit);
351 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
352 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
353 
354 int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
355 int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
356 int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
357 void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
358 			 bool *enabled);
359 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
360 			     u16 offset, u16 size, u8 *data);
361 int
362 mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
363 				 struct mlx5_module_eeprom_query_params *params,
364 				 u8 *data);
365 
366 int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
367 int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
368 int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
369 int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
370 int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
371 int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
372 
373 int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
374 			      struct mlx5_port_eth_proto *eproto);
375 bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
376 const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
377 						 u32 eth_proto_oper,
378 						 bool force_legacy);
379 u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
380 			     struct mlx5_link_info *info,
381 			     bool force_legacy);
382 int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
383 
384 #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
385 			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
386 			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
387 			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
388 
389 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
390 			struct netlink_ext_ack *extack);
391 int mlx5_fw_version_query(struct mlx5_core_dev *dev,
392 			  u32 *running_ver, u32 *stored_ver);
393 
394 #ifdef CONFIG_MLX5_CORE_EN
395 int mlx5e_init(void);
396 void mlx5e_cleanup(void);
397 #else
/* No-op stubs used when the ethernet part (CONFIG_MLX5_CORE_EN) is not built. */
static inline int mlx5e_init(void)
{
	return 0;
}
static inline void mlx5e_cleanup(void)
{
}
400 #endif
401 
mlx5_sriov_is_enabled(struct mlx5_core_dev * dev)402 static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
403 {
404 	return pci_num_vf(dev->pdev) ? true : false;
405 }
406 
407 int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
/* Rescan auxiliary device drivers while holding the HCA devcom component
 * lock; the lock protects mlx5_rescan_drivers_locked() as its name implies.
 * Returns whatever the locked variant returns.
 */
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
	int ret;

	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	ret = mlx5_rescan_drivers_locked(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
	return ret;
}
417 
418 u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
419 void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
420 
/* True when @dev is a sub-function (SF) core device. */
static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_SF;
}
425 
/* Recover the auxiliary device that embeds @mdev->device.  Only valid for
 * SF core devices, where dev->device is the auxiliary device's struct
 * device — NOTE(review): caller must ensure mlx5_core_is_sf(mdev).
 */
static inline struct auxiliary_device *
mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
{
	return container_of(mdev->device, struct auxiliary_device, dev);
}
431 
432 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
433 void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
434 int mlx5_init_one(struct mlx5_core_dev *dev);
435 int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev);
436 void mlx5_uninit_one(struct mlx5_core_dev *dev);
437 void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
438 void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
439 int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
440 int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
441 int mlx5_init_one_light(struct mlx5_core_dev *dev);
442 void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
443 void mlx5_unload_one_light(struct mlx5_core_dev *dev);
444 
445 int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
446 				  u16 opmod);
447 #define mlx5_vport_get_other_func_general_cap(dev, vport, out)		\
448 	mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
449 
450 int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
451 
/* Return the total number of MSI-X vectors the device can distribute
 * dynamically among its VFs (num_total_dynamic_vf_msix max capability).
 */
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
}
458 
459 bool mlx5_eth_supported(struct mlx5_core_dev *dev);
460 bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
461 bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
462 bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
463 int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
464 				     struct mlx5_cmd_allow_other_vhca_access_attr *attr);
465 int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
466 			      struct mlx5_cmd_alias_obj_create_attr *alias_attr,
467 			      u32 *obj_id);
468 int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);
469 
/* First vport number assigned to embedded-CPU (EC) VFs, from device caps.
 * Zero when the capability is not present.
 */
static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN_2(dev, ec_vf_vport_base);
}
474 
/* Non-zero (boolean) when @dev is an ECPF and EC VF vports exist, i.e. a
 * non-zero ec_vf_vport_base capability.  Returned as u16 but used as a
 * truth value by callers such as mlx5_core_is_ec_vf_vport().
 */
static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev);
}
479 
mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev * dev,u16 vport_num)480 static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num)
481 {
482 	int base_vport = mlx5_core_ec_vf_vport_base(dev);
483 	int max_vport = base_vport + mlx5_core_max_ec_vfs(dev);
484 
485 	if (!mlx5_core_ec_sriov_enabled(dev))
486 		return false;
487 
488 	return (vport_num >= base_vport && vport_num < max_vport);
489 }
490 
/* Translate a vport number into a function id.  Plain functions map 1:1;
 * EC VF functions are offset from the EC VF vport base, 1-based.
 */
static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
	if (!ec_vf_func)
		return vport;

	return vport - mlx5_core_ec_vf_vport_base(dev) + 1;
}
496 
/* Maximum number of EQs the device supports, probing capabilities from
 * newest to oldest: the 24-bit max_num_eqs_24b field, then max_num_eqs,
 * then the legacy log_max_eq (returned as a power of two).
 */
static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
		return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);

	if (MLX5_CAP_GEN(dev, max_num_eqs))
		return MLX5_CAP_GEN(dev, max_num_eqs);

	return 1 << MLX5_CAP_GEN(dev, log_max_eq);
}
507 
mlx5_pcie_cong_event_supported(struct mlx5_core_dev * dev)508 static inline bool mlx5_pcie_cong_event_supported(struct mlx5_core_dev *dev)
509 {
510 	u64 features = MLX5_CAP_GEN_2_64(dev, general_obj_types_127_64);
511 
512 	if (!(features & MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT))
513 		return false;
514 
515 	if (dev->sd)
516 		return false;
517 
518 	return true;
519 }
520 #endif /* __MLX5_CORE_H__ */
521