xref: /linux/drivers/infiniband/hw/mlx5/mlx5_ib.h (revision 311aa68319f6a3d64a1e6d940d885830c7acba4c)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3  * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4  * Copyright (c) 2020, Intel Corporation. All rights reserved.
5  */
6 
7 #ifndef MLX5_IB_H
8 #define MLX5_IB_H
9 
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <rdma/ib_verbs.h>
13 #include <rdma/ib_umem.h>
14 #include <rdma/ib_smi.h>
15 #include <linux/mlx5/driver.h>
16 #include <linux/mlx5/cq.h>
17 #include <linux/mlx5/fs.h>
18 #include <linux/mlx5/qp.h>
19 #include <linux/types.h>
20 #include <linux/mlx5/transobj.h>
21 #include <rdma/ib_user_verbs.h>
22 #include <rdma/mlx5-abi.h>
23 #include <rdma/uverbs_ioctl.h>
24 #include <rdma/mlx5_user_ioctl_cmds.h>
25 #include <rdma/mlx5_user_ioctl_verbs.h>
26 
27 #include "srq.h"
28 #include "qp.h"
29 #include "macsec.h"
30 
31 #define mlx5_ib_dbg(_dev, format, arg...)                                      \
32 	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
33 		__LINE__, current->pid, ##arg)
34 
35 #define mlx5_ib_err(_dev, format, arg...)                                      \
36 	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
37 		__LINE__, current->pid, ##arg)
38 
39 #define mlx5_ib_warn(_dev, format, arg...)                                     \
40 	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
41 		 __LINE__, current->pid, ##arg)
42 
43 #define mlx5_ib_log(lvl, _dev, format, arg...)                                 \
44 	dev_printk(lvl, &(_dev)->ib_dev.dev,  "%s:%d:(pid %d): " format,       \
45 		   __func__, __LINE__, current->pid, ##arg)
46 
47 #define MLX5_IB_DEFAULT_UIDX 0xffffff
48 #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
49 
50 static __always_inline unsigned long
51 __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
52 			       unsigned int pgsz_shift)
53 {
54 	unsigned int largest_pg_shift =
55 		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
56 		      BITS_PER_LONG - 1);
57 
58 	/*
59 	 * Despite a command allowing it, the device does not support page
60 	 * sizes lower than 4k.
61 	 */
62 	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
63 	return GENMASK(largest_pg_shift, pgsz_shift);
64 }
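
/*
 * Worked example (illustrative): for a context with a 5-bit log_page_size
 * field and pgsz_shift == MLX5_ADAPTER_PAGE_SHIFT (12), largest_pg_shift is
 * min((1 << 5) - 1 + 12, 63) = 43 and the clamped pgsz_shift stays 12, so the
 * helper returns GENMASK(43, 12) - every power-of-two page size from 4K up to
 * 8TB is a candidate. The 5-bit field width is an assumption chosen for the
 * example, not a statement about any particular hardware context.
 */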
65 
66 static __always_inline unsigned long
67 __mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
68 			      unsigned int offset_shift)
69 {
70 	unsigned int largest_offset_shift =
71 		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
72 		      BITS_PER_LONG - 1);
73 
74 	return GENMASK(largest_offset_shift, offset_shift);
75 }
76 
77 /*
78  * QP/CQ/WQ/etc type commands take a page offset that satisfies:
79  *   page_offset_quantized * (page_size/scale) = page_offset
80  * Which restricts allowed page sizes to ones that satisfy the above.
81  */
82 unsigned long __mlx5_umem_find_best_quantized_pgoff(
83 	struct ib_umem *umem, unsigned long pgsz_bitmap,
84 	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
85 	unsigned int *page_offset_quantized);
86 #define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
87 					    pgsz_shift, page_offset_fld,       \
88 					    scale, page_offset_quantized)      \
89 	__mlx5_umem_find_best_quantized_pgoff(                                 \
90 		umem,                                                          \
91 		__mlx5_log_page_size_to_bitmap(                                \
92 			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
93 		__mlx5_bit_sz(typ, page_offset_fld),                           \
94 		GENMASK(31, order_base_2(scale)), scale,                       \
95 		page_offset_quantized)
96 
97 #define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
98 					       pgsz_shift, page_offset_fld,    \
99 					       scale, page_offset_quantized)   \
100 	__mlx5_umem_find_best_quantized_pgoff(                                 \
101 		umem,                                                          \
102 		__mlx5_log_page_size_to_bitmap(                                \
103 			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
104 		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
105 		page_offset_quantized)
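
/*
 * Usage sketch (illustrative, not a verbatim call site): a create path that
 * has already registered @umem would typically pick the page size and the
 * quantized offset in one step, e.g. for an object whose context carries
 * log_page_size and page_offset fields with a scale of 64:
 *
 *	page_size = mlx5_umem_find_best_quantized_pgoff(umem, srqc,
 *					log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *					page_offset, 64,
 *					&page_offset_quantized);
 *	if (!page_size)
 *		return -EINVAL;
 *
 * A return of 0 means no supported page size can express the umem's offset.
 * The srqc context type and the scale of 64 are assumptions for the example.
 */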
106 
107 enum {
108 	MLX5_IB_MMAP_OFFSET_START = 9,
109 	MLX5_IB_MMAP_OFFSET_END = 255,
110 };
111 
112 enum {
113 	MLX5_IB_MMAP_CMD_SHIFT	= 8,
114 	MLX5_IB_MMAP_CMD_MASK	= 0xff,
115 };
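
/*
 * Illustrative decode of a legacy mmap offset (everything other than the two
 * constants above is hypothetical): the command lives in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT of the mmap page offset, e.g.
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 */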
116 
117 enum {
118 	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
119 	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
120 	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
121 	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
122 };
123 
124 enum mlx5_ib_mad_ifc_flags {
125 	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
126 	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
127 	MLX5_MAD_IFC_NET_VIEW		= 4,
128 };
129 
130 enum {
131 	MLX5_CROSS_CHANNEL_BFREG         = 0,
132 };
133 
134 enum {
135 	MLX5_CQE_VERSION_V0,
136 	MLX5_CQE_VERSION_V1,
137 };
138 
139 enum {
140 	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
141 	MLX5_TM_MAX_SGE			= 1,
142 };
143 
144 enum {
145 	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
146 	MLX5_IB_INVALID_BFREG		= BIT(31),
147 };
148 
149 enum {
150 	MLX5_MAX_MEMIC_PAGES = 0x100,
151 	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
152 };
153 
154 enum {
155 	MLX5_MEMIC_BASE_ALIGN	= 6,
156 	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
157 };
158 
159 enum mlx5_ib_mmap_type {
160 	MLX5_IB_MMAP_TYPE_MEMIC = 1,
161 	MLX5_IB_MMAP_TYPE_VAR = 2,
162 	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
163 	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
164 	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
165 };
166 
167 struct mlx5_bfreg_info {
168 	u32 *sys_pages;
169 	int num_low_latency_bfregs;
170 	unsigned int *count;
171 
172 	/*
173 	 * protect bfreg allocation data structs
174 	 */
175 	struct mutex lock;
176 	u32 ver;
177 	u8 lib_uar_4k : 1;
178 	u8 lib_uar_dyn : 1;
179 	u32 num_sys_pages;
180 	u32 num_static_sys_pages;
181 	u32 total_num_bfregs;
182 	u32 num_dyn_bfregs;
183 };
184 
185 struct mlx5_ib_ucontext {
186 	struct ib_ucontext	ibucontext;
187 	struct list_head	db_page_list;
188 
189 	/* protect doorbell record alloc/free
190 	 */
191 	struct mutex		db_page_mutex;
192 	struct mlx5_bfreg_info	bfregi;
193 	u8			cqe_version;
194 	/* Transport Domain number */
195 	u32			tdn;
196 
197 	u64			lib_caps;
198 	u16			devx_uid;
199 	/* For RoCE LAG TX affinity */
200 	atomic_t		tx_port_affinity;
201 };
202 
203 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
204 {
205 	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
206 }
207 
208 struct mlx5_ib_pd {
209 	struct ib_pd		ibpd;
210 	u32			pdn;
211 	u16			uid;
212 };
213 
214 enum {
215 	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
216 	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
217 	MLX5_IB_FLOW_ACTION_DECAP,
218 };
219 
220 #define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
221 #define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
222 #if (MLX5_IB_FLOW_LAST_PRIO <= 0)
223 #error "Invalid number of bypass priorities"
224 #endif
225 #define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)
226 
227 #define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
228 #define MLX5_IB_NUM_SNIFFER_FTS		2
229 #define MLX5_IB_NUM_EGRESS_FTS		1
230 #define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS
231 
232 struct mlx5_ib_anchor {
233 	struct mlx5_flow_table *ft;
234 	struct mlx5_flow_group *fg_goto_table;
235 	struct mlx5_flow_group *fg_drop;
236 	struct mlx5_flow_handle *rule_goto_table;
237 	struct mlx5_flow_handle *rule_drop;
238 	unsigned int rule_goto_table_ref;
239 };
240 
241 struct mlx5_ib_flow_prio {
242 	struct mlx5_flow_table		*flow_table;
243 	struct mlx5_ib_anchor		anchor;
244 	unsigned int			refcount;
245 };
246 
247 struct mlx5_ib_flow_handler {
248 	struct list_head		list;
249 	struct ib_flow			ibflow;
250 	struct mlx5_ib_flow_prio	*prio;
251 	struct mlx5_flow_handle		*rule;
252 	struct ib_counters		*ibcounters;
253 	struct mlx5_ib_dev		*dev;
254 	struct mlx5_ib_flow_matcher	*flow_matcher;
255 };
256 
257 struct mlx5_ib_flow_matcher {
258 	struct mlx5_ib_match_params matcher_mask;
259 	int			mask_len;
260 	enum mlx5_ib_flow_type	flow_type;
261 	enum mlx5_flow_namespace_type ns_type;
262 	u16			priority;
263 	struct mlx5_core_dev	*mdev;
264 	atomic_t		usecnt;
265 	u8			match_criteria_enable;
266 	u32			ib_port;
267 };
268 
269 struct mlx5_ib_steering_anchor {
270 	struct mlx5_ib_flow_prio *ft_prio;
271 	struct mlx5_ib_dev *dev;
272 	atomic_t usecnt;
273 };
274 
275 struct mlx5_ib_pp {
276 	u16 index;
277 	struct mlx5_core_dev *mdev;
278 };
279 
280 enum mlx5_ib_optional_counter_type {
281 	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
282 	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
283 	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
284 	MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS,
285 	MLX5_IB_OPCOUNTER_RDMA_TX_BYTES,
286 	MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS,
287 	MLX5_IB_OPCOUNTER_RDMA_RX_BYTES,
288 
289 	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP,
290 	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP,
291 	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP,
292 	MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP,
293 	MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP,
294 	MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP,
295 	MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP,
296 
297 	MLX5_IB_OPCOUNTER_MAX,
298 };
299 
300 struct mlx5_ib_flow_db {
301 	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
302 	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
303 	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
304 	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
305 	struct mlx5_ib_flow_prio	fdb[MLX5_IB_NUM_FDB_FTS];
306 	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
307 	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
308 	struct mlx5_ib_flow_prio	opfcs[MLX5_IB_OPCOUNTER_MAX];
309 	struct mlx5_flow_table		*lag_demux_ft;
310 	struct mlx5_ib_flow_prio        *rdma_transport_rx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
311 	struct mlx5_ib_flow_prio        *rdma_transport_tx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
312 	/* Protect flow steering bypass flow tables
313 	 * when adding/deleting flow rules.
314 	 * Only a single addition/removal of a flow steering rule can be done
315 	 * at a time.
316 	 */
317 	struct mutex			lock;
318 };
319 
320 /* Use macros here so that we don't have to duplicate
321  * enum ib_qp_type for low-level driver
322  */
323 
324 #define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
325 /*
326  * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
327  * creates the actual hardware QP.
328  */
329 #define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
330 #define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
331 #define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
332 #define MLX5_IB_WR_UMR		IB_WR_RESERVED1
333 
334 #define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
335 #define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
336 #define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
337 #define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
338 #define MLX5_IB_UPD_XLT_PD	      BIT(4)
339 #define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
340 #define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
341 #define MLX5_IB_UPD_XLT_DOWNGRADE     BIT(7)
342 #define MLX5_IB_UPD_XLT_KEEP_PGSZ     BIT(8)
343 
344 /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
345  *
346  * These flags are intended for internal use by the mlx5_ib driver, and they
347  * rely on the range reserved for that use in the ib_qp_create_flags enum.
348  */
349 #define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
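
/*
 * Illustrative use (a sketch, not a verbatim call site): internal QP creation
 * can request the special QP1 send behaviour by OR-ing the flag into the
 * standard init attributes before creating the QP, e.g.
 *
 *	init_attr.create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
 */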
350 
351 struct wr_list {
352 	u16	opcode;
353 	u16	next;
354 };
355 
356 enum mlx5_ib_rq_flags {
357 	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
358 	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
359 };
360 
361 struct mlx5_ib_wq {
362 	struct mlx5_frag_buf_ctrl fbc;
363 	u64		       *wrid;
364 	u32		       *wr_data;
365 	struct wr_list	       *w_list;
366 	unsigned	       *wqe_head;
367 	u16		        unsig_count;
368 
369 	/* serialize post to the work queue
370 	 */
371 	spinlock_t		lock;
372 	int			wqe_cnt;
373 	int			max_post;
374 	int			max_gs;
375 	int			offset;
376 	int			wqe_shift;
377 	unsigned		head;
378 	unsigned		tail;
379 	u16			cur_post;
380 	u16			last_poll;
381 	void			*cur_edge;
382 };
383 
384 enum mlx5_ib_wq_flags {
385 	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
386 	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
387 };
388 
389 #define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
390 #define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
391 #define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
392 #define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
393 #define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
394 
395 struct mlx5_ib_rwq {
396 	struct ib_wq		ibwq;
397 	struct mlx5_core_qp	core_qp;
398 	u32			rq_num_pas;
399 	u32			log_rq_stride;
400 	u32			log_rq_size;
401 	u32			rq_page_offset;
402 	u32			log_page_size;
403 	u32			log_num_strides;
404 	u32			two_byte_shift_en;
405 	u32			single_stride_log_num_of_bytes;
406 	struct ib_umem		*umem;
407 	size_t			buf_size;
408 	unsigned int		page_shift;
409 	struct mlx5_db		db;
410 	u32			user_index;
411 	u32			wqe_count;
412 	u32			wqe_shift;
413 	int			wq_sig;
414 	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
415 };
416 
417 struct mlx5_ib_rwq_ind_table {
418 	struct ib_rwq_ind_table ib_rwq_ind_tbl;
419 	u32			rqtn;
420 	u16			uid;
421 };
422 
423 struct mlx5_ib_ubuffer {
424 	struct ib_umem	       *umem;
425 	int			buf_size;
426 	u64			buf_addr;
427 };
428 
429 struct mlx5_ib_qp_base {
430 	struct mlx5_ib_qp	*container_mibqp;
431 	struct mlx5_core_qp	mqp;
432 	struct mlx5_ib_ubuffer	ubuffer;
433 };
434 
435 struct mlx5_ib_qp_trans {
436 	struct mlx5_ib_qp_base	base;
437 	u16			xrcdn;
438 	u32			alt_port;
439 	u8			atomic_rd_en;
440 	u8			resp_depth;
441 };
442 
443 struct mlx5_ib_rss_qp {
444 	u32	tirn;
445 };
446 
447 struct mlx5_ib_rq {
448 	struct mlx5_ib_qp_base base;
449 	struct mlx5_ib_wq	*rq;
450 	struct mlx5_ib_ubuffer	ubuffer;
451 	struct mlx5_db		*doorbell;
452 	u32			tirn;
453 	u8			state;
454 	u32			flags;
455 };
456 
457 struct mlx5_ib_sq {
458 	struct mlx5_ib_qp_base base;
459 	struct mlx5_ib_wq	*sq;
460 	struct mlx5_ib_ubuffer  ubuffer;
461 	struct mlx5_db		*doorbell;
462 	struct mlx5_flow_handle	*flow_rule;
463 	u32			tisn;
464 	u8			state;
465 };
466 
467 struct mlx5_ib_raw_packet_qp {
468 	struct mlx5_ib_sq sq;
469 	struct mlx5_ib_rq rq;
470 };
471 
472 struct mlx5_bf {
473 	int			buf_size;
474 	unsigned long		offset;
475 	struct mlx5_sq_bfreg   *bfreg;
476 };
477 
478 struct mlx5_ib_dct {
479 	struct mlx5_core_dct    mdct;
480 	u32                     *in;
481 };
482 
483 struct mlx5_ib_gsi_qp {
484 	struct ib_qp *rx_qp;
485 	u32 port_num;
486 	struct ib_qp_cap cap;
487 	struct ib_cq *cq;
488 	struct mlx5_ib_gsi_wr *outstanding_wrs;
489 	u32 outstanding_pi, outstanding_ci;
490 	int num_qps;
491 	/* Protects access to the tx_qps. Post send operations synchronize
492 	 * with tx_qp creation in setup_qp(). Also protects the
493 	 * outstanding_wrs array and indices.
494 	 */
495 	spinlock_t lock;
496 	struct ib_qp **tx_qps;
497 };
498 
499 struct mlx5_ib_qp {
500 	struct ib_qp		ibqp;
501 	union {
502 		struct mlx5_ib_qp_trans trans_qp;
503 		struct mlx5_ib_raw_packet_qp raw_packet_qp;
504 		struct mlx5_ib_rss_qp rss_qp;
505 		struct mlx5_ib_dct dct;
506 		struct mlx5_ib_gsi_qp gsi;
507 	};
508 	struct mlx5_frag_buf	buf;
509 
510 	struct mlx5_db		db;
511 	struct mlx5_ib_wq	rq;
512 
513 	u8			sq_signal_bits;
514 	u8			next_fence;
515 	struct mlx5_ib_wq	sq;
516 
517 	/* serialize qp state modifications
518 	 */
519 	struct mutex		mutex;
520 	/* cached variant of create_flags from struct ib_qp_init_attr */
521 	u32			flags;
522 	u32			port;
523 	u8			state;
524 	int			max_inline_data;
525 	struct mlx5_bf	        bf;
526 	u8			has_rq:1;
527 	u8			is_rss:1;
528 	u8			is_ooo_rq:1;
529 
530 	/* only for user space QPs. For kernel
531 	 * we have it from the bf object
532 	 */
533 	int			bfregn;
534 
535 	struct list_head	qps_list;
536 	struct list_head	cq_recv_list;
537 	struct list_head	cq_send_list;
538 	struct mlx5_rate_limit	rl;
539 	u32                     underlay_qpn;
540 	u32			flags_en;
541 	/*
542 	 * IB/core doesn't store low-level QP types, so
543 	 * store both MLX and IBTA types in the field below.
544 	 */
545 	enum ib_qp_type		type;
546 	/* A flag to indicate that a new counter is configured
547 	 * but has not yet taken effect
548 	 */
549 	u32                     counter_pending;
550 	u16			gsi_lag_port;
551 };
552 
553 struct mlx5_ib_cq_buf {
554 	struct mlx5_frag_buf_ctrl fbc;
555 	struct mlx5_frag_buf    frag_buf;
556 	struct ib_umem		*umem;
557 	int			cqe_size;
558 	int			nent;
559 };
560 
561 enum mlx5_ib_cq_pr_flags {
562 	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
563 	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
564 };
565 
566 struct mlx5_ib_cq {
567 	struct ib_cq		ibcq;
568 	struct mlx5_core_cq	mcq;
569 	struct mlx5_ib_cq_buf	buf;
570 	struct mlx5_db		db;
571 
572 	/* serialize access to the CQ
573 	 */
574 	spinlock_t		lock;
575 
576 	/* protect resize cq
577 	 */
578 	struct mutex		resize_mutex;
579 	struct mlx5_ib_cq_buf  *resize_buf;
580 	struct ib_umem	       *resize_umem;
581 	int			cqe_size;
582 	struct list_head	list_send_qp;
583 	struct list_head	list_recv_qp;
584 	u32			create_flags;
585 	struct list_head	wc_list;
586 	enum ib_cq_notify_flags notify_flags;
587 	struct work_struct	notify_work;
588 	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
589 };
590 
591 struct mlx5_ib_wc {
592 	struct ib_wc wc;
593 	struct list_head list;
594 };
595 
596 struct mlx5_ib_srq {
597 	struct ib_srq		ibsrq;
598 	struct mlx5_core_srq	msrq;
599 	struct mlx5_frag_buf	buf;
600 	struct mlx5_db		db;
601 	struct mlx5_frag_buf_ctrl fbc;
602 	u64		       *wrid;
603 	/* protect SRQ handling
604 	 */
605 	spinlock_t		lock;
606 	int			head;
607 	int			tail;
608 	u16			wqe_ctr;
609 	struct ib_umem	       *umem;
610 	/* serialize arming a SRQ
611 	 */
612 	struct mutex		mutex;
613 	int			wq_sig;
614 };
615 
616 struct mlx5_ib_xrcd {
617 	struct ib_xrcd		ibxrcd;
618 	u32			xrcdn;
619 };
620 
621 enum mlx5_ib_mtt_access_flags {
622 	MLX5_IB_MTT_READ  = (1 << 0),
623 	MLX5_IB_MTT_WRITE = (1 << 1),
624 };
625 
626 struct mlx5_user_mmap_entry {
627 	struct rdma_user_mmap_entry rdma_entry;
628 	u8 mmap_flag;
629 	u64 address;
630 	u32 page_idx;
631 };
632 
633 enum mlx5_mkey_type {
634 	MLX5_MKEY_MR = 1,
635 	MLX5_MKEY_MW,
636 	MLX5_MKEY_INDIRECT_DEVX,
637 	MLX5_MKEY_NULL,
638 	MLX5_MKEY_IMPLICIT_CHILD,
639 };
640 
641 /* Used for non-existent ph value */
642 #define MLX5_IB_NO_PH 0xff
643 
644 struct mlx5r_cache_rb_key {
645 	u8 ats:1;
646 	u8 ph;
647 	u16 st_index;
648 	unsigned int access_mode;
649 	unsigned int access_flags;
650 	unsigned int ndescs;
651 };
652 
653 struct mlx5_ib_mkey {
654 	u32 key;
655 	enum mlx5_mkey_type type;
656 	unsigned int ndescs;
657 	struct wait_queue_head wait;
658 	refcount_t usecount;
659 	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
660 	struct mlx5r_cache_rb_key rb_key;
661 	struct mlx5_cache_ent *cache_ent;
662 	u8 cacheable : 1;
663 };
664 
665 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
666 
667 #define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
668 					 IB_ACCESS_REMOTE_WRITE  |\
669 					 IB_ACCESS_REMOTE_READ   |\
670 					 IB_ACCESS_REMOTE_ATOMIC |\
671 					 IB_ZERO_BASED)
672 
673 #define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
674 					  IB_ACCESS_REMOTE_WRITE  |\
675 					  IB_ACCESS_REMOTE_READ   |\
676 					  IB_ZERO_BASED)
677 
678 #define mlx5_update_odp_stats(mr, counter_name, value)		\
679 	atomic64_add(value, &((mr)->odp_stats.counter_name))
680 
681 #define mlx5_update_odp_stats_with_handled(mr, counter_name, value)         \
682 	do {                                                                \
683 		mlx5_update_odp_stats(mr, counter_name, value);             \
684 		atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
685 	} while (0)
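
/*
 * Example (illustrative): a page-fault path that resolved a fault would
 * typically record the amount of work plus one handled event in a single
 * call, e.g.
 *
 *	mlx5_update_odp_stats_with_handled(mr, faults, npages);
 *
 * which adds @npages to odp_stats.faults and increments
 * odp_stats.faults_handled once. The counter name "faults" assumes the
 * corresponding fields exist in struct ib_odp_counters.
 */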
686 
687 struct mlx5_ib_mr {
688 	struct ib_mr ibmr;
689 	struct mlx5_ib_mkey mmkey;
690 
691 	struct ib_umem *umem;
692 	/* The MR is data-direct related */
693 	u8 data_direct :1;
694 
695 	union {
696 		/* Used only by kernel MRs (umem == NULL) */
697 		struct {
698 			void *descs;
699 			void *descs_alloc;
700 			dma_addr_t desc_map;
701 			int max_descs;
702 			int desc_size;
703 			int access_mode;
704 
705 			/* For Kernel IB_MR_TYPE_INTEGRITY */
706 			struct mlx5_core_sig_ctx *sig;
707 			struct mlx5_ib_mr *pi_mr;
708 			struct mlx5_ib_mr *klm_mr;
709 			struct mlx5_ib_mr *mtt_mr;
710 			u64 data_iova;
711 			u64 pi_iova;
712 			int meta_ndescs;
713 			int meta_length;
714 			int data_length;
715 		};
716 
717 		/* Used only by User MRs (umem != NULL) */
718 		struct {
719 			unsigned int page_shift;
720 			/* Current access_flags */
721 			int access_flags;
722 
723 			/* For User ODP */
724 			struct mlx5_ib_mr *parent;
725 			struct xarray implicit_children;
726 			union {
727 				struct work_struct work;
728 			} odp_destroy;
729 			struct ib_odp_counters odp_stats;
730 			bool is_odp_implicit;
731 			/* The affiliated data direct crossed MR */
732 			struct mlx5_ib_mr *dd_crossed_mr;
733 			struct list_head dd_node;
734 			u8 revoked :1;
735 			/* Indicates previous dmabuf page fault occurred */
736 			u8 dmabuf_faulted:1;
737 			struct mlx5_ib_mkey null_mmkey;
738 		};
739 	};
740 };
741 
742 static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
743 {
744 	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
745 	       mr->umem->is_odp;
746 }
747 
748 static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
749 {
750 	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
751 	       mr->umem->is_dmabuf;
752 }
753 
754 struct mlx5_ib_mw {
755 	struct ib_mw		ibmw;
756 	struct mlx5_ib_mkey	mmkey;
757 };
758 
759 struct mlx5_ib_umr_context {
760 	struct ib_cqe		cqe;
761 	enum ib_wc_status	status;
762 	struct completion	done;
763 };
764 
765 enum {
766 	MLX5_UMR_STATE_UNINIT,
767 	MLX5_UMR_STATE_ACTIVE,
768 	MLX5_UMR_STATE_RECOVER,
769 	MLX5_UMR_STATE_ERR,
770 };
771 
772 struct umr_common {
773 	struct ib_pd	*pd;
774 	struct ib_cq	*cq;
775 	struct ib_qp	*qp;
776 	/* Protects from UMR QP overflow
777 	 */
778 	struct semaphore	sem;
779 	/* Protects from using UMR while the UMR is not active
780 	 */
781 	struct mutex lock;
782 	unsigned int state;
783 	/* Protects from repeat UMR QP creation */
784 	struct mutex init_lock;
785 };
786 
787 #define NUM_MKEYS_PER_PAGE \
788 	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))
789 
790 struct mlx5_mkeys_page {
791 	u32 mkeys[NUM_MKEYS_PER_PAGE];
792 	struct list_head list;
793 };
794 static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
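
/*
 * Worked example (illustrative): with a 4K PAGE_SIZE and a 16-byte
 * struct list_head on 64-bit, NUM_MKEYS_PER_PAGE = (4096 - 16) / 4 = 1020,
 * so sizeof(struct mlx5_mkeys_page) = 1020 * 4 + 16 = 4096 and the
 * static_assert above holds - each queue page is exactly one system page.
 */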
795 
796 struct mlx5_mkeys_queue {
797 	struct list_head pages_list;
798 	u32 num_pages;
799 	unsigned long ci;
800 	spinlock_t lock; /* sync list ops */
801 };
802 
803 struct mlx5_cache_ent {
804 	struct mlx5_mkeys_queue	mkeys_queue;
805 	u32			pending;
806 
807 	char                    name[4];
808 
809 	struct rb_node		node;
810 	struct mlx5r_cache_rb_key rb_key;
811 
812 	u8 is_tmp:1;
813 	u8 disabled:1;
814 	u8 fill_to_high_water:1;
815 	u8 tmp_cleanup_scheduled:1;
816 
817 	/*
818 	 * - limit is the low water mark for stored mkeys, 2 * limit is the
819 	 *   upper water mark.
820 	 */
821 	u32 in_use;
822 	u32 limit;
823 
824 	/* Statistics */
825 	u32                     miss;
826 
827 	struct mlx5_ib_dev     *dev;
828 	struct delayed_work	dwork;
829 };
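
/*
 * Water-mark example (illustrative): an entry with limit == 10 aims to keep
 * at least 10 mkeys queued; when fill_to_high_water is set the background
 * work refills toward 2 * limit == 20 before settling back to demand-driven
 * allocation. The numbers are arbitrary and only illustrate the comment in
 * the struct above.
 */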
830 
831 struct mlx5r_async_create_mkey {
832 	union {
833 		u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
834 		u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
835 	};
836 	struct mlx5_async_work cb_work;
837 	struct mlx5_cache_ent *ent;
838 	u32 mkey;
839 };
840 
841 struct mlx5_mkey_cache {
842 	struct workqueue_struct *wq;
843 	struct rb_root		rb_root;
844 	struct mutex		rb_lock;
845 	struct dentry		*fs_root;
846 	unsigned long		last_add;
847 };
848 
849 struct mlx5_ib_port_resources {
850 	struct mlx5_ib_gsi_qp *gsi;
851 	struct work_struct pkey_change_work;
852 };
853 
854 struct mlx5_data_direct_resources {
855 	u32 pdn;
856 	u32 mkey;
857 	u32 mkey_ro;
858 	u8 mkey_ro_valid :1;
859 };
860 
861 struct mlx5_ib_resources {
862 	struct ib_cq	*c0;
863 	struct mutex cq_lock;
864 	u32 xrcdn0;
865 	u32 xrcdn1;
866 	struct ib_pd	*p0;
867 	struct ib_srq	*s0;
868 	struct ib_srq	*s1;
869 	struct mutex srq_lock;
870 	struct mlx5_ib_port_resources ports[2];
871 };
872 
873 #define MAX_OPFC_RULES 2
874 
875 struct mlx5_ib_op_fc {
876 	struct mlx5_fc *fc;
877 	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
878 };
879 
880 struct mlx5_ib_counters {
881 	struct rdma_stat_desc *descs;
882 	size_t *offsets;
883 	u32 num_q_counters;
884 	u32 num_cong_counters;
885 	u32 num_ext_ppcnt_counters;
886 	u32 num_op_counters;
887 	u16 set_id;
888 	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
889 };
890 
891 int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
892 			 struct mlx5_ib_op_fc *opfc,
893 			 enum mlx5_ib_optional_counter_type type);
894 
895 void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
896 			     struct mlx5_ib_op_fc *opfc,
897 			     enum mlx5_ib_optional_counter_type type);
898 
899 int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
900 			struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
901 			struct xarray *qpn_opfc_xa, u32 port);
902 
903 void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa);
904 
905 void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
906 			  struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX]);
907 
908 struct mlx5_ib_multiport_info;
909 
910 struct mlx5_ib_multiport {
911 	struct mlx5_ib_multiport_info *mpi;
912 	/* To be held when accessing the multiport info */
913 	spinlock_t mpi_lock;
914 };
915 
916 struct mlx5_roce {
917 	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
918 	 * netdev pointer
919 	 */
920 	struct notifier_block	nb;
921 	struct netdev_net_notifier nn;
922 	struct notifier_block	mdev_nb;
923 	struct net_device	*tracking_netdev;
924 	atomic_t		tx_port_affinity;
925 	enum ib_port_state last_port_state;
926 	struct mlx5_ib_dev	*dev;
927 	u32			native_port_num;
928 };
929 
930 struct mlx5_ib_port {
931 	struct mlx5_ib_counters cnts;
932 	struct mlx5_ib_multiport mp;
933 	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
934 	struct mlx5_roce roce;
935 	struct mlx5_eswitch_rep		*rep;
936 #ifdef CONFIG_MLX5_MACSEC
937 	struct mlx5_reserved_gids *reserved_gids;
938 #endif
939 };
940 
941 struct mlx5_ib_dbg_param {
942 	int			offset;
943 	struct mlx5_ib_dev	*dev;
944 	struct dentry		*dentry;
945 	u32			port_num;
946 };
947 
948 enum mlx5_ib_dbg_cc_types {
949 	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
950 	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
951 	MLX5_IB_DBG_CC_RP_TIME_RESET,
952 	MLX5_IB_DBG_CC_RP_BYTE_RESET,
953 	MLX5_IB_DBG_CC_RP_THRESHOLD,
954 	MLX5_IB_DBG_CC_RP_AI_RATE,
955 	MLX5_IB_DBG_CC_RP_MAX_RATE,
956 	MLX5_IB_DBG_CC_RP_HAI_RATE,
957 	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
958 	MLX5_IB_DBG_CC_RP_MIN_RATE,
959 	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
960 	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
961 	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
962 	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
963 	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
964 	MLX5_IB_DBG_CC_RP_GD,
965 	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
966 	MLX5_IB_DBG_CC_NP_CNP_DSCP,
967 	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
968 	MLX5_IB_DBG_CC_NP_CNP_PRIO,
969 	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
970 	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
971 	MLX5_IB_DBG_CC_MAX,
972 };
973 
974 struct mlx5_ib_dbg_cc_params {
975 	struct dentry			*root;
976 	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
977 };
978 
979 enum {
980 	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
981 };
982 
983 struct mlx5_ib_delay_drop {
984 	struct mlx5_ib_dev     *dev;
985 	struct work_struct	delay_drop_work;
986 	/* serialize setting of delay drop */
987 	struct mutex		lock;
988 	u32			timeout;
989 	bool			activate;
990 	atomic_t		events_cnt;
991 	atomic_t		rqs_cnt;
992 	struct dentry		*dir_debugfs;
993 };
994 
995 enum mlx5_ib_stages {
996 	MLX5_IB_STAGE_INIT,
997 	MLX5_IB_STAGE_FS,
998 	MLX5_IB_STAGE_CAPS,
999 	MLX5_IB_STAGE_NON_DEFAULT_CB,
1000 	MLX5_IB_STAGE_ROCE,
1001 	MLX5_IB_STAGE_QP,
1002 	MLX5_IB_STAGE_SRQ,
1003 	MLX5_IB_STAGE_DEVICE_RESOURCES,
1004 	MLX5_IB_STAGE_ODP,
1005 	MLX5_IB_STAGE_COUNTERS,
1006 	MLX5_IB_STAGE_CONG_DEBUGFS,
1007 	MLX5_IB_STAGE_BFREG,
1008 	MLX5_IB_STAGE_PRE_IB_REG_UMR,
1009 	MLX5_IB_STAGE_WHITELIST_UID,
1010 	MLX5_IB_STAGE_SYS_ERROR_NOTIFIER,
1011 	MLX5_IB_STAGE_IB_REG,
1012 	MLX5_IB_STAGE_DEVICE_NOTIFIER,
1013 	MLX5_IB_STAGE_POST_IB_REG_UMR,
1014 	MLX5_IB_STAGE_DELAY_DROP,
1015 	MLX5_IB_STAGE_RESTRACK,
1016 	MLX5_IB_STAGE_MAX,
1017 };
1018 
1019 struct mlx5_ib_stage {
1020 	int (*init)(struct mlx5_ib_dev *dev);
1021 	void (*cleanup)(struct mlx5_ib_dev *dev);
1022 };
1023 
1024 #define STAGE_CREATE(_stage, _init, _cleanup) \
1025 	.stage[_stage] = {.init = _init, .cleanup = _cleanup}
1026 
1027 struct mlx5_ib_profile {
1028 	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
1029 };
1030 
1031 struct mlx5_ib_multiport_info {
1032 	struct list_head list;
1033 	struct mlx5_ib_dev *ibdev;
1034 	struct mlx5_core_dev *mdev;
1035 	struct notifier_block mdev_events;
1036 	struct completion unref_comp;
1037 	u64 sys_image_guid;
1038 	u32 mdev_refcnt;
1039 	bool is_master;
1040 	bool unaffiliate;
1041 };
1042 
1043 struct mlx5_ib_flow_action {
1044 	struct ib_flow_action		ib_action;
1045 	union {
1046 		struct {
1047 			u64			    ib_flags;
1048 			struct mlx5_accel_esp_xfrm *ctx;
1049 		} esp_aes_gcm;
1050 		struct {
1051 			struct mlx5_ib_dev *dev;
1052 			u32 sub_type;
1053 			union {
1054 				struct mlx5_modify_hdr *modify_hdr;
1055 				struct mlx5_pkt_reformat *pkt_reformat;
1056 			};
1057 		} flow_action_raw;
1058 	};
1059 };
1060 
1061 struct mlx5_dm {
1062 	struct mlx5_core_dev *dev;
1063 	/* This lock is used to protect the access to the shared
1064 	 * allocation map when concurrent requests by different
1065 	 * processes are handled.
1066 	 */
1067 	spinlock_t lock;
1068 	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
1069 };
1070 
1071 struct mlx5_read_counters_attr {
1072 	struct mlx5_fc *hw_cntrs_hndl;
1073 	u64 *out;
1074 	u32 flags;
1075 };
1076 
1077 enum mlx5_ib_counters_type {
1078 	MLX5_IB_COUNTERS_FLOW,
1079 };
1080 
1081 struct mlx5_ib_mcounters {
1082 	struct ib_counters ibcntrs;
1083 	enum mlx5_ib_counters_type type;
1084 	/* number of counters supported for this counters type */
1085 	u32 counters_num;
1086 	struct mlx5_fc *hw_cntrs_hndl;
1087 	/* read function for this counters type */
1088 	int (*read_counters)(struct ib_device *ibdev,
1089 			     struct mlx5_read_counters_attr *read_attr);
1090 	/* max index set as part of create_flow */
1091 	u32 cntrs_max_index;
1092 	/* number of counters data entries (<description,index> pair) */
1093 	u32 ncounters;
1094 	/* counters data array for descriptions and indexes */
1095 	struct mlx5_ib_flow_counters_desc *counters_data;
1096 	/* protects access to mcounters internal data */
1097 	struct mutex mcntrs_mutex;
1098 };
1099 
1100 static inline struct mlx5_ib_mcounters *
1101 to_mcounters(struct ib_counters *ibcntrs)
1102 {
1103 	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
1104 }
1105 
1106 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
1107 			   bool is_egress,
1108 			   struct mlx5_flow_act *action);
1109 struct mlx5_ib_lb_state {
1110 	/* protect the user_td */
1111 	struct mutex		mutex;
1112 	u32			user_td;
1113 	int			qps;
1114 	bool			enabled;
1115 	bool			force_enable;
1116 };
1117 
1118 struct mlx5_ib_pf_eq {
1119 	struct notifier_block irq_nb;
1120 	struct mlx5_ib_dev *dev;
1121 	struct mlx5_eq *core;
1122 	struct work_struct work;
1123 	spinlock_t lock; /* Pagefaults spinlock */
1124 	struct workqueue_struct *wq;
1125 	mempool_t *pool;
1126 };
1127 
1128 struct mlx5_devx_event_table {
1129 	struct mlx5_nb devx_nb;
1130 	/* serialize updating the event_xa */
1131 	struct mutex event_xa_lock;
1132 	struct xarray event_xa;
1133 };
1134 
1135 struct mlx5_var_table {
1136 	/* serialize updating the bitmap */
1137 	struct mutex bitmap_lock;
1138 	unsigned long *bitmap;
1139 	u64 hw_start_addr;
1140 	u32 stride_size;
1141 	u64 num_var_hw_entries;
1142 };
1143 
1144 struct mlx5_port_caps {
1145 	bool has_smi;
1146 	u8 ext_port_cap;
1147 };
1148 
1149 
1150 struct mlx5_special_mkeys {
1151 	u32 dump_fill_mkey;
1152 	__be32 null_mkey;
1153 	__be32 terminate_scatter_list_mkey;
1154 };
1155 
1156 struct mlx5_macsec {
1157 	struct mutex lock; /* Protects mlx5_macsec internal contexts */
1158 	struct list_head macsec_devices_list;
1159 	struct notifier_block blocking_events_nb;
1160 };
1161 
1162 struct mlx5_ib_dev {
1163 	struct ib_device		ib_dev;
1164 	struct mlx5_core_dev		*mdev;
1165 	struct mlx5_data_direct_dev	*data_direct_dev;
1166 	/* protect accessing data_direct_dev */
1167 	struct mutex			data_direct_lock;
1168 	struct notifier_block		mdev_events;
1169 	struct notifier_block		sys_error_events;
1170 	struct notifier_block           lag_events;
1171 	int				num_ports;
1172 	/* serialize update of capability mask
1173 	 */
1174 	struct mutex			cap_mask_mutex;
1175 	u8				ib_active:1;
1176 	u8				is_rep:1;
1177 	u8				lag_active:1;
1178 	u8				fill_delay;
1179 	struct umr_common		umrc;
1180 	/* sync used page count stats
1181 	 */
1182 	struct mlx5_ib_resources	devr;
1183 
1184 	atomic_t			mkey_var;
1185 	struct mlx5_mkey_cache		cache;
1186 	struct timer_list		delay_timer;
1187 	/* Prevents soft lock on massive reg MRs */
1188 	struct mutex			slow_path_mutex;
1189 	struct ib_odp_caps	odp_caps;
1190 	u64			odp_max_size;
1191 	struct mutex		odp_eq_mutex;
1192 	struct mlx5_ib_pf_eq	odp_pf_eq;
1193 
1194 	struct xarray		odp_mkeys;
1195 
1196 	struct mlx5_ib_flow_db	*flow_db;
1197 	/* protect resources needed as part of reset flow */
1198 	spinlock_t		reset_flow_resource_lock;
1199 	struct list_head	qp_list;
1200 	struct list_head data_direct_mr_list;
1201 	/* Array with num_ports elements */
1202 	struct mlx5_ib_port	*port;
1203 	struct mlx5_sq_bfreg	bfreg;
1204 	struct mlx5_sq_bfreg	fp_bfreg;
1205 	struct mlx5_ib_delay_drop	delay_drop;
1206 	const struct mlx5_ib_profile	*profile;
1207 
1208 	struct mlx5_ib_lb_state		lb;
1209 	u8			umr_fence;
1210 	struct list_head	ib_dev_list;
1211 	u64			sys_image_guid;
1212 	struct mlx5_dm		dm;
1213 	u16			devx_whitelist_uid;
1214 	struct mlx5_srq_table   srq_table;
1215 	struct mlx5_qp_table    qp_table;
1216 	struct mlx5_async_ctx   async_ctx;
1217 	struct mlx5_devx_event_table devx_event_table;
1218 	struct mlx5_var_table var_table;
1219 
1220 	struct xarray sig_mrs;
1221 	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
1222 	u16 pkey_table_len;
1223 	u8 lag_ports;
1224 	struct mlx5_special_mkeys mkeys;
1225 	struct mlx5_data_direct_resources ddr;
1226 
1227 #ifdef CONFIG_MLX5_MACSEC
1228 	struct mlx5_macsec macsec;
1229 #endif
1230 
1231 	u8 num_plane;
1232 	struct mlx5_ib_dev *smi_dev;
1233 	const char *sub_dev_name;
1234 };
1235 
1236 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
1237 {
1238 	return container_of(mcq, struct mlx5_ib_cq, mcq);
1239 }
1240 
1241 static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
1242 {
1243 	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
1244 }
1245 
1246 static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
1247 {
1248 	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
1249 }
1250 
1251 static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
1252 {
1253 	return to_mdev(mr->ibmr.device);
1254 }
1255 
1256 static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
1257 {
1258 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
1259 		udata, struct mlx5_ib_ucontext, ibucontext);
1260 
1261 	return to_mdev(context->ibucontext.device);
1262 }
1263 
1264 static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
1265 {
1266 	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
1267 }
1268 
1269 static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
1270 {
1271 	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
1272 }
1273 
1274 static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
1275 {
1276 	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
1277 }
1278 
1279 static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
1280 {
1281 	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
1282 }
1283 
1284 static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
1285 {
1286 	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
1287 }
1288 
1289 static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
1290 {
1291 	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
1292 }
1293 
1294 static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
1295 {
1296 	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
1297 }
1298 
1299 static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
1300 {
1301 	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
1302 }
1303 
1304 static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
1305 {
1306 	return container_of(msrq, struct mlx5_ib_srq, msrq);
1307 }
1308 
1309 static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
1310 {
1311 	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
1312 }
1313 
1314 static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
1315 {
1316 	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
1317 }
1318 
1319 static inline struct mlx5_ib_flow_action *
1320 to_mflow_act(struct ib_flow_action *ibact)
1321 {
1322 	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
1323 }
1324 
1325 static inline struct mlx5_user_mmap_entry *
1326 to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
1327 {
1328 	return container_of(rdma_entry,
1329 		struct mlx5_user_mmap_entry, rdma_entry);
1330 }
1331 
1332 int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
1333 int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
1334 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
1335 			struct mlx5_db *db);
1336 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
1337 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
1338 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
1339 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
1340 int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
1341 		      struct ib_udata *udata);
1342 int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
1343 static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
1344 {
1345 	return 0;
1346 }
1347 int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
1348 		       struct ib_udata *udata);
1349 int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1350 		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
1351 int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
1352 int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
1353 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1354 			  const struct ib_recv_wr **bad_wr);
1355 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1356 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1357 int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
1358 		      struct ib_udata *udata);
1359 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1360 		      int attr_mask, struct ib_udata *udata);
1361 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
1362 		     struct ib_qp_init_attr *qp_init_attr);
1363 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
1364 void mlx5_ib_drain_sq(struct ib_qp *qp);
1365 void mlx5_ib_drain_rq(struct ib_qp *qp);
1366 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1367 			size_t buflen, size_t *bc);
1368 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1369 			size_t buflen, size_t *bc);
1370 int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
1371 			 size_t buflen, size_t *bc);
1372 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1373 		      struct uverbs_attr_bundle *attrs);
1374 int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
1375 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1376 int mlx5_ib_pre_destroy_cq(struct ib_cq *cq);
1377 void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
1378 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1379 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1380 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
1381 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
1382 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1383 				  u64 virt_addr, int access_flags,
1384 				  struct ib_dmah *dmah,
1385 				  struct ib_udata *udata);
1386 struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
1387 					 u64 length, u64 virt_addr,
1388 					 int fd, int access_flags,
1389 					 struct ib_dmah *dmah,
1390 					 struct uverbs_attr_bundle *attrs);
1391 int mlx5_ib_advise_mr(struct ib_pd *pd,
1392 		      enum ib_uverbs_advise_mr_advice advice,
1393 		      u32 flags,
1394 		      struct ib_sge *sg_list,
1395 		      u32 num_sge,
1396 		      struct uverbs_attr_bundle *attrs);
1397 int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
1398 int mlx5_ib_dealloc_mw(struct ib_mw *mw);
1399 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
1400 					     int access_flags);
1401 void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
1402 struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1403 				    u64 length, u64 virt_addr, int access_flags,
1404 				    struct ib_pd *pd, struct ib_udata *udata);
1405 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
1406 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1407 			       u32 max_num_sg);
1408 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1409 					 u32 max_num_sg,
1410 					 u32 max_num_meta_sg);
1411 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
1412 		      unsigned int *sg_offset);
1413 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
1414 			 int data_sg_nents, unsigned int *data_sg_offset,
1415 			 struct scatterlist *meta_sg, int meta_sg_nents,
1416 			 unsigned int *meta_sg_offset);
1417 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
1418 			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
1419 			const struct ib_mad *in, struct ib_mad *out,
1420 			size_t *out_mad_size, u16 *out_mad_pkey_index);
1421 int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
1422 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
1423 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
1424 int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
1425 					 __be64 *sys_image_guid);
1426 int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
1427 				 u16 *max_pkeys);
1428 int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
1429 				 u32 *vendor_id);
1430 int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
1431 int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
1432 int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
1433 			    u16 *pkey);
1434 int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
1435 			    union ib_gid *gid);
1436 int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
1437 			    struct ib_port_attr *props);
1438 int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1439 		       struct ib_port_attr *props);
1440 int mlx5_ib_query_port_speed(struct ib_device *ibdev, u32 port_num,
1441 			      u64 *speed);
1442 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
1443 			  u64 access_flags);
1444 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
1445 int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
1446 void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
1447 struct mlx5_cache_ent *
1448 mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
1449 			      struct mlx5r_cache_rb_key rb_key,
1450 			      bool persistent_entry);
1451 
1452 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
1453 				       int access_flags, int access_mode,
1454 				       int ndescs);
1455 
1456 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1457 			    struct ib_mr_status *mr_status);
1458 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
1459 				struct ib_wq_init_attr *init_attr,
1460 				struct ib_udata *udata);
1461 int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
1462 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1463 		      u32 wq_attr_mask, struct ib_udata *udata);
1464 int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
1465 				 struct ib_rwq_ind_table_init_attr *init_attr,
1466 				 struct ib_udata *udata);
1467 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
1468 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
1469 				struct ib_dm_mr_attr *attr,
1470 				struct uverbs_attr_bundle *attrs);
1471 void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
1472 			      struct mlx5_data_direct_dev *dev);
1473 void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev);
1474 void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev);
1475 
1476 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1477 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
1478 int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
1479 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
1480 int __init mlx5_ib_odp_init(void);
1481 void mlx5_ib_odp_cleanup(void);
1482 int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
1483 int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
1484 			  struct mlx5_ib_mr *mr, int flags);
1485 
1486 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1487 			       enum ib_uverbs_advise_mr_advice advice,
1488 			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
1489 int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
1490 int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
1491 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
1492 static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
1493 static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
1494 				      struct mlx5_ib_pf_eq *eq)
1495 {
1496 	return 0;
1497 }
1498 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
1499 static inline int mlx5_ib_odp_init(void) { return 0; }
1500 static inline void mlx5_ib_odp_cleanup(void)				    {}
1501 static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
1502 {
1503 	return 0;
1504 }
1505 static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
1506 					struct mlx5_ib_mr *mr, int flags)
1507 {
1508 	return -EOPNOTSUPP;
1509 }
1510 
1511 static inline int
1512 mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1513 			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
1514 			   struct ib_sge *sg_list, u32 num_sge)
1515 {
1516 	return -EOPNOTSUPP;
1517 }
1518 static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
1519 {
1520 	return -EOPNOTSUPP;
1521 }
1522 static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
1523 {
1524 	return -EOPNOTSUPP;
1525 }
1526 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
1527 
1528 extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
1529 
1530 /* Needed for rep profile */
1531 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
1532 		      const struct mlx5_ib_profile *profile,
1533 		      int stage);
1534 int __mlx5_ib_add(struct mlx5_ib_dev *dev,
1535 		  const struct mlx5_ib_profile *profile);
1536 
1537 int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
1538 			  u32 port, struct ifla_vf_info *info);
1539 int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
1540 			      u32 port, int state);
1541 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
1542 			 u32 port, struct ifla_vf_stats *stats);
1543 int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
1544 			struct ifla_vf_guid *node_guid,
1545 			struct ifla_vf_guid *port_guid);
1546 int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
1547 			u64 guid, int type);
1548 
1549 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
1550 				   const struct ib_gid_attr *attr);
1551 
1552 void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
1553 void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
1554 
1555 /* GSI QP helper functions */
1556 int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
1557 		       struct ib_qp_init_attr *attr);
1558 int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
1559 int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1560 			  int attr_mask);
1561 int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
1562 			 int qp_attr_mask,
1563 			 struct ib_qp_init_attr *qp_init_attr);
1564 int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
1565 			  const struct ib_send_wr **bad_wr);
1566 int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
1567 			  const struct ib_recv_wr **bad_wr);
1568 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
1569 
1570 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
1571 
1572 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
1573 			int bfregn);
1574 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
1575 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
1576 						   u32 ib_port_num,
1577 						   u32 *native_port_num);
1578 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
1579 				  u32 port_num);
1580 
1581 extern const struct uapi_definition mlx5_ib_devx_defs[];
1582 extern const struct uapi_definition mlx5_ib_flow_defs[];
1583 extern const struct uapi_definition mlx5_ib_qos_defs[];
1584 extern const struct uapi_definition mlx5_ib_std_types_defs[];
1585 extern const struct uapi_definition mlx5_ib_create_cq_defs[];
1586 
1587 static inline int is_qp1(enum ib_qp_type qp_type)
1588 {
1589 	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
1590 }
1591 
1592 static inline u32 check_cq_create_flags(u32 flags)
1593 {
1594 	/*
1595 	 * It returns a non-zero value for unsupported CQ
1596 	 * create flags, otherwise it returns zero.
1597 	 */
1598 	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
1599 			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
1600 }
1601 
1602 static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
1603 				     u32 *user_index)
1604 {
1605 	if (cqe_version) {
1606 		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
1607 		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
1608 			return -EINVAL;
1609 		*user_index = cmd_uidx;
1610 	} else {
1611 		*user_index = MLX5_IB_DEFAULT_UIDX;
1612 	}
1613 
1614 	return 0;
1615 }
1616 
1617 static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
1618 				    struct mlx5_ib_create_qp *ucmd,
1619 				    int inlen,
1620 				    u32 *user_index)
1621 {
1622 	u8 cqe_version = ucontext->cqe_version;
1623 
1624 	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
1625 	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
1626 		return 0;
1627 
1628 	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
1629 		return -EINVAL;
1630 
1631 	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
1632 }
1633 
1634 static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
1635 				     struct mlx5_ib_create_srq *ucmd,
1636 				     int inlen,
1637 				     u32 *user_index)
1638 {
1639 	u8 cqe_version = ucontext->cqe_version;
1640 
1641 	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
1642 	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
1643 		return 0;
1644 
1645 	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
1646 		return -EINVAL;
1647 
1648 	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
1649 }
1650 
1651 static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
1652 {
1653 	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1654 				MLX5_UARS_IN_PAGE : 1;
1655 }
1656 
1657 extern void *xlt_emergency_page;
1658 
1659 int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1660 			struct mlx5_bfreg_info *bfregi, u32 bfregn,
1661 			bool dyn_bfreg);
1662 
1663 static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
1664 				       struct mlx5_ib_mkey *mmkey)
1665 {
1666 	refcount_set(&mmkey->usecount, 1);
1667 
1668 	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
1669 			       mmkey, GFP_KERNEL));
1670 }
1671 
1672 /* deref an mkey that can participate in ODP flow */
1673 static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
1674 {
1675 	if (refcount_dec_and_test(&mmkey->usecount))
1676 		wake_up(&mmkey->wait);
1677 }
1678 
1679 /* deref an mkey that can participate in ODP flow and wait for release */
1680 static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
1681 {
1682 	mlx5r_deref_odp_mkey(mmkey);
1683 	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
1684 }
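
/*
 * Typical lifecycle sketch (illustrative; the exact call sites live in the
 * MR/ODP code, and the surrounding logic is omitted here):
 *
 *	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);	// publish, usecount = 1
 *	...
 *	// fault/advise paths take and drop references around each use:
 *	mlx5r_deref_odp_mkey(&mr->mmkey);
 *	...
 *	// teardown: unpublish, then wait for in-flight users to drain
 *	xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
 *	mlx5r_deref_wait_odp_mkey(&mr->mmkey);
 */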
1685 
1686 static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
1687 {
1688 	/*
1689 	 * If the driver is in hash mode and the port_select_flow_table_bypass cap
1690 	 * is supported, it means that the driver no longer needs to assign the port
1691 	 * affinity by default. If a user wants to set the port affinity explicitly,
1692 	 * the user has a dedicated API to do that, so there is no need to assign
1693 	 * the port affinity by default.
1694 	 */
1695 	if (dev->lag_active &&
1696 	    mlx5_lag_mode_is_hash(dev->mdev) &&
1697 	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
1698 		return 0;
1699 
1700 	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
1701 		return 0;
1702 
1703 	return dev->lag_active ||
1704 		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
1705 		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
1706 }
1707 
1708 static inline bool rt_supported(int ts_cap)
1709 {
1710 	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
1711 	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
1712 }
1713 
1714 /*
1715  * PCI Peer to Peer is a trainwreck. If no switch is present then things
1716  * sometimes work, depending on the pci_distance_p2p logic for excluding broken
1717  * root complexes. However if a switch is present in the path, then things get
1718  * really ugly depending on how the switch is set up. This table assumes that the
1719  * root complex is strict and is validating that all req/reps are matched
1720  * perfectly - so any scenario where it sees only half the transaction is a
1721  * failure.
1722  *
1723  * CR/RR/DT  ATS RO P2P
1724  * 00X       X   X  OK
1725  * 010       X   X  fails (request is routed to root but root never sees comp)
1726  * 011       0   X  fails (request is routed to root but root never sees comp)
1727  * 011       1   X  OK
1728  * 10X       X   1  OK
1729  * 101       X   0  fails (completion is routed to root but root didn't see req)
1730  * 110       X   0  SLOW
1731  * 111       0   0  SLOW
1732  * 111       1   0  fails (completion is routed to root but root didn't see req)
1733  * 111       1   1  OK
1734  *
1735  * Unfortunately we cannot reliably know if a switch is present or what the
1736  * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
1737  * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
1738  *
1739  * For now assume if the umem is a dma_buf then it is P2P.
1740  */
1741 static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
1742 				       struct ib_umem *umem, int access_flags)
1743 {
1744 	if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
1745 		return false;
1746 	return access_flags & IB_ACCESS_RELAXED_ORDERING;
1747 }
1748 
1749 int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
1750 		  unsigned int index, const union ib_gid *gid,
1751 		  const struct ib_gid_attr *attr);
1752 
1753 static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
1754 {
1755 	return (port - 1) / dev->num_ports + 1;
1756 }
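
/*
 * Arithmetic example (illustrative): with dev->num_ports == 2 the formula
 * above maps ports 1 and 2 to native port 1, and ports 3 and 4 to native
 * port 2.
 */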
1757 
1758 static inline unsigned int get_max_log_entity_size_cap(struct mlx5_ib_dev *dev,
1759 						       int access_mode)
1760 {
1761 	int max_log_size = 0;
1762 
1763 	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
1764 		max_log_size =
1765 			MLX5_CAP_GEN_2(dev->mdev, max_mkey_log_entity_size_mtt);
1766 	else if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
1767 		max_log_size = MLX5_CAP_GEN_2(
1768 			dev->mdev, max_mkey_log_entity_size_fixed_buffer);
1769 
1770 	if (!max_log_size ||
1771 	    (max_log_size > 31 &&
1772 	     !MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5)))
1773 		max_log_size = 31;
1774 
1775 	return max_log_size;
1776 }
1777 
1778 static inline unsigned int get_min_log_entity_size_cap(struct mlx5_ib_dev *dev,
1779 						       int access_mode)
1780 {
1781 	int min_log_size = 0;
1782 
1783 	if (access_mode == MLX5_MKC_ACCESS_MODE_KSM &&
1784 	    MLX5_CAP_GEN_2(dev->mdev,
1785 			   min_mkey_log_entity_size_fixed_buffer_valid))
1786 		min_log_size = MLX5_CAP_GEN_2(
1787 			dev->mdev, min_mkey_log_entity_size_fixed_buffer);
1788 	else
1789 		min_log_size =
1790 			MLX5_CAP_GEN_2(dev->mdev, log_min_mkey_entity_size);
1791 
1792 	min_log_size = max(min_log_size, MLX5_ADAPTER_PAGE_SHIFT);
1793 	return min_log_size;
1794 }
1795 
1796 /*
1797  * For mkc users, instead of a page_offset the command has a start_iova which
1798  * specifies both the page_offset and the on-the-wire IOVA
1799  */
1800 static __always_inline unsigned long
1801 mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
1802 			     u64 iova, int access_mode)
1803 {
1804 	unsigned int max_log_entity_size_cap, min_log_entity_size_cap;
1805 	unsigned long bitmap;
1806 
1807 	max_log_entity_size_cap = get_max_log_entity_size_cap(dev, access_mode);
1808 	min_log_entity_size_cap = get_min_log_entity_size_cap(dev, access_mode);
1809 
1810 	bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
1811 
1812 	/* In KSM mode HW requires IOVA and mkey's page size to be aligned */
1813 	if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
1814 		bitmap &= GENMASK_ULL(__ffs64(iova), 0);
1815 
1816 	return ib_umem_find_best_pgsz(umem, bitmap, iova);
1817 }
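
/*
 * Worked example (illustrative): if the caps yield a bitmap covering log
 * sizes 12..31 and access_mode is MLX5_MKC_ACCESS_MODE_KSM with
 * iova == 0x210000, then __ffs64(iova) == 16 and the bitmap is clipped to
 * bits 12..16, so ib_umem_find_best_pgsz() can return at most a 64K mkey
 * page size - the KSM alignment rule caps the page size at the IOVA's
 * lowest set bit.
 */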
1818 
1819 static inline unsigned long
1820 mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf,
1821 				int access_mode)
1822 {
1823 	return mlx5_umem_mkc_find_best_pgsz(to_mdev(umem_dmabuf->umem.ibdev),
1824 					    &umem_dmabuf->umem,
1825 					    umem_dmabuf->umem.iova,
1826 					    access_mode);
1827 }
1828 
1829 #endif /* MLX5_IB_H */
1830