1 /*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: QPLib resource manager (header)
37 */
38
39 #ifndef __BNXT_QPLIB_RES_H__
40 #define __BNXT_QPLIB_RES_H__
41
42 #include "bnxt_ulp.h"
43
/* All-zero GID constant shared by qplib SGID-table code. */
extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;

/* PCI chip numbers used to classify hardware generations (see
 * bnxt_qplib_is_chip_gen_p5()/_p7() below).
 */
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760

/* Default upper bounds for RoCE resource counts. */
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)

/* Doorbell word layout: valid bit plus epoch/toggle bit positions. */
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
62
/* Driver operating modes selected at initialization time. */
struct bnxt_qplib_drv_modes {
	u8 wqe_mode;		/* WQE layout mode for SQs */
	bool db_push;		/* push-mode doorbells enabled */
	bool dbr_pacing;	/* doorbell pacing enabled */
	u32 toggle_bits;	/* bitmap of bnxt_re_toggle_modes */
	u8 roce_mirror;		/* RoCE mirroring supported/enabled */
};

/* Queue types that carry a toggle bit (tracked in modes.toggle_bits). */
enum bnxt_re_toggle_modes {
	BNXT_QPLIB_CQ_TOGGLE_BIT = 0x1,
	BNXT_QPLIB_SRQ_TOGGLE_BIT = 0x2,
};
75
/* Chip identity and firmware-reported attributes for one device. */
struct bnxt_qplib_chip_ctx {
	u16 chip_num;			/* PCI device id, one of CHIP_NUM_* */
	u8 chip_rev;
	u8 chip_metal;
	u16 hw_stats_size;		/* size of the FW stats block */
	u16 hwrm_cmd_max_timeout;	/* HWRM command timeout (FW units) */
	struct bnxt_qplib_drv_modes modes;
	u64 hwrm_intf_ver;		/* HWRM interface version */
	u32 dbr_stat_db_fifo;		/* DB FIFO stats register offset - TODO confirm */
};
86
/* Doorbell rate-pacing parameters and DB FIFO geometry. */
struct bnxt_qplib_db_pacing_data {
	u32 do_pacing;		/* current pacing value */
	u32 pacing_th;		/* pacing threshold */
	u32 alarm_th;		/* alarm threshold */
	u32 fifo_max_depth;
	u32 fifo_room_mask;
	u32 fifo_room_shift;
	u32 grc_reg_offset;
	u32 dev_err_state;	/* non-zero when device is in error state */
};

/* Offsets of the doorbell region within the BAR for PF vs. VF. */
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
#define BNXT_QPLIB_DBR_VF_DB_OFFSET 0x4000
100
/* Locate a pointer slot inside an array of page-sized pointer pages:
 * PTR_PG() yields the page number, PTR_IDX() the index within the page.
 */
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
105
/* Wrap a raw ring index into [0, max_elements); max_elements must be a
 * power of two for the mask to be valid.
 */
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))

/* Free slots between producer and consumer.  The hwq macro argument is
 * fully parenthesized so that any pointer expression expands safely.
 */
#define HWQ_FREE_SLOTS(hwq) ((hwq)->max_elements -		\
			     ((HWQ_CMP((hwq)->prod, (hwq)) -	\
			       HWQ_CMP((hwq)->cons, (hwq))) &	\
			      ((hwq)->max_elements - 1)))
/* Flavors of hardware queues allocated through this library. */
enum bnxt_qplib_hwq_type {
	HWQ_TYPE_CTX,		/* context backing-store memory */
	HWQ_TYPE_QUEUE,		/* work/notification queue (gen P5/P7) */
	HWQ_TYPE_L2_CMPL,	/* L2-style completion queue (older chips) */
	HWQ_TYPE_MR		/* memory-region page tables */
};
118
/* Page counts supported at each page-buffer-list indirection level. */
#define MAX_PBL_LVL_0_PGS 1
#define MAX_PBL_LVL_1_PGS 512
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
#define MAX_PBL_LVL_2_PGS (256 * 512)
#define MAX_PDL_LVL_SHIFT 9

/* PBL indirection depth: level 0 is direct, up to two extra levels. */
enum bnxt_qplib_pbl_lvl {
	PBL_LVL_0,
	PBL_LVL_1,
	PBL_LVL_2,
	PBL_LVL_MAX
};
132
/* Backing page sizes usable for RoCE queue memory, in bytes. */
#define ROCE_PG_SIZE_4K (4 * 1024)
#define ROCE_PG_SIZE_8K (8 * 1024)
#define ROCE_PG_SIZE_64K (64 * 1024)
#define ROCE_PG_SIZE_2M (2 * 1024 * 1024)
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)

/* HWRM wire encodings of the page sizes above (see
 * bnxt_qplib_base_pg_size() for the mapping).
 */
enum bnxt_qplib_hwrm_pg_size {
	BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
	BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
	BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
	BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
	BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
	BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
};
148
/* One mapped register region within a PCI BAR. */
struct bnxt_qplib_reg_desc {
	u8 bar_id;
	resource_size_t bar_base;	/* physical BAR base */
	unsigned long offset;		/* offset of region inside the BAR */
	void __iomem *bar_reg;		/* mapped register address */
	size_t len;			/* mapped length in bytes */
};
156
/* One level of a page buffer list: CPU and DMA addresses per page. */
struct bnxt_qplib_pbl {
	u32 pg_count;
	u32 pg_size;
	void **pg_arr;			/* CPU addresses of the pages */
	dma_addr_t *pg_map_arr;		/* DMA addresses, parallel to pg_arr */
};

/* Scatter/gather description of queue backing memory. */
struct bnxt_qplib_sg_info {
	struct ib_umem *umem;		/* user memory region, if any */
	u32 npages;
	u32 pgshft;			/* page shift */
	u32 pgsize;			/* page size in bytes */
	bool nopte;			/* no PTE-level mapping requested */
};
171
/* Parameters describing a hardware queue to be allocated. */
struct bnxt_qplib_hwq_attr {
	struct bnxt_qplib_res *res;
	struct bnxt_qplib_sg_info *sginfo;
	enum bnxt_qplib_hwq_type type;
	u32 depth;		/* number of elements */
	u32 stride;		/* size of one element */
	u32 aux_stride;		/* auxiliary area element size, 0 if none */
	u32 aux_depth;		/* auxiliary area element count */
};
181
/* A DMA-backed hardware queue together with its PBL page tables and
 * software producer/consumer state.
 */
struct bnxt_qplib_hwq {
	struct pci_dev *pdev;
	/* lock to protect qplib_hwq */
	spinlock_t lock;
	struct bnxt_qplib_pbl pbl[PBL_LVL_MAX + 1];
	enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
	/* ptr for easy access to the PBL entries */
	void **pbl_ptr;
	/* ptr for easy access to the dma_addr */
	dma_addr_t *pbl_dma_ptr;
	u32 max_elements;
	u32 depth;		/* queue depth used for prod/cons wrap */
	u16 element_size; /* Size of each entry */
	u16 qe_ppg; /* queue entry per page */

	u32 prod; /* raw */
	u32 cons; /* raw */
	u8 cp_bit;
	u8 is_user;		/* queue memory owned by user space */
	u64 *pad_pg;
	u32 pad_stride;
	u32 pad_pgofft;
};
205
/* Doorbell state for one queue. */
struct bnxt_qplib_db_info {
	void __iomem *db;	/* regular doorbell register */
	void __iomem *priv_db;	/* privileged/arm doorbell register */
	struct bnxt_qplib_hwq *hwq;
	u32 xid;		/* queue id written into the doorbell */
	u32 max_slot;		/* slots consumed per produced entry */
	u32 flags;		/* epoch bits (flags_mask enum below) */
	u8 toggle;		/* toggle value for arm doorbells */
};

/* Bit positions/masks of the epoch bits held in db_info::flags. */
enum bnxt_qplib_db_info_flags_mask {
	BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
	BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
	BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL,
	BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL,
};

/* Shifts that move the masked epoch flag into the doorbell epoch bit
 * (prod shift is one less because its flag already sits at bit 1).
 */
enum bnxt_qplib_db_epoch_flag_shift {
	BNXT_QPLIB_DB_EPOCH_CONS_SHIFT = BNXT_QPLIB_DBR_EPOCH_SHIFT,
	BNXT_QPLIB_DB_EPOCH_PROD_SHIFT = (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1),
};
227
/* Tables */
/* Protection-domain id allocator. */
struct bnxt_qplib_pd_tbl {
	unsigned long *tbl;	/* allocation bitmap */
	u32 max;
};

/* Software mirror of the device SGID table. */
struct bnxt_qplib_sgid_tbl {
	struct bnxt_qplib_gid_info *tbl;
	u16 *hw_id;		/* hardware index per entry */
	u16 max;
	u16 active;		/* number of entries in use */
	void *ctx;
	u8 *vlan;		/* per-entry VLAN flag */
};

/* Doorbell page (DPI) mapping types. */
enum {
	BNXT_QPLIB_DPI_TYPE_KERNEL = 0,
	BNXT_QPLIB_DPI_TYPE_UC = 1,	/* uncached mapping */
	BNXT_QPLIB_DPI_TYPE_WC = 2	/* write-combining mapping */
};
248
/* One allocated doorbell page. */
struct bnxt_qplib_dpi {
	u32 dpi;		/* doorbell page index */
	u32 bit;		/* bit index in the allocator bitmap */
	void __iomem *dbr;	/* mapped doorbell address */
	u64 umdbr;		/* unmapped doorbell base - for user mapping, TODO confirm */
	u8 type;		/* BNXT_QPLIB_DPI_TYPE_* */
};

/* DPI allocator state plus the mapped doorbell BAR regions. */
struct bnxt_qplib_dpi_tbl {
	void **app_tbl;		/* per-DPI application cookie */
	unsigned long *tbl;	/* allocation bitmap */
	u16 max;
	struct bnxt_qplib_reg_desc ucreg; /* Hold entire DB bar. */
	struct bnxt_qplib_reg_desc wcreg;
	void __iomem *priv_db;
};
265
/* DMA buffer used by firmware to deposit statistics. */
struct bnxt_qplib_stats {
	dma_addr_t dma_map;
	void *dma;
	u32 size;
	u32 fw_id;		/* stats context id assigned by FW */
};

/* Per-VF resource limits carved out of the PF totals. */
struct bnxt_qplib_vf_res {
	u32 max_qp_per_vf;
	u32 max_mrw_per_vf;
	u32 max_srq_per_vf;
	u32 max_cq_per_vf;
	u32 max_gid_per_vf;
};

/* Context entry sizes, in bytes, for the backing-store tables. */
#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128

#define MAX_TQM_ALLOC_REQ 48
#define MAX_TQM_ALLOC_BLK_SIZE 8
/* TQM ring context: a page-directory HWQ plus per-ring queue tables. */
struct bnxt_qplib_tqm_ctx {
	struct bnxt_qplib_hwq pde;
	u8 pde_level; /* Original level */
	struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ];
	u8 qcount[MAX_TQM_ALLOC_REQ];
};
294
/* Host-resident context backing-store memory handed to firmware. */
struct bnxt_qplib_ctx {
	u32 qpc_count;			/* QP contexts */
	struct bnxt_qplib_hwq qpc_tbl;
	u32 mrw_count;			/* MR/MW contexts */
	struct bnxt_qplib_hwq mrw_tbl;
	u32 srqc_count;			/* SRQ contexts */
	struct bnxt_qplib_hwq srqc_tbl;
	u32 cq_count;			/* CQ contexts */
	struct bnxt_qplib_hwq cq_tbl;
	struct bnxt_qplib_hwq tim_tbl;	/* timer table */
	struct bnxt_qplib_tqm_ctx tqm_ctx;
	struct bnxt_qplib_stats stats;
	struct bnxt_qplib_stats stats3;
	struct bnxt_qplib_vf_res vf_res;
};
310
/* Top-level resource container for one RoCE device instance. */
struct bnxt_qplib_res {
	struct pci_dev *pdev;
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_qplib_dev_attr *dattr;
	struct net_device *netdev;
	struct bnxt_en_dev *en_dev;
	struct bnxt_qplib_rcfw *rcfw;	/* FW command channel */
	struct bnxt_qplib_pd_tbl pd_tbl;
	/* To protect the pd table bit map */
	struct mutex pd_tbl_lock;
	struct bnxt_qplib_sgid_tbl sgid_tbl;
	struct bnxt_qplib_dpi_tbl dpi_tbl;
	/* To protect the dpi table bit map */
	struct mutex dpi_tbl_lock;
	bool prio;			/* priority VLAN tagging in use */
	bool is_vf;			/* function is a virtual function */
	struct bnxt_qplib_db_pacing_data *pacing_data;
};
329
bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx * cctx)330 static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
331 {
332 return (cctx->chip_num == CHIP_NUM_58818 ||
333 cctx->chip_num == CHIP_NUM_57608);
334 }
335
bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx * cctx)336 static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
337 {
338 return (cctx->chip_num == CHIP_NUM_57508 ||
339 cctx->chip_num == CHIP_NUM_57504 ||
340 cctx->chip_num == CHIP_NUM_57502);
341 }
342
bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx * cctx)343 static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
344 {
345 return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
346 }
347
bnxt_qplib_get_hwq_type(struct bnxt_qplib_res * res)348 static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
349 {
350 return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
351 HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
352 }
353
bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx * cctx)354 static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
355 {
356 return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
357 RING_ALLOC_REQ_RING_TYPE_NQ :
358 RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
359 }
360
bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq * hwq)361 static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
362 {
363 u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
364 struct bnxt_qplib_pbl *pbl;
365
366 pbl = &hwq->pbl[PBL_LVL_0];
367 switch (pbl->pg_size) {
368 case ROCE_PG_SIZE_4K:
369 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
370 break;
371 case ROCE_PG_SIZE_8K:
372 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
373 break;
374 case ROCE_PG_SIZE_64K:
375 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
376 break;
377 case ROCE_PG_SIZE_2M:
378 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
379 break;
380 case ROCE_PG_SIZE_8M:
381 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
382 break;
383 case ROCE_PG_SIZE_1G:
384 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
385 break;
386 default:
387 break;
388 }
389
390 return pg_size;
391 }
392
/* Return the address of queue element @indx; if @pg is non-NULL, also
 * report the address of the PBL slot holding that element's page.
 */
static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
				      u32 indx, u64 *pg)
{
	u32 pg_num = indx / hwq->qe_ppg;
	u32 pg_idx = indx % hwq->qe_ppg;

	if (pg)
		*pg = (u64)&hwq->pbl_ptr[pg_num];
	return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
}
404
/* Element @idx slots beyond the current producer, wrapping once. */
static inline void *bnxt_qplib_get_prod_qe(struct bnxt_qplib_hwq *hwq, u32 idx)
{
	u32 slot = hwq->prod + idx;

	if (slot >= hwq->depth)
		slot -= hwq->depth;
	return bnxt_qplib_get_qe(hwq, slot, NULL);
}
412
/* container_of wrapper used to recover qplib objects from members. */
#define to_bnxt_qplib(ptr, type, member) \
	container_of(ptr, type, member)

struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;
418
/* HWQ allocation and teardown. */
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr);
/* PD and DPI allocators. */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pd_tbl,
			  struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_dpi *dpi,
			 void *app, u8 type);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi *dpi);
/* Whole-device resource lifecycle. */
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);

int bnxt_qplib_determine_atomics(struct pci_dev *dev);
/* Firmware statistics contexts. */
int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
			       struct bnxt_qplib_chip_ctx *cctx,
			       struct bnxt_qplib_stats *stats);
void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
			       struct bnxt_qplib_stats *stats);
450
/* Advance the producer index; flip the producer epoch bit on wrap. */
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
					    struct bnxt_qplib_hwq *hwq, u32 cnt)
{
	hwq->prod += cnt;
	if (hwq->prod < hwq->depth)
		return;
	hwq->prod %= hwq->depth;
	dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
}
461
/* Advance the consumer index; flip the consumer epoch bit on wrap. */
static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
					    u32 *dbinfo_flags)
{
	u32 val = *cons + cnt;

	if (val >= max_elements) {
		val %= max_elements;
		*dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
	}
	*cons = val;
}
472
/* Ring a legacy 32-bit completion doorbell; mask interrupts unless
 * the caller asked to (re)arm.
 */
static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
					bool arm)
{
	u32 key;

	key = info->hwq->cons | CMPL_DOORBELL_IDX_VALID |
	      (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK);
	if (!arm)
		key |= CMPL_DOORBELL_MASK;
	writel(key, info->db);
}
484
/* Compose the 64-bit doorbell value: xid/path/type/valid in the upper
 * 32 bits, the index plus the toggle bit in the lower 32 bits.
 */
#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \
	(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
	(type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \
	(((u32)(toggle)) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
489
/* Ring a 64-bit consumer-side doorbell of the given type. */
static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
				      u32 type)
{
	u8 toggle = 0;
	u32 indx;
	u64 key;

	/* The toggle bit only applies to CQ arm doorbells. */
	if (type == DBC_DBC_TYPE_CQ_ARMALL || type == DBC_DBC_TYPE_CQ_ARMSE)
		toggle = info->toggle;

	indx = info->hwq->cons & DBC_DBC_INDEX_MASK;
	indx |= (info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
		BNXT_QPLIB_DB_EPOCH_CONS_SHIFT;

	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
	writeq(key, info->db);
}
508
/* Ring a producer-side doorbell; index is in units of max_slot. */
static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
					   u32 type)
{
	u32 indx = (info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK;
	u64 key;

	indx |= (info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
		BNXT_QPLIB_DB_EPOCH_PROD_SHIFT;
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
	writeq(key, info->db);
}
521
/* Write an arm-enable doorbell through the privileged register. */
static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
				       u32 type)
{
	u8 toggle;
	u64 key;

	toggle = (type == DBC_DBC_TYPE_CQ_ARMENA ||
		  type == DBC_DBC_TYPE_SRQ_ARMENA) ? info->toggle : 0;
	/* Index always at 0 */
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
	writeq(key, info->priv_db);
}
534
/* Arm an SRQ with threshold @th via the privileged doorbell. */
static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
					 u32 th)
{
	u64 key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM,
					th, info->toggle);

	writeq(key, info->priv_db);
}
543
/* Ring the notification-queue doorbell, choosing the 64-bit or legacy
 * 32-bit format based on the chip generation.
 */
static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
					 struct bnxt_qplib_chip_ctx *cctx,
					 bool arm)
{
	if (bnxt_qplib_is_chip_gen_p5_p7(cctx)) {
		bnxt_qplib_ring_db(info, arm ? DBC_DBC_TYPE_NQ_ARM :
					       DBC_DBC_TYPE_NQ);
	} else {
		bnxt_qplib_ring_db32(info, arm);
	}
}
556
_is_ext_stats_supported(u16 dev_cap_flags)557 static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
558 {
559 return dev_cap_flags &
560 CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
561 }
562
/* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */
static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
					   u16 flags, bool virtfn)
{
	if (!_is_ext_stats_supported(flags))
		return false;
	if (!virtfn)
		return true;
	return bnxt_qplib_is_chip_gen_p7(ctx);
}
570
_is_hw_retx_supported(u16 dev_cap_flags)571 static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
572 {
573 return dev_cap_flags &
574 (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
575 CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
576 }
577
/* Convenience alias for the hardware-retransmission capability check. */
#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
579
_is_host_msn_table(u16 dev_cap_ext_flags2)580 static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
581 {
582 return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
583 CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
584 }
585
bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx * cctx)586 static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
587 {
588 return cctx->modes.dbr_pacing;
589 }
590
bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx * cctx)591 static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
592 {
593 return cctx->modes.roce_mirror;
594 }
595
_is_alloc_mr_unified(u16 dev_cap_flags)596 static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
597 {
598 return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
599 }
600
_is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)601 static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)
602 {
603 return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED;
604 }
605
_is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)606 static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
607 {
608 return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
609 }
610
_is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)611 static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
612 {
613 return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
614 }
615
_is_cq_coalescing_supported(u16 dev_cap_ext_flags2)616 static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
617 {
618 return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
619 }
620
_is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)621 static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
622 {
623 return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
624 }
625
_is_modify_qp_rate_limit_supported(u16 dev_cap_ext_flags2)626 static inline bool _is_modify_qp_rate_limit_supported(u16 dev_cap_ext_flags2)
627 {
628 return dev_cap_ext_flags2 &
629 CREQ_QUERY_FUNC_RESP_SB_MODIFY_QP_RATE_LIMIT_SUPPORTED;
630 }
631
632 #endif /* __BNXT_QPLIB_RES_H__ */
633