// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/bnxt/hsi.h>

#include "bnge.h"
#include "bnge_hwrm.h"
#include "bnge_hwrm_lib.h"
#include "bnge_rmem.h"
#include "bnge_resc.h"

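/*
 * Query firmware via HWRM_VER_GET and cache everything later requests
 * depend on: the supported HWRM interface version, firmware version
 * string, command timeouts, maximum request lengths, chip number and
 * revision, and device capability flags. Versions are packed
 * major/minor/update into one u32, e.g. spec 1.10.2 becomes
 * (1 << 16) | (10 << 8) | 2 = 0x10a02.
 */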
int bnge_hwrm_ver_get(struct bnge_dev *bd)
{
	u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
	u16 fw_maj, fw_min, fw_bld, fw_rsv;
	struct hwrm_ver_get_output *resp;
	struct hwrm_ver_get_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
	if (rc)
		return rc;

	bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
	bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			 resp->hwrm_intf_min_8b << 8 |
			 resp->hwrm_intf_upd_8b;
	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
			HWRM_VERSION_UPDATE;

	if (hwrm_spec_code > hwrm_ver)
		snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
			 HWRM_VERSION_UPDATE);
	else
		snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			 resp->hwrm_intf_upd_8b);

	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
	fw_min = le16_to_cpu(resp->hwrm_fw_minor);
	fw_bld = le16_to_cpu(resp->hwrm_fw_build);
	fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);

	bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
	snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 fw_maj, fw_min, fw_bld, fw_rsv);

	if (strlen(resp->active_pkg_name)) {
		int fw_ver_len = strlen(bd->fw_ver_str);

		snprintf(bd->fw_ver_str + fw_ver_len,
			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
			 resp->active_pkg_name);
		bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
	}

	bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bd->hwrm_cmd_timeout)
		bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
	bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
	if (!bd->hwrm_cmd_max_timeout)
		bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
	else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
		dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
			 bd->hwrm_cmd_max_timeout / 1000);

	bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
	bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);

	if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	bd->chip_num = le16_to_cpu(resp->chip_num);
	bd->chip_rev = resp->chip_rev;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;

	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
		bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
		bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;

hwrm_ver_get_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

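/* Copy the firmware's NVM device information into @nvm_info. */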
int
bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
		       struct hwrm_nvm_get_dev_info_output *nvm_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		memcpy(nvm_info, resp, sizeof(*resp));
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

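/*
 * Ask firmware to reset this function. Resets can take longer than
 * ordinary commands, so the dedicated BNGE_HWRM_RESET_TIMEOUT applies.
 */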
int bnge_hwrm_func_reset(struct bnge_dev *bd)
{
	struct hwrm_func_reset_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
	if (rc)
		return rc;

	req->enables = 0;
	bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
	return bnge_hwrm_req_send(bd, req);
}

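/*
 * Push the host's current wall-clock time to firmware. struct tm
 * counts years from 1900 and months from 0, hence the two adjustments.
 */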
int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
{
	struct hwrm_fw_set_time_input *req;
	struct tm tm;
	int rc;

	time64_to_tm(ktime_get_real_seconds(), 0, &tm);

	rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
	if (rc)
		return rc;

	req->year = cpu_to_le16(1900 + tm.tm_year);
	req->month = 1 + tm.tm_mon;
	req->day = tm.tm_mday;
	req->hour = tm.tm_hour;
	req->minute = tm.tm_min;
	req->second = tm.tm_sec;
	return bnge_hwrm_req_send(bd, req);
}

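/*
 * Register the driver with firmware: OS type, driver version (both the
 * legacy 8-bit and the 16-bit fields), and async event forwarding. On
 * success, record registration in the device state and note whether
 * firmware supports IF_CHANGE notifications.
 */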
int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
{
	struct hwrm_func_drv_rgtr_output *resp;
	struct hwrm_func_drv_rgtr_input *req;
	u32 flags;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;

	req->flags = cpu_to_le32(flags);
	req->ver_maj_8b = DRV_VER_MAJ;
	req->ver_min_8b = DRV_VER_MIN;
	req->ver_upd_8b = DRV_VER_UPD;
	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req->ver_min = cpu_to_le16(DRV_VER_MIN);
	req->ver_upd = cpu_to_le16(DRV_VER_UPD);

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc) {
		set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
		if (resp->flags &
		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
			bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
	}
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

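/* Deregister the driver from firmware; a no-op if never registered. */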
int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
{
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
		return 0;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
	if (rc)
		return rc;
	return bnge_hwrm_req_send(bd, req);
}

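/*
 * Record how a context type's backing memory must be initialized.
 * When firmware marks the init value as valid, the reported offset is
 * converted to bytes (the multiply by 4 suggests 4-byte units);
 * otherwise the init value is cleared.
 */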
static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
				      u8 init_val, u8 init_offset,
				      bool init_mask_set)
{
	ctxm->init_value = init_val;
	ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
	if (init_mask_set)
		ctxm->init_offset = init_offset * 4;
	else
		ctxm->init_value = 0;
}

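/*
 * Allocate per-instance page info for every context type that has
 * entries; a type with an instance bitmap needs one entry per set bit.
 */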
static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
{
	struct bnge_ctx_mem_info *ctx = bd->ctx;
	u16 type;

	for (type = 0; type < ctx_max; type++) {
		struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		int n = 1;

		if (!ctxm->max_entries)
			continue;

		if (ctxm->instance_bmap)
			n = hweight32(ctxm->instance_bmap);
		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
		if (!ctxm->pg_info)
			return -ENOMEM;
	}

	return 0;
}

#define BNGE_CTX_INIT_VALID(flags)	\
	(!!((flags) &			\
	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))

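/*
 * Discover the V2 backing-store context types. Each response carries
 * one type's sizing and init requirements plus the next valid type, so
 * the loop follows that chain until it runs past BNGE_CTX_V2_MAX.
 */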
int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_backing_store_qcaps_v2_output *resp;
	struct hwrm_func_backing_store_qcaps_v2_input *req;
	struct bnge_ctx_mem_info *ctx;
	u16 type;
	int rc;

	if (bd->ctx)
		return 0;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
	if (rc)
		return rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	bd->ctx = ctx;

	resp = bnge_hwrm_req_hold(bd, req);

	for (type = 0; type < BNGE_CTX_V2_MAX; ) {
		struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		u8 init_val, init_off, i;
		__le32 *p;
		u32 flags;

		req->type = cpu_to_le16(type);
		rc = bnge_hwrm_req_send(bd, req);
		if (rc)
			goto ctx_done;
		flags = le32_to_cpu(resp->flags);
		type = le16_to_cpu(resp->next_valid_type);
		if (!(flags &
		      FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
			continue;

		ctxm->type = le16_to_cpu(resp->type);
		ctxm->entry_size = le16_to_cpu(resp->entry_size);
		ctxm->flags = flags;
		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
		ctxm->entry_multiple = resp->entry_multiple;
		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
		init_val = resp->ctx_init_value;
		init_off = resp->ctx_init_offset;
		bnge_init_ctx_initializer(ctxm, init_val, init_off,
					  BNGE_CTX_INIT_VALID(flags));
		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
					      BNGE_MAX_SPLIT_ENTRY);
		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
		     i++, p++)
			ctxm->split[i] = le32_to_cpu(*p);
	}
	rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);

ctx_done:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

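/*
 * Encode a ring's page attributes for firmware: page size, level of
 * indirection (1 or 2 for page-table levels, 0 for direct), and the
 * DMA address of either the page table or the first data page.
 */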
static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	if (!rmem->nr_pages)
		return;

	BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}

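/*
 * Configure one context type's backing store, sending a separate
 * FUNC_BACKING_STORE_CFG_V2 request per instance. The last request of
 * the last type (@last set) carries BNGE_BS_CFG_ALL_DONE, which
 * presumably tells firmware the whole configuration is complete.
 */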
int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
				 struct bnge_ctx_mem_type *ctxm,
				 bool last)
{
	struct hwrm_func_backing_store_cfg_v2_input *req;
	u32 instance_bmap = ctxm->instance_bmap;
	int i, j, rc = 0, n = 1;
	__le32 *p;

	if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
		return 0;

	if (instance_bmap)
		n = hweight32(ctxm->instance_bmap);
	else
		instance_bmap = 1;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
	if (rc)
		return rc;
	bnge_hwrm_req_hold(bd, req);
	req->type = cpu_to_le16(ctxm->type);
	req->entry_size = cpu_to_le16(ctxm->entry_size);
	req->subtype_valid_cnt = ctxm->split_entry_cnt;
	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
		p[i] = cpu_to_le32(ctxm->split[i]);
	for (i = 0, j = 0; j < n && !rc; i++) {
		struct bnge_ctx_pg_info *ctx_pg;

		if (!(instance_bmap & (1 << i)))
			continue;
		req->instance = cpu_to_le16(i);
		ctx_pg = &ctxm->pg_info[j++];
		if (!ctx_pg->entries)
			continue;
		req->num_entries = cpu_to_le32(ctx_pg->entries);
		bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->page_size_pbl_level,
				      &req->page_dir);
		if (last && j == n)
			req->flags =
				cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
		rc = bnge_hwrm_req_send(bd, req);
	}
	bnge_hwrm_req_drop(bd, req);

	return rc;
}

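/*
 * Read back the ring counts firmware actually reserved and reconcile
 * rx/tx against the available completion rings. When aggregation rings
 * are required, each rx ring appears to consume two reserved rings,
 * hence the halving before the check and the doubling after the fixup.
 */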
static int bnge_hwrm_get_rings(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	u16 cp, stats;
	u16 rx, tx;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc) {
		bnge_hwrm_req_drop(bd, req);
		return rc;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
	hw_resc->resv_hw_ring_grps =
		le32_to_cpu(resp->alloc_hw_ring_grps);
	hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
	hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
	cp = le16_to_cpu(resp->alloc_cmpl_rings);
	stats = le16_to_cpu(resp->alloc_stat_ctx);
	hw_resc->resv_irqs = cp;
	rx = hw_resc->resv_rx_rings;
	tx = hw_resc->resv_tx_rings;
	if (bnge_is_agg_reqd(bd))
		rx >>= 1;
	if (cp < (rx + tx)) {
		rc = bnge_fix_rings_count(&rx, &tx, cp, false);
		if (rc)
			goto get_rings_exit;
		if (bnge_is_agg_reqd(bd))
			rx <<= 1;
		hw_resc->resv_rx_rings = rx;
		hw_resc->resv_tx_rings = tx;
	}
	hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
	hw_resc->resv_hw_ring_grps = rx;
	hw_resc->resv_cp_rings = cp;
	hw_resc->resv_stat_ctxs = stats;

get_rings_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

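/*
 * Build, but do not send, a FUNC_CFG request asking firmware to
 * reserve the given ring counts. An enables bit is set only for
 * nonzero resources, so zeroed fields are not interpreted as requests.
 */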
static struct hwrm_func_cfg_input *
__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
		return NULL;

	req->fid = cpu_to_le16(0xffff);
	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(hwr->tx);

	enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
	enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
	enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
	enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;

	req->num_rx_rings = cpu_to_le16(hwr->rx);
	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
	req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
	req->num_msix = cpu_to_le16(hwr->nq);
	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
	req->num_vnics = cpu_to_le16(hwr->vnic);
	req->enables = cpu_to_le32(enables);

	return req;
}

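/*
 * Send the reservation request built above; if nothing was enabled
 * there is nothing to reserve. On success, re-query firmware for the
 * counts it actually granted.
 */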
static int
bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
	if (!req)
		return -ENOMEM;

	if (!req->enables) {
		bnge_hwrm_req_drop(bd, req);
		return 0;
	}

	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		return rc;

	return bnge_hwrm_get_rings(bd);
}

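/*
 * Entry point for ring reservations; currently this just forwards to
 * the PF variant. A minimal sketch of a caller (the counts below are
 * illustrative only, not taken from this driver):
 *
 *	struct bnge_hw_rings hwr = {};
 *
 *	hwr.tx = 4;
 *	hwr.rx = 4;
 *	rc = bnge_hwrm_reserve_rings(bd, &hwr);
 */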
int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
{
	return bnge_hwrm_reserve_pf_rings(bd, hwr);
}

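/*
 * Query the function's current configuration: the configured maximum
 * MTU and, on first invocation only, the L2 doorbell BAR layout. The
 * doorbell size is sanity-checked against the actual length of BAR 2.
 */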
int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto func_qcfg_exit;

	bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bd->max_mtu)
		bd->max_mtu = BNGE_MAX_MTU;

	if (bd->db_size)
		goto func_qcfg_exit;

	bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
	bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
			1024);
	if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
	    bd->db_size <= bd->db_offset)
		bd->db_size = pci_resource_len(bd->pdev, 2);

func_qcfg_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

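/*
 * Capture the minimum and maximum resource counts (rings, ring groups,
 * VNICs, L2/RSS contexts, stat contexts, MSI-X) that firmware will let
 * this function reserve.
 */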
int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_resource_qcaps_output *resp;
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	struct hwrm_func_resource_qcaps_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send_silent(bd, req);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
	hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;

hwrm_func_resc_qcaps_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

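/*
 * Query basic function capabilities: RoCE v1/v2 support, the firmware
 * function and port IDs, the MAC address, and the TSO segment limit.
 */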
int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	struct bnge_pf_info *pf = &bd->pf;
	u32 flags;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bd->flags |= BNGE_EN_ROCE_V1;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bd->flags |= BNGE_EN_ROCE_V2;

	pf->fw_fid = le16_to_cpu(resp->fid);
	pf->port_id = le16_to_cpu(resp->port_id);
	memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);

	bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);

hwrm_func_qcaps_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

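/*
 * Query VNIC capabilities and translate the response flags into driver
 * capability bits: VLAN stripping, RSS hash/TCAM variants, IPsec
 * AH/ESP RSS, tunnel TPA, and the TPA v2 aggregation limit (which also
 * switches hw_ring_stats_size to BNGE_RING_STATS_SIZE).
 */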
int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
{
	struct hwrm_vnic_qcaps_output *resp;
	struct hwrm_vnic_qcaps_input *req;
	int rc;

	bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
	bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc) {
		u32 flags = le32_to_cpu(resp->flags);

		if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
			bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
			bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
		bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
		if (bd->max_tpa_v2)
			bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
			bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
	}
	bnge_hwrm_req_drop(bd, req);

	return rc;
}

#define BNGE_CNPQ(q_profile)	\
		((q_profile) ==	\
		 QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)

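/*
 * Discover the device's CoS queues and their service profiles. A CNP
 * queue is counted as a traffic class only when RoCE is disabled,
 * presumably because it is otherwise dedicated to RoCE congestion
 * control; asymmetric queue configurations are clamped to a single TC.
 */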
int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
{
	struct hwrm_queue_qportcfg_output *resp;
	struct hwrm_queue_qportcfg_input *req;
	u8 i, j, *qptr;
	bool no_rdma;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bd->max_tc = resp->max_configurable_queues;
	bd->max_lltc = resp->max_configurable_lossless_queues;
	if (bd->max_tc > BNGE_MAX_QUEUE)
		bd->max_tc = BNGE_MAX_QUEUE;

	no_rdma = !bnge_is_roce_en(bd);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bd->max_tc; i++) {
		bd->q_info[j].queue_id = *qptr;
		bd->q_ids[i] = *qptr++;
		bd->q_info[j].queue_profile = *qptr++;
		bd->tc_to_qidx[j] = j;
		if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
			j++;
	}
	bd->max_q = bd->max_tc;
	bd->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bd->max_tc = 1;

	if (bd->max_lltc > bd->max_tc)
		bd->max_lltc = bd->max_tc;

qportcfg_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}