1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3 *
4 * Copyright (c) 2017 - 2026 Intel Corporation
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include "osdep.h"
36 #include "irdma_type.h"
37 #include "icrdma_hw.h"
38 #include "irdma_main.h"
39
40 void disable_prefetch(struct irdma_hw *hw);
41
42 void disable_tx_spad(struct irdma_hw *hw);
43
44 void rdpu_ackreqpmthresh(struct irdma_hw *hw);
45
/* MMIO offsets for GEN_2 (icrdma) hardware, indexed by the generic
 * IRDMA_* register enum. icrdma_init_hw() turns each entry into a
 * mapped register address in dev->hw_regs[].
 */
static u32 icrdma_regs[IRDMA_MAX_REGS] = {
	PFPE_CQPTAIL,
	PFPE_CQPDB,
	PFPE_CCQPSTATUS,
	PFPE_CCQPHIGH,
	PFPE_CCQPLOW,
	PFPE_CQARM,
	PFPE_CQACK,
	PFPE_AEQALLOC,
	PFPE_CQPERRCODES,
	PFPE_WQEALLOC,
	GLINT_DYN_CTL(0),
	/* Kept as a raw offset: icrdma_init_hw() maps this entry from a
	 * NULL base rather than from the BAR (see IRDMA_DB_ADDR_OFFSET
	 * handling there).
	 */
	ICRDMA_DB_ADDR_OFFSET,

	GLPCI_LBARCTRL,
	GLPE_CPUSTATUS0,
	GLPE_CPUSTATUS1,
	GLPE_CPUSTATUS2,
	PFINT_AEQCTL,
	GLINT_CEQCTL(0),
	VSIQF_PE_CTL1(0),
	PFHMC_PDINV,
	GLHMC_VFPDINV(0),
	GLPE_CRITERR,
	GLINT_RATE(0),
};
72
/* GEN_2 field masks, indexed by the generic IRDMA_* mask enum; copied
 * into dev->hw_masks[] by icrdma_init_hw(). Must stay parallel with
 * icrdma_shifts[] below.
 */
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE,
	ICRDMA_CCQPSTATUS_CCQP_ERR,
	ICRDMA_CQPSQ_STAG_PDID,
	ICRDMA_CQPSQ_CQ_CEQID,
	ICRDMA_CQPSQ_CQ_CQID,
	ICRDMA_COMMIT_FPM_CQCNT,
	ICRDMA_CQPSQ_UPESD_HMCFNID,
};
82
/* Bit shifts matching icrdma_masks[] entry-for-entry; copied into
 * dev->hw_shifts[] by icrdma_init_hw().
 */
static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE_S,
	ICRDMA_CCQPSTATUS_CCQP_ERR_S,
	ICRDMA_CQPSQ_STAG_PDID_S,
	ICRDMA_CQPSQ_CQ_CEQID_S,
	ICRDMA_CQPSQ_CQ_CQID_S,
	ICRDMA_COMMIT_FPM_CQCNT_S,
	ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};
92
93 /**
94 * icrdma_ena_irq - Enable interrupt
95 * @dev: pointer to the device structure
96 * @idx: vector index
97 */
98 static void
icrdma_ena_irq(struct irdma_sc_dev * dev,u32 idx)99 icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
100 {
101 u32 val;
102 u32 interval = 0;
103
104 if (dev->ceq_itr && dev->aeq->msix_idx != idx)
105 interval = dev->ceq_itr >> 1; /* 2 usec units */
106 val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, IRDMA_IDX_ITR0) |
107 FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
108 FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, true) |
109 FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, true);
110 writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
111 }
112
/**
 * icrdma_disable_irq - Disable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 *
 * Writing 0 to GLINT_DYN_CTL clears INTENA, masking the vector.
 */
static void
icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
	writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}
123
124 /**
125 * icrdma_cfg_ceq- Configure CEQ interrupt
126 * @dev: pointer to the device structure
127 * @ceq_id: Completion Event Queue ID
128 * @idx: vector index
129 * @enable: True to enable, False disables
130 */
131 static void
icrdma_cfg_ceq(struct irdma_sc_dev * dev,u32 ceq_id,u32 idx,bool enable)132 icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
133 bool enable)
134 {
135 u32 reg_val;
136
137 reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA : 0;
138 reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
139 IRDMA_GLINT_CEQCTL_ITR_INDX;
140
141 writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
142 }
143
/* GEN_2 interrupt ops table; AEQ configuration reuses the generic
 * irdma_cfg_aeq, everything else is icrdma-specific.
 */
static const struct irdma_irq_ops icrdma_irq_ops = {
	.irdma_cfg_aeq = irdma_cfg_aeq,
	.irdma_cfg_ceq = icrdma_cfg_ceq,
	.irdma_dis_irq = icrdma_disable_irq,
	.irdma_en_irq = icrdma_ena_irq,
};
150
/* Layout of the GEN_2 hardware statistics buffer, indexed by the generic
 * IRDMA_HW_STAT_INDEX_* enum. Each initializer appears to be
 * {byte offset, bit offset, counter-width class} — confirm against
 * struct irdma_hw_stat_map's member order. Entries sharing a byte offset
 * (e.g. IP4RXDISCARD/IP4RXTRUNC at 24) pack two counters into one
 * 64-bit word, split by the bit offset.
 */
static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
	[IRDMA_HW_STAT_INDEX_RXVLANERR] = {0, 32, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = {8, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = {16, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = {24, 32, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = {24, 0, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = {32, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = {40, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = {48, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = {56, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = {64, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = {72, 32, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = {72, 0, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = {80, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = {88, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = {96, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = {104, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = {112, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = {120, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = {128, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = {136, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = {144, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = {152, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = {160, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = {168, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = {176, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = {184, 32, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = {184, 0, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = {192, 32, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = {200, 32, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = {200, 0, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_TCPTXSEG] = {208, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = {216, 32, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = {224, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = {232, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMARXWRS] = {240, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMARXRDS] = {248, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = {256, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMATXWRS] = {264, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMATXRDS] = {272, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = {280, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMAVBND] = {288, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMAVINV] = {296, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = {304, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = {312, 32, IRDMA_MAX_STATS_16},
	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = {312, 0, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = {320, 0, IRDMA_MAX_STATS_32},
};
199
200 void
icrdma_init_hw(struct irdma_sc_dev * dev)201 icrdma_init_hw(struct irdma_sc_dev *dev)
202 {
203 int i;
204 u8 IOMEM *hw_addr;
205
206 for (i = 0; i < IRDMA_MAX_REGS; ++i) {
207 hw_addr = dev->hw->hw_addr;
208
209 if (i == IRDMA_DB_ADDR_OFFSET)
210 hw_addr = NULL;
211
212 dev->hw_regs[i] = (u32 IOMEM *) (hw_addr + icrdma_regs[i]);
213 }
214
215 for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
216 dev->hw_shifts[i] = icrdma_shifts[i];
217
218 for (i = 0; i < IRDMA_MAX_MASKS; ++i)
219 dev->hw_masks[i] = icrdma_masks[i];
220
221 dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
222 dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
223 dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
224 dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
225 dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
226 dev->irq_ops = &icrdma_irq_ops;
227 dev->hw_stats_map = icrdma_hw_stat_map;
228 dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
229 dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
230 dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
231 dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
232 dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
233 dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
234
235 dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
236 dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
237 dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
238 dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
239 disable_tx_spad(dev->hw);
240 disable_prefetch(dev->hw);
241 rdpu_ackreqpmthresh(dev->hw);
242 dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RELAX_RQ_ORDER;
243 dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
244 IRDMA_FEATURE_CQ_RESIZE;
245 }
246
247 void
irdma_init_config_check(struct irdma_config_check * cc,u8 traffic_class,u8 prio,u16 qs_handle)248 irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u8 prio, u16 qs_handle)
249 {
250 cc->config_ok = false;
251 cc->traffic_class = traffic_class;
252 cc->qs_handle = qs_handle;
253 cc->prio = prio;
254 cc->lfc_set = 0;
255 cc->pfc_set = 0;
256 }
257
258 static bool
irdma_is_lfc_set(struct irdma_config_check * cc,struct irdma_sc_vsi * vsi)259 irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
260 {
261 u32 temp;
262 u32 lfc = 1;
263 u32 rx_pause_enable, tx_pause_enable;
264 u8 fn_id = vsi->dev->hmc_fn_id;
265
266 if (irdma_fw_major_ver(vsi->dev) == 1) {
267 rx_pause_enable = PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
268 tx_pause_enable = PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
269 } else {
270 rx_pause_enable = CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
271 tx_pause_enable = CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
272 }
273
274 #define LFC_ENABLE BIT_ULL(8)
275 #define LFC_ENABLE_S 8
276 temp = rd32(vsi->dev->hw, rx_pause_enable + 4 * fn_id);
277 lfc &= FIELD_GET(LFC_ENABLE, temp);
278 temp = rd32(vsi->dev->hw, tx_pause_enable + 4 * fn_id);
279 lfc &= FIELD_GET(LFC_ENABLE, temp);
280 lfc &= rd32(vsi->dev->hw,
281 PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id);
282 if (lfc)
283 return true;
284 return false;
285 }
286
287 static bool
irdma_check_tc_has_pfc(struct irdma_sc_vsi * vsi,u64 reg_offset,u16 traffic_class)288 irdma_check_tc_has_pfc(struct irdma_sc_vsi *vsi, u64 reg_offset, u16 traffic_class)
289 {
290 u32 value, pfc = 0;
291 u32 i;
292
293 value = rd32(vsi->dev->hw, reg_offset);
294 for (i = 0; i < 4; i++)
295 pfc |= (value >> (8 * i + traffic_class)) & 0x1;
296
297 if (pfc)
298 return true;
299 return false;
300 }
301
302 static bool
irdma_is_pfc_set(struct irdma_config_check * cc,struct irdma_sc_vsi * vsi)303 irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
304 {
305 u32 pause;
306 u32 rx_pause_enable, tx_pause_enable;
307 u8 fn_id = vsi->dev->hmc_fn_id;
308
309 if (irdma_fw_major_ver(vsi->dev) == 1) {
310 rx_pause_enable = PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
311 tx_pause_enable = PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
312 } else {
313 rx_pause_enable = CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
314 tx_pause_enable = CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
315 }
316
317 pause = (rd32(vsi->dev->hw, rx_pause_enable + 4 * fn_id) >>
318 cc->prio) & BIT(0);
319 pause &= (rd32(vsi->dev->hw, tx_pause_enable + 4 * fn_id) >>
320 cc->prio) & BIT(0);
321
322 return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
323 pause;
324 }
325
326 bool
irdma_is_config_ok(struct irdma_config_check * cc,struct irdma_sc_vsi * vsi)327 irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
328 {
329 cc->lfc_set = irdma_is_lfc_set(cc, vsi);
330 cc->pfc_set = irdma_is_pfc_set(cc, vsi);
331
332 cc->config_ok = cc->lfc_set || cc->pfc_set;
333
334 return cc->config_ok;
335 }
336
/* TCP tuning constants chosen by whether flow control is active:
 * receive window, congestion window, minimum RTO and ACK credits.
 * None are referenced in this file — presumably consumed by connection
 * setup code elsewhere in the driver; verify before changing.
 */
#define IRDMA_RCV_WND_NO_FC 0x1FFFC
#define IRDMA_RCV_WND_FC 0x3FFFC

#define IRDMA_CWND_NO_FC 0x20
#define IRDMA_CWND_FC 0x400
#define IRDMA_CWND_DCQCN_FC 0x80000

#define IRDMA_RTOMIN_NO_FC 0x5
#define IRDMA_RTOMIN_FC 0x32

#define IRDMA_ACKCREDS_NO_FC 0x02
#define IRDMA_ACKCREDS_FC 0x1E
349
350 static void
irdma_check_flow_ctrl(struct irdma_sc_vsi * vsi,u8 user_prio,u8 traffic_class)351 irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
352 {
353 struct irdma_config_check *cfg_chk = &vsi->cfg_check[user_prio];
354
355 if (!irdma_is_config_ok(cfg_chk, vsi)) {
356 if (vsi->tc_print_warning[traffic_class]) {
357 irdma_pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", traffic_class);
358 vsi->tc_print_warning[traffic_class] = false;
359 }
360 } else {
361 if (vsi->tc_print_warning[traffic_class]) {
362 irdma_pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", traffic_class);
363 vsi->tc_print_warning[traffic_class] = false;
364 }
365 }
366 }
367
368 void
irdma_check_fc_for_tc_update(struct irdma_sc_vsi * vsi,struct irdma_l2params * l2params)369 irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
370 struct irdma_l2params *l2params)
371 {
372 u8 i;
373
374 for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
375 vsi->tc_print_warning[i] = true;
376
377 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
378 struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
379 u8 tc = l2params->up2tc[i];
380
381 cfg_chk->traffic_class = tc;
382 cfg_chk->qs_handle = vsi->qos[i].qs_handle;
383 irdma_check_flow_ctrl(vsi, i, tc);
384 }
385 }
386
387 void
irdma_check_fc_for_qp(struct irdma_sc_vsi * vsi,struct irdma_sc_qp * sc_qp)388 irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
389 {
390 u8 i;
391
392 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
393 struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
394
395 irdma_init_config_check(cfg_chk,
396 vsi->qos[i].traffic_class, i,
397 vsi->qos[i].qs_handle);
398 if (sc_qp->qs_handle == cfg_chk->qs_handle)
399 irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
400 }
401 }
402
403 #define GLPE_WQMTXIDXADDR 0x50E000
404 #define GLPE_WQMTXIDXDATA 0x50E004
405
406 void
disable_prefetch(struct irdma_hw * hw)407 disable_prefetch(struct irdma_hw *hw)
408 {
409 u32 wqm_data;
410
411 wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
412 irdma_mb();
413
414 wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
415 wqm_data &= ~(1);
416 wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
417 }
418
419 void
disable_tx_spad(struct irdma_hw * hw)420 disable_tx_spad(struct irdma_hw *hw)
421 {
422 u32 wqm_data;
423
424 wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
425 irdma_mb();
426
427 wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
428 wqm_data &= ~(1 << 3);
429 wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
430 }
431
432 #define GL_RDPU_CNTRL 0x52054
433 void
rdpu_ackreqpmthresh(struct irdma_hw * hw)434 rdpu_ackreqpmthresh(struct irdma_hw *hw)
435 {
436 u32 val;
437
438 val = rd32(hw, GL_RDPU_CNTRL);
439 val &= ~(0x3f << 10);
440 val |= (3 << 10);
441 wr32(hw, GL_RDPU_CNTRL, val);
442 }
443