xref: /src/sys/contrib/dev/rtw89/pci.c (revision 7fc5c8df4c90a2067c936e3026be6bd6840cd5ec)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2020  Realtek Corporation
3  */
4 
5 #if defined(__FreeBSD__)
6 #define	LINUXKPI_PARAM_PREFIX	rtw89_pci_
7 #endif
8 
9 #include <linux/pci.h>
10 #if defined(__FreeBSD__)
11 #include <sys/rman.h>
12 #endif
13 
14 #include "mac.h"
15 #include "pci.h"
16 #include "reg.h"
17 #include "ser.h"
18 
19 static bool rtw89_pci_disable_clkreq;
20 static bool rtw89_pci_disable_aspm_l1;
21 static bool rtw89_pci_disable_l1ss;
22 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
23 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
24 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
25 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
26 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
27 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
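/* These knobs can be set when the module loads, e.g. something like
 * "modprobe rtw89_pci disable_aspm_l1=y" (module name assumed from this
 * file's param prefix), or changed afterwards through the writable 0644
 * parameter nodes under /sys/module/<module>/parameters/ on Linux builds.
 */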
28 
29 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
30 						  u32 *phy_offset)
31 {
32 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
33 	struct pci_dev *pdev = rtwpci->pdev;
34 	u32 val;
35 	int ret;
36 
37 	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
38 	if (ret)
39 		return ret;
40 
41 	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
42 	if (val == RTW89_PCIE_GEN1_SPEED) {
43 		*phy_offset = R_RAC_DIRECT_OFFSET_G1;
44 	} else if (val == RTW89_PCIE_GEN2_SPEED) {
45 		*phy_offset = R_RAC_DIRECT_OFFSET_G2;
46 	} else {
47 		rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
48 		return -EFAULT;
49 	}
50 
51 	return 0;
52 }
53 
54 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
55 {
56 	u32 val;
57 	int ret;
58 
59 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);
60 
61 	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
62 				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
63 				       rtwdev, R_AX_PCIE_INIT_CFG1);
64 
65 	return ret;
66 }
67 
68 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
69 				struct rtw89_pci_dma_ring *bd_ring,
70 				u32 cur_idx, bool tx)
71 {
72 	const struct rtw89_pci_info *info = rtwdev->pci_info;
73 	u32 cnt, cur_rp, wp, rp, len;
74 
75 	rp = bd_ring->rp;
76 	wp = bd_ring->wp;
77 	len = bd_ring->len;
78 
79 	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
80 	if (tx) {
81 		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
82 	} else {
83 		if (info->rx_ring_eq_is_full)
84 			wp += 1;
85 
86 		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
87 	}
88 
89 	bd_ring->rp = cur_rp;
90 
91 	return cnt;
92 }
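/* Worked example of the recalc arithmetic above (values assumed): for a TX
 * ring with len = 256, software rp = 250 and a hardware index whose
 * TXBD_HW_IDX_MASK field decodes to cur_rp = 4, the hardware pointer has
 * wrapped, so cnt = len - (rp - cur_rp) = 256 - 246 = 10 descriptors can be
 * reclaimed. The RX path runs the same wrap-around math against wp (plus one
 * when rx_ring_eq_is_full) instead of rp.
 */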
93 
94 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
95 				 struct rtw89_pci_tx_ring *tx_ring)
96 {
97 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
98 	u32 addr_idx = bd_ring->addr.idx;
99 	u32 cnt, idx;
100 
101 	idx = rtw89_read32(rtwdev, addr_idx);
102 	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
103 
104 	return cnt;
105 }
106 
107 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
108 				    struct rtw89_pci *rtwpci,
109 				    u32 cnt, bool release_all)
110 {
111 	struct rtw89_pci_tx_data *tx_data;
112 	struct sk_buff *skb;
113 	u32 qlen;
114 
115 	while (cnt--) {
116 		skb = skb_dequeue(&rtwpci->h2c_queue);
117 		if (!skb) {
118 			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
119 			return;
120 		}
121 		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
122 	}
123 
124 	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
125 	if (!release_all)
126 	       qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
127 
128 	while (qlen--) {
129 		skb = skb_dequeue(&rtwpci->h2c_release_queue);
130 		if (!skb) {
131 			rtw89_err(rtwdev, "failed to release fwcmd\n");
132 			return;
133 		}
134 		tx_data = RTW89_PCI_TX_SKB_CB(skb);
135 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
136 				 DMA_TO_DEVICE);
137 		dev_kfree_skb_any(skb);
138 	}
139 }
140 
141 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
142 				       struct rtw89_pci *rtwpci)
143 {
144 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
145 	u32 cnt;
146 
147 	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
148 	if (!cnt)
149 		return;
150 	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
151 }
152 
153 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
154 				 struct rtw89_pci_rx_ring *rx_ring)
155 {
156 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
157 	u32 addr_idx = bd_ring->addr.idx;
158 	u32 cnt, idx;
159 
160 	idx = rtw89_read32(rtwdev, addr_idx);
161 	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
162 
163 	return cnt;
164 }
165 
166 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
167 				       struct sk_buff *skb)
168 {
169 	struct rtw89_pci_rx_info *rx_info;
170 	dma_addr_t dma;
171 
172 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
173 	dma = rx_info->dma;
174 	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
175 				DMA_FROM_DEVICE);
176 }
177 
178 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
179 					  struct sk_buff *skb)
180 {
181 	struct rtw89_pci_rx_info *rx_info;
182 	dma_addr_t dma;
183 
184 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
185 	dma = rx_info->dma;
186 	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
187 				   DMA_FROM_DEVICE);
188 }
189 
190 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
191 				       struct sk_buff *skb)
192 {
193 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
194 	struct rtw89_pci_rxbd_info *rxbd_info;
195 	__le32 info;
196 
197 	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
198 	info = rxbd_info->dword;
199 
200 	rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
201 	rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
202 	rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
203 	rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
204 }
205 
206 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
207 				     struct rtw89_pci_rx_ring *rx_ring,
208 				     struct sk_buff *skb)
209 {
210 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
211 	const struct rtw89_pci_info *info = rtwdev->pci_info;
212 	u32 target_rx_tag;
213 
214 	if (!info->check_rx_tag)
215 		return 0;
216 
217 	/* valid range is 1 ~ 0x1FFF */
218 	if (rx_ring->target_rx_tag == 0)
219 		target_rx_tag = 1;
220 	else
221 		target_rx_tag = rx_ring->target_rx_tag;
222 
223 	if (rx_info->tag != target_rx_tag) {
224 		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
225 			    rx_info->tag, target_rx_tag);
226 		return -EAGAIN;
227 	}
228 
229 	return 0;
230 }
231 
232 static
233 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
234 						       struct rtw89_pci_rx_ring *rx_ring,
235 						       struct sk_buff *skb)
236 {
237 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
238 	int rx_tag_retry = 1000;
239 	int ret;
240 
241 	do {
242 		rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
243 		rtw89_pci_rxbd_info_update(rtwdev, skb);
244 
245 		ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
246 		if (ret != -EAGAIN)
247 			break;
248 	} while (rx_tag_retry--);
249 
250 	/* update target rx_tag for next RX */
251 	rx_ring->target_rx_tag = rx_info->tag + 1;
252 
253 	return ret;
254 }
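/* Sketch of the RX tag handshake used above (tag values assumed): the device
 * stamps each RXBD with an increasing tag (valid range 1 ~ 0x1FFF) and the
 * host tracks target_rx_tag. If the host expects tag 7 but the freshly
 * synced rxbd_info still carries tag 6, the descriptor update has not landed
 * in memory yet, so the loop re-syncs the skb for the CPU and retries up to
 * rx_tag_retry times before giving up with -EAGAIN.
 */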
255 
256 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
257 {
258 	const struct rtw89_pci_info *info = rtwdev->pci_info;
259 	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
260 	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
261 
262 	if (enable) {
263 		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
264 		if (dma_stop2->addr)
265 			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
266 	} else {
267 		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
268 		if (dma_stop2->addr)
269 			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
270 	}
271 }
272 
273 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
274 {
275 	const struct rtw89_pci_info *info = rtwdev->pci_info;
276 	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
277 
278 	if (enable)
279 		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
280 	else
281 		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
282 }
283 
284 static bool
285 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
286 		      struct sk_buff *new,
287 		      const struct sk_buff *skb, u32 offset,
288 		      const struct rtw89_pci_rx_info *rx_info,
289 		      const struct rtw89_rx_desc_info *desc_info)
290 {
291 	u32 copy_len = rx_info->len - offset;
292 
293 	if (unlikely(skb_tailroom(new) < copy_len)) {
294 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
295 			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
296 			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
297 		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
298 			       skb->data, rx_info->len);
299 		/* length of a single segment skb is desc_info->pkt_size */
300 		if (fs && ls) {
301 			copy_len = desc_info->pkt_size;
302 		} else {
303 			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
304 			return false;
305 		}
306 	}
307 
308 	skb_put_data(new, skb->data + offset, copy_len);
309 
310 	return true;
311 }
312 
313 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
314 				    struct rtw89_pci_dma_ring *bd_ring)
315 {
316 	const struct rtw89_pci_info *info = rtwdev->pci_info;
317 	u32 wp = bd_ring->wp;
318 
319 	if (!info->rx_ring_eq_is_full)
320 		return wp;
321 
322 	if (++wp >= bd_ring->len)
323 		wp = 0;
324 
325 	return wp;
326 }
327 
328 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
329 				       struct rtw89_pci_rx_ring *rx_ring)
330 {
331 	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
332 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
333 	const struct rtw89_pci_info *info = rtwdev->pci_info;
334 	struct sk_buff *new = rx_ring->diliver_skb;
335 	struct rtw89_pci_rx_info *rx_info;
336 	struct sk_buff *skb;
337 	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
338 	u32 skb_idx;
339 	u32 offset;
340 	u32 cnt = 1;
341 	bool fs, ls;
342 	int ret;
343 
344 	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
345 	skb = rx_ring->buf[skb_idx];
346 
347 	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
348 	if (ret) {
349 		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
350 			  bd_ring->wp, ret);
351 		goto err_sync_device;
352 	}
353 
354 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
355 	fs = info->no_rxbd_fs ? !new : rx_info->fs;
356 	ls = rx_info->ls;
357 
358 	if (unlikely(!fs || !ls))
359 		rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
360 			    "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
361 			    fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);
362 
363 	if (fs) {
364 		if (new) {
365 			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
366 				    "skb should not be ready before first segment start\n");
367 			goto err_sync_device;
368 		}
369 		if (desc_info->ready) {
370 			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
371 			goto err_sync_device;
372 		}
373 
374 		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
375 
376 		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
377 		if (!new)
378 			goto err_sync_device;
379 
380 		rx_ring->diliver_skb = new;
381 
382 		/* first segment has RX desc */
383 		offset = desc_info->offset + desc_info->rxd_len;
384 	} else {
385 		offset = sizeof(struct rtw89_pci_rxbd_info);
386 		if (!new) {
387 			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
388 			goto err_sync_device;
389 		}
390 	}
391 	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
392 		goto err_sync_device;
393 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
394 	rtw89_pci_rxbd_increase(rx_ring, 1);
395 
396 	if (!desc_info->ready) {
397 		rtw89_warn(rtwdev, "no rx desc information\n");
398 		goto err_free_resource;
399 	}
400 	if (ls) {
401 		rtw89_core_rx(rtwdev, desc_info, new);
402 		rx_ring->diliver_skb = NULL;
403 		desc_info->ready = false;
404 	}
405 
406 	return cnt;
407 
408 err_sync_device:
409 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
410 	rtw89_pci_rxbd_increase(rx_ring, 1);
411 err_free_resource:
412 	if (new)
413 		dev_kfree_skb_any(new);
414 	rx_ring->diliver_skb = NULL;
415 	desc_info->ready = false;
416 
417 	return cnt;
418 }
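/* Illustrative multi-segment case for the delivery routine above (sizes
 * assumed): a frame larger than one RX buffer arrives as a first segment
 * (fs = 1, carrying the RX descriptor that gives desc_info->pkt_size), zero
 * or more middle segments, and a final segment with ls = 1. The first
 * segment allocates the "new" skb, every segment appends its payload after
 * the rtw89_pci_rxbd_info header (plus the RX descriptor on the first one),
 * and only once ls is seen is the reassembled skb passed to rtw89_core_rx().
 */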
419 
420 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
421 				   struct rtw89_pci_rx_ring *rx_ring,
422 				   u32 cnt)
423 {
424 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
425 	u32 rx_cnt;
426 
427 	while (cnt && rtwdev->napi_budget_countdown > 0) {
428 		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
429 		if (!rx_cnt) {
430 			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
431 
432 			/* skip the remaining RXBD bufs */
433 			rtw89_pci_rxbd_increase(rx_ring, cnt);
434 			break;
435 		}
436 
437 		cnt -= rx_cnt;
438 	}
439 
440 	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
441 }
442 
443 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
444 				  struct rtw89_pci *rtwpci, int budget)
445 {
446 	struct rtw89_pci_rx_ring *rx_ring;
447 	int countdown = rtwdev->napi_budget_countdown;
448 	u32 cnt;
449 
450 	rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
451 
452 	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
453 	if (!cnt)
454 		return 0;
455 
456 	cnt = min_t(u32, budget, cnt);
457 
458 	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
459 
460 	/* In case of flushing pending SKBs, the budget countdown may be exceeded. */
461 	if (rtwdev->napi_budget_countdown <= 0)
462 		return budget;
463 
464 	return budget - countdown;
465 }
466 
467 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
468 				struct rtw89_pci_tx_ring *tx_ring,
469 				struct sk_buff *skb, u8 tx_status)
470 {
471 	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
472 	struct ieee80211_tx_info *info;
473 
474 	if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status))
475 		return;
476 
477 	info = IEEE80211_SKB_CB(skb);
478 	ieee80211_tx_info_clear_status(info);
479 
480 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
481 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
482 	if (tx_status == RTW89_TX_DONE) {
483 		info->flags |= IEEE80211_TX_STAT_ACK;
484 		tx_ring->tx_acked++;
485 	} else {
486 		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
487 			rtw89_debug(rtwdev, RTW89_DBG_FW,
488 				    "failed to TX of status %x\n", tx_status);
489 		switch (tx_status) {
490 		case RTW89_TX_RETRY_LIMIT:
491 			tx_ring->tx_retry_lmt++;
492 			break;
493 		case RTW89_TX_LIFE_TIME:
494 			tx_ring->tx_life_time++;
495 			break;
496 		case RTW89_TX_MACID_DROP:
497 			tx_ring->tx_mac_id_drop++;
498 			break;
499 		default:
500 			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
501 			break;
502 		}
503 	}
504 
505 	ieee80211_tx_status_ni(rtwdev->hw, skb);
506 }
507 
508 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
509 {
510 	struct rtw89_pci_tx_wd *txwd;
511 	u32 cnt;
512 
513 	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
514 	while (cnt--) {
515 		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
516 		if (!txwd) {
517 			rtw89_warn(rtwdev, "No busy txwd pages available\n");
518 			break;
519 		}
520 
521 		list_del_init(&txwd->list);
522 
523 		/* this skb has been freed by RPP */
524 		if (skb_queue_len(&txwd->queue) == 0)
525 			rtw89_pci_enqueue_txwd(tx_ring, txwd);
526 	}
527 }
528 
529 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
530 					struct rtw89_pci_tx_ring *tx_ring)
531 {
532 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
533 	struct rtw89_pci_tx_wd *txwd;
534 	int i;
535 
536 	for (i = 0; i < wd_ring->page_num; i++) {
537 		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
538 		if (!txwd)
539 			break;
540 
541 		list_del_init(&txwd->list);
542 	}
543 }
544 
545 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
546 				       struct rtw89_pci_tx_ring *tx_ring,
547 				       struct rtw89_pci_tx_wd *txwd, u16 seq,
548 				       u8 tx_status)
549 {
550 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
551 	struct rtw89_pci_tx_data *tx_data;
552 	struct sk_buff *skb, *tmp;
553 	u8 txch = tx_ring->txch;
554 
555 	if (!list_empty(&txwd->list)) {
556 		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
557 		/* In low power mode, an RPP can be received before the TX BD is updated.
558 		 * In normal mode, this should not happen, so give it a warning.
559 		 */
560 		if (!rtwpci->low_power && !list_empty(&txwd->list))
561 			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
562 				   txch, seq);
563 	}
564 
565 	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
566 		skb_unlink(skb, &txwd->queue);
567 
568 		tx_data = RTW89_PCI_TX_SKB_CB(skb);
569 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
570 				 DMA_TO_DEVICE);
571 
572 		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
573 	}
574 
575 	if (list_empty(&txwd->list))
576 		rtw89_pci_enqueue_txwd(tx_ring, txwd);
577 }
578 
579 void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
580 			 struct rtw89_pci_rpp_info *rpp_info)
581 {
582 	const struct rtw89_pci_rpp_fmt *rpp = _rpp;
583 
584 	rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
585 	rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
586 	rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
587 	rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel);
588 }
589 EXPORT_SYMBOL(rtw89_pci_parse_rpp);
590 
591 void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
592 			    struct rtw89_pci_rpp_info *rpp_info)
593 {
594 	const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp;
595 
596 	rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK);
597 	rpp_info->qsel = le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK);
598 	rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK);
599 	rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK);
600 }
601 EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1);
602 
603 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp)
604 {
605 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
606 	const struct rtw89_pci_info *info = rtwdev->pci_info;
607 	struct rtw89_pci_rpp_info rpp_info = {};
608 	struct rtw89_pci_tx_wd_ring *wd_ring;
609 	struct rtw89_pci_tx_ring *tx_ring;
610 	struct rtw89_pci_tx_wd *txwd;
611 
612 	info->parse_rpp(rtwdev, rpp, &rpp_info);
613 
614 	if (rpp_info.txch == RTW89_TXCH_CH12) {
615 		rtw89_warn(rtwdev, "should no fwcmd release report\n");
616 		return;
617 	}
618 
619 	tx_ring = &rtwpci->tx.rings[rpp_info.txch];
620 	wd_ring = &tx_ring->wd_ring;
621 	txwd = &wd_ring->pages[rpp_info.seq];
622 
623 	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq,
624 				   rpp_info.tx_status);
625 }
626 
627 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
628 					       struct rtw89_pci_tx_ring *tx_ring)
629 {
630 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
631 	struct rtw89_pci_tx_wd *txwd;
632 	int i;
633 
634 	for (i = 0; i < wd_ring->page_num; i++) {
635 		txwd = &wd_ring->pages[i];
636 
637 		if (!list_empty(&txwd->list))
638 			continue;
639 
640 		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
641 	}
642 }
643 
644 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
645 				     struct rtw89_pci_rx_ring *rx_ring,
646 				     u32 max_cnt)
647 {
648 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
649 	const struct rtw89_pci_info *info = rtwdev->pci_info;
650 	struct rtw89_rx_desc_info desc_info = {};
651 	struct rtw89_pci_rx_info *rx_info;
652 	struct sk_buff *skb;
653 	void *rpp;
654 	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
655 	u32 rpp_size = info->rpp_fmt_size;
656 	u32 cnt = 0;
657 	u32 skb_idx;
658 	u32 offset;
659 	int ret;
660 
661 	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
662 	skb = rx_ring->buf[skb_idx];
663 
664 	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
665 	if (ret) {
666 		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
667 			  bd_ring->wp, ret);
668 		goto err_sync_device;
669 	}
670 
671 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
672 	if (!rx_info->fs || !rx_info->ls) {
673 		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
674 		return cnt;
675 	}
676 
677 	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
678 
679 	/* first segment has RX desc */
680 	offset = desc_info.offset + desc_info.rxd_len;
681 	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
682 		rpp = skb->data + offset;
683 		rtw89_pci_release_rpp(rtwdev, rpp);
684 	}
685 
686 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
687 	rtw89_pci_rxbd_increase(rx_ring, 1);
688 	cnt++;
689 
690 	return cnt;
691 
692 err_sync_device:
693 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
694 	return 0;
695 }
696 
697 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
698 				 struct rtw89_pci_rx_ring *rx_ring,
699 				 u32 cnt)
700 {
701 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
702 	u32 release_cnt;
703 
704 	while (cnt) {
705 		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
706 		if (!release_cnt) {
707 			rtw89_err(rtwdev, "failed to release TX skbs\n");
708 
709 			/* skip the remaining RXBD bufs */
710 			rtw89_pci_rxbd_increase(rx_ring, cnt);
711 			break;
712 		}
713 
714 		cnt -= release_cnt;
715 	}
716 
717 	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
718 }
719 
720 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
721 				  struct rtw89_pci *rtwpci, int budget)
722 {
723 	struct rtw89_pci_rx_ring *rx_ring;
724 	u32 cnt;
725 	int work_done;
726 
727 	rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
728 
729 	spin_lock_bh(&rtwpci->trx_lock);
730 
731 	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
732 	if (cnt == 0)
733 		goto out_unlock;
734 
735 	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
736 
737 out_unlock:
738 	spin_unlock_bh(&rtwpci->trx_lock);
739 
740 	/* always release all RPQ */
741 	work_done = min_t(int, cnt, budget);
742 	rtwdev->napi_budget_countdown -= work_done;
743 
744 	return work_done;
745 }
746 
747 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
748 				      struct rtw89_pci *rtwpci)
749 {
750 	struct rtw89_pci_rx_ring *rx_ring;
751 	struct rtw89_pci_dma_ring *bd_ring;
752 	u32 reg_idx;
753 	u16 hw_idx, hw_idx_next, host_idx;
754 	int i;
755 
756 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
757 		rx_ring = &rtwpci->rx.rings[i];
758 		bd_ring = &rx_ring->bd_ring;
759 
760 		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
761 		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
762 		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
763 		hw_idx_next = (hw_idx + 1) % bd_ring->len;
764 
765 		if (hw_idx_next == host_idx)
766 			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
767 
768 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
769 			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
770 			    i, reg_idx, bd_ring->len);
771 	}
772 }
773 
774 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
775 			       struct rtw89_pci *rtwpci,
776 			       struct rtw89_pci_isrs *isrs)
777 {
778 	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
779 	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
780 	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
781 
782 	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
783 	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
784 	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
785 }
786 EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
787 
788 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
789 				  struct rtw89_pci *rtwpci,
790 				  struct rtw89_pci_isrs *isrs)
791 {
792 	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
793 	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
794 			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
795 	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
796 			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
797 	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
798 			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
799 
800 	if (isrs->halt_c2h_isrs)
801 		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
802 	if (isrs->isrs[0])
803 		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
804 	if (isrs->isrs[1])
805 		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
806 }
807 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
808 
809 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
810 				  struct rtw89_pci *rtwpci,
811 				  struct rtw89_pci_isrs *isrs)
812 {
813 	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
814 	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
815 			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
816 	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
817 			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
818 	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
819 
820 	if (isrs->halt_c2h_isrs)
821 		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
822 	if (isrs->isrs[0])
823 		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
824 	if (isrs->isrs[1])
825 		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
826 	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
827 }
828 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
829 
830 void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
831 				  struct rtw89_pci *rtwpci,
832 				  struct rtw89_pci_isrs *isrs)
833 {
834 	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
835 	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
836 			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
837 	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
838 
839 	/* isrs[0] is not used, so borrow it to store the RDU status and share
840 	 * the common flow in rtw89_pci_interrupt_threadfn().
841 	 */
842 	isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT |
843 					 B_BE_PCIE_RDU_CH0_INT);
844 
845 	if (isrs->halt_c2h_isrs)
846 		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
847 	if (isrs->isrs[1])
848 		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
849 	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
850 }
851 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3);
852 
853 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
854 {
855 	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
856 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
857 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
858 }
859 EXPORT_SYMBOL(rtw89_pci_enable_intr);
860 
861 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
862 {
863 	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
864 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
865 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
866 }
867 EXPORT_SYMBOL(rtw89_pci_disable_intr);
868 
869 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
870 {
871 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
872 	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
873 	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
874 	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
875 }
876 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
877 
878 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
879 {
880 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
881 }
882 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
883 
884 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
885 {
886 	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
887 	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
888 	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
889 	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
890 }
891 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);
892 
893 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
894 {
895 	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
896 	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
897 }
898 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
899 
900 void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
901 {
902 	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
903 	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
904 	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
905 }
906 EXPORT_SYMBOL(rtw89_pci_enable_intr_v3);
907 
908 void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
909 {
910 	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
911 	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
912 }
913 EXPORT_SYMBOL(rtw89_pci_disable_intr_v3);
914 
915 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
916 {
917 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
918 	unsigned long flags;
919 
920 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
921 	rtw89_chip_disable_intr(rtwdev, rtwpci);
922 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
923 	rtw89_chip_enable_intr(rtwdev, rtwpci);
924 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
925 }
926 
927 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
928 {
929 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
930 	unsigned long flags;
931 
932 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
933 	rtw89_chip_disable_intr(rtwdev, rtwpci);
934 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
935 	rtw89_chip_enable_intr(rtwdev, rtwpci);
936 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
937 }
938 
939 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
940 {
941 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
942 	int budget = NAPI_POLL_WEIGHT;
943 
944 	/* To prevent the RXQ from getting stuck due to running out of budget. */
945 	rtwdev->napi_budget_countdown = budget;
946 
947 	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
948 	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
949 }
950 
951 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
952 {
953 	struct rtw89_dev *rtwdev = dev;
954 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
955 	const struct rtw89_pci_info *info = rtwdev->pci_info;
956 	const struct rtw89_pci_isr_def *isr_def = info->isr_def;
957 	struct rtw89_pci_isrs isrs;
958 	unsigned long flags;
959 
960 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
961 	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
962 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
963 
964 	if (unlikely(isrs.isrs[0] & isr_def->isr_rdu))
965 		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
966 
967 	if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h))
968 		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
969 
970 	if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout))
971 		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
972 
973 	if (unlikely(rtwpci->under_recovery))
974 		goto enable_intr;
975 
976 	if (unlikely(rtwpci->low_power)) {
977 		rtw89_pci_low_power_interrupt_handler(rtwdev);
978 		goto enable_intr;
979 	}
980 
981 	if (likely(rtwpci->running)) {
982 		local_bh_disable();
983 		napi_schedule(&rtwdev->napi);
984 		local_bh_enable();
985 	}
986 
987 	return IRQ_HANDLED;
988 
989 enable_intr:
990 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
991 	if (likely(rtwpci->running))
992 		rtw89_chip_enable_intr(rtwdev, rtwpci);
993 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
994 	return IRQ_HANDLED;
995 }
996 
997 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
998 {
999 	struct rtw89_dev *rtwdev = dev;
1000 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1001 	unsigned long flags;
1002 	irqreturn_t irqret = IRQ_WAKE_THREAD;
1003 
1004 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1005 
1006 	/* If an interrupt event is in flight, it can still trigger an interrupt
1007 	 * even after pci_stop() has been done to turn off the IMR.
1008 	 */
1009 	if (unlikely(!rtwpci->running)) {
1010 		irqret = IRQ_HANDLED;
1011 		goto exit;
1012 	}
1013 
1014 	rtw89_chip_disable_intr(rtwdev, rtwpci);
1015 exit:
1016 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1017 
1018 	return irqret;
1019 }
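/* Interrupt flow of the two handlers above: the hard handler only masks
 * further interrupts and returns IRQ_WAKE_THREAD, while the threaded handler
 * reads and acks the ISR bits, dispatches SER events, handles the
 * recovery/low-power special cases (re-enabling interrupts itself), and
 * otherwise schedules NAPI; on that normal path the interrupts are expected
 * to be re-enabled once NAPI polling completes, outside the code shown here.
 */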
1020 
1021 #define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \
1022 	[RTW89_TXCH_##ch_idx] = { \
1023 		.num = R_##gen##_##txch##_TXBD_CFG, \
1024 		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
1025 		.bdram = 0, \
1026 		.desa_l = 0, \
1027 		.desa_h = 0, \
1028 	}
1029 
1030 #define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \
1031 	[RTW89_TXCH_##ch_idx] = { \
1032 		.num = R_##gen##_##txch##_TXBD_CFG, \
1033 		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
1034 		.bdram = 0, \
1035 		.desa_l = R_##gen##_##grp##_TXBD_DESA_L, \
1036 		.desa_h = R_##gen##_##grp##_TXBD_DESA_H, \
1037 	}
1038 
1039 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
1040 	[RTW89_TXCH_##ch_idx] = { \
1041 		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
1042 		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
1043 		.bdram = 0, \
1044 		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
1045 		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
1046 	}
1047 
1048 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
1049 	[RTW89_TXCH_##txch] = { \
1050 		.num = R_AX_##txch##_TXBD_NUM ##v, \
1051 		.idx = R_AX_##txch##_TXBD_IDX ##v, \
1052 		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
1053 		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
1054 		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
1055 	}
1056 
1057 #define DEF_TXCHADDRS(info, txch, v...) \
1058 	[RTW89_TXCH_##txch] = { \
1059 		.num = R_AX_##txch##_TXBD_NUM, \
1060 		.idx = R_AX_##txch##_TXBD_IDX, \
1061 		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
1062 		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
1063 		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
1064 	}
1065 
1066 #define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \
1067 	[RTW89_RXCH_##ch_idx] = { \
1068 		.num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
1069 		.idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
1070 		.desa_l = 0, \
1071 		.desa_h = 0, \
1072 	}
1073 
1074 #define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) \
1075 	[RTW89_RXCH_##ch_idx] = { \
1076 		.num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
1077 		.idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
1078 		.desa_l = R_##gen##_##grp##_RXBD_DESA_L, \
1079 		.desa_h = R_##gen##_##grp##_RXBD_DESA_H, \
1080 	}
1081 
1082 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
1083 	[RTW89_RXCH_##ch_idx] = { \
1084 		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
1085 		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
1086 		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
1087 		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
1088 	}
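/* For reference, DEF_TXCHADDRS(info, ACH0) above token-pastes into roughly:
 *
 *	[RTW89_TXCH_ACH0] = {
 *		.num = R_AX_ACH0_TXBD_NUM,
 *		.idx = R_AX_ACH0_TXBD_IDX,
 *		.bdram = R_AX_ACH0_BDRAM_CTRL,
 *		.desa_l = R_AX_ACH0_TXBD_DESA_L,
 *		.desa_h = R_AX_ACH0_TXBD_DESA_H,
 *	},
 *
 * and the _V1/TYPE* variants simply swap in the generation-specific register
 * names (or zero for registers a given generation does not have).
 */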
1089 
1090 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
1091 	.tx = {
1092 		DEF_TXCHADDRS(info, ACH0),
1093 		DEF_TXCHADDRS(info, ACH1),
1094 		DEF_TXCHADDRS(info, ACH2),
1095 		DEF_TXCHADDRS(info, ACH3),
1096 		DEF_TXCHADDRS(info, ACH4),
1097 		DEF_TXCHADDRS(info, ACH5),
1098 		DEF_TXCHADDRS(info, ACH6),
1099 		DEF_TXCHADDRS(info, ACH7),
1100 		DEF_TXCHADDRS(info, CH8),
1101 		DEF_TXCHADDRS(info, CH9),
1102 		DEF_TXCHADDRS_TYPE1(info, CH10),
1103 		DEF_TXCHADDRS_TYPE1(info, CH11),
1104 		DEF_TXCHADDRS(info, CH12),
1105 	},
1106 	.rx = {
1107 		DEF_RXCHADDRS(AX, RXQ, RXQ),
1108 		DEF_RXCHADDRS(AX, RPQ, RPQ),
1109 	},
1110 };
1111 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
1112 
1113 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
1114 	.tx = {
1115 		DEF_TXCHADDRS(info, ACH0, _V1),
1116 		DEF_TXCHADDRS(info, ACH1, _V1),
1117 		DEF_TXCHADDRS(info, ACH2, _V1),
1118 		DEF_TXCHADDRS(info, ACH3, _V1),
1119 		DEF_TXCHADDRS(info, ACH4, _V1),
1120 		DEF_TXCHADDRS(info, ACH5, _V1),
1121 		DEF_TXCHADDRS(info, ACH6, _V1),
1122 		DEF_TXCHADDRS(info, ACH7, _V1),
1123 		DEF_TXCHADDRS(info, CH8, _V1),
1124 		DEF_TXCHADDRS(info, CH9, _V1),
1125 		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
1126 		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
1127 		DEF_TXCHADDRS(info, CH12, _V1),
1128 	},
1129 	.rx = {
1130 		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
1131 		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
1132 	},
1133 };
1134 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
1135 
1136 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
1137 	.tx = {
1138 		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
1139 		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
1140 		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
1141 		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
1142 		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
1143 		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
1144 		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
1145 		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
1146 		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
1147 		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
1148 		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
1149 		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
1150 		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
1151 	},
1152 	.rx = {
1153 		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
1154 		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
1155 	},
1156 };
1157 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
1158 
1159 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = {
1160 	.tx = {
1161 		DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1),
1162 		/* no CH1 */
1163 		DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1),
1164 		/* no CH3 */
1165 		DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1),
1166 		/* no CH5 */
1167 		DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1),
1168 		/* no CH7 */
1169 		DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1),
1170 		/* no CH9 */
1171 		DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1),
1172 		/* no CH11 */
1173 		DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1),
1174 	},
1175 	.rx = {
1176 		DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1),
1177 		DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1),
1178 	},
1179 };
1180 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1);
1181 
1182 #undef DEF_TXCHADDRS_TYPE3
1183 #undef DEF_TXCHADDRS_TYPE3_GRP_BASE
1184 #undef DEF_TXCHADDRS_TYPE2
1185 #undef DEF_TXCHADDRS_TYPE1
1186 #undef DEF_TXCHADDRS
1187 #undef DEF_RXCHADDRS_TYPE3
1188 #undef DEF_RXCHADDRS_TYPE3_GRP_BASE
1189 #undef DEF_RXCHADDRS
1190 
1191 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
1192 				    enum rtw89_tx_channel txch,
1193 				    const struct rtw89_pci_ch_dma_addr **addr)
1194 {
1195 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1196 
1197 	if (txch >= RTW89_TXCH_NUM)
1198 		return -EINVAL;
1199 
1200 	*addr = &info->dma_addr_set->tx[txch];
1201 
1202 	return 0;
1203 }
1204 
1205 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
1206 				    enum rtw89_rx_channel rxch,
1207 				    const struct rtw89_pci_ch_dma_addr **addr)
1208 {
1209 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1210 
1211 	if (rxch >= RTW89_RXCH_NUM)
1212 		return -EINVAL;
1213 
1214 	*addr = &info->dma_addr_set->rx[rxch];
1215 
1216 	return 0;
1217 }
1218 
1219 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
1220 {
1221 	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
1222 
1223 	/* one desc is reserved to tell whether the ring is full or not */
1224 	if (bd_ring->rp > bd_ring->wp)
1225 		return bd_ring->rp - bd_ring->wp - 1;
1226 
1227 	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
1228 }
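/* Worked example for the availability math above (values assumed): with
 * len = 64, wp = 60 and rp = 10, 50 descriptors are in flight and
 * 64 - (60 - 10) - 1 = 13 remain usable; with rp = 61 and wp = 60 the result
 * is 61 - 60 - 1 = 0. The reserved descriptor keeps a completely full ring
 * distinguishable from an empty one, where wp == rp.
 */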
1229 
1230 static
1231 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
1232 {
1233 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1234 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
1235 	u32 cnt;
1236 
1237 	spin_lock_bh(&rtwpci->trx_lock);
1238 	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
1239 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1240 	spin_unlock_bh(&rtwpci->trx_lock);
1241 
1242 	return cnt;
1243 }
1244 
1245 static
1246 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
1247 						   u8 txch)
1248 {
1249 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1250 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1251 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1252 	u32 cnt;
1253 
1254 	spin_lock_bh(&rtwpci->trx_lock);
1255 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1256 	if (txch != RTW89_TXCH_CH12)
1257 		cnt = min(cnt, wd_ring->curr_num);
1258 	spin_unlock_bh(&rtwpci->trx_lock);
1259 
1260 	return cnt;
1261 }
1262 
1263 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1264 						     u8 txch)
1265 {
1266 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1267 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1268 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1269 	const struct rtw89_chip_info *chip = rtwdev->chip;
1270 	u32 bd_cnt, wd_cnt, min_cnt = 0;
1271 	struct rtw89_pci_rx_ring *rx_ring;
1272 	enum rtw89_debug_mask debug_mask;
1273 	u32 cnt;
1274 
1275 	rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
1276 
1277 	spin_lock_bh(&rtwpci->trx_lock);
1278 	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1279 	wd_cnt = wd_ring->curr_num;
1280 
1281 	if (wd_cnt == 0 || bd_cnt == 0) {
1282 		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
1283 		if (cnt)
1284 			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
1285 		else if (wd_cnt == 0)
1286 			goto out_unlock;
1287 
1288 		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1289 		if (bd_cnt == 0)
1290 			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
1291 	}
1292 
1293 	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1294 	wd_cnt = wd_ring->curr_num;
1295 	min_cnt = min(bd_cnt, wd_cnt);
1296 	if (min_cnt == 0) {
1297 		/* This message can show up frequently in low power mode or under
1298 		 * high traffic with small FIFO chips, and it is recognized as normal
1299 		 * behavior, so print it with mask RTW89_DBG_TXRX in these situations.
1300 		 */
1301 		if (rtwpci->low_power || chip->small_fifo_size)
1302 			debug_mask = RTW89_DBG_TXRX;
1303 		else
1304 			debug_mask = RTW89_DBG_UNEXP;
1305 
1306 		rtw89_debug(rtwdev, debug_mask,
1307 			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
1308 			    wd_cnt, bd_cnt);
1309 	}
1310 
1311 out_unlock:
1312 	spin_unlock_bh(&rtwpci->trx_lock);
1313 
1314 	return min_cnt;
1315 }
1316 
1317 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1318 						   u8 txch)
1319 {
1320 	if (rtwdev->hci.paused)
1321 		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
1322 
1323 	if (txch == RTW89_TXCH_CH12)
1324 		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
1325 
1326 	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
1327 }
1328 
1329 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
1330 {
1331 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1332 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1333 	u32 host_idx, addr;
1334 
1335 	spin_lock_bh(&rtwpci->trx_lock);
1336 
1337 	addr = bd_ring->addr.idx;
1338 	host_idx = bd_ring->wp;
1339 	rtw89_write16(rtwdev, addr, host_idx);
1340 
1341 	spin_unlock_bh(&rtwpci->trx_lock);
1342 }
1343 
1344 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
1345 					int n_txbd)
1346 {
1347 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1348 	u32 host_idx, len;
1349 
1350 	len = bd_ring->len;
1351 	host_idx = bd_ring->wp + n_txbd;
1352 	host_idx = host_idx < len ? host_idx : host_idx - len;
1353 
1354 	bd_ring->wp = host_idx;
1355 }
1356 
1357 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
1358 {
1359 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1360 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1361 
1362 	if (rtwdev->hci.paused) {
1363 		set_bit(txch, rtwpci->kick_map);
1364 		return;
1365 	}
1366 
1367 	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1368 }
1369 
1370 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
1371 {
1372 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1373 	struct rtw89_pci_tx_ring *tx_ring;
1374 	int txch;
1375 
1376 	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1377 		if (!test_and_clear_bit(txch, rtwpci->kick_map))
1378 			continue;
1379 
1380 		tx_ring = &rtwpci->tx.rings[txch];
1381 		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1382 	}
1383 }
1384 
1385 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
1386 {
1387 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1388 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1389 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1390 	u32 cur_idx, cur_rp;
1391 	u8 i;
1392 
1393 	/* Because the time taken by the I/O is a bit dynamic, it's hard to
1394 	 * define a reasonable fixed total timeout for the read_poll_timeout*
1395 	 * helpers. Instead, we can ensure a reasonable number of polling
1396 	 * iterations, so we just use a for loop with udelay here.
1397 	 */
1398 	for (i = 0; i < 60; i++) {
1399 		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
1400 		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
1401 		if (cur_rp == bd_ring->wp)
1402 			return;
1403 
1404 		udelay(1);
1405 	}
1406 
1407 	if (!drop)
1408 		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
1409 }
1410 
1411 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
1412 					bool drop)
1413 {
1414 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1415 	u8 i;
1416 
1417 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1418 		/* It may be unnecessary to flush FWCMD queue. */
1419 		if (i == RTW89_TXCH_CH12)
1420 			continue;
1421 		if (info->tx_dma_ch_mask & BIT(i))
1422 			continue;
1423 
1424 		if (txchs & BIT(i))
1425 			__pci_flush_txch(rtwdev, i, drop);
1426 	}
1427 }
1428 
1429 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
1430 				       bool drop)
1431 {
1432 	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
1433 }
1434 
1435 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
1436 			       void *txaddr_info_addr, u32 total_len,
1437 			       dma_addr_t dma, u8 *add_info_nr)
1438 {
1439 	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
1440 	__le16 option;
1441 
1442 	txaddr_info->length = cpu_to_le16(total_len);
1443 	option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
1444 	option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
1445 	txaddr_info->option = option;
1446 	txaddr_info->dma = cpu_to_le32(dma);
1447 
1448 	*add_info_nr = 1;
1449 
1450 	return sizeof(*txaddr_info);
1451 }
1452 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
1453 
1454 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
1455 				  void *txaddr_info_addr, u32 total_len,
1456 				  dma_addr_t dma, u8 *add_info_nr)
1457 {
1458 	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
1459 	u32 remain = total_len;
1460 	u32 len;
1461 	u16 length_option;
1462 	int n;
1463 
1464 	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
1465 		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
1466 		      TXADDR_INFO_LENTHG_V1_MAX : remain;
1467 		remain -= len;
1468 
1469 		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
1470 				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
1471 				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
1472 		length_option |= u16_encode_bits(upper_32_bits(dma),
1473 						 B_PCIADDR_HIGH_SEL_V1_MASK);
1474 		txaddr_info->length_opt = cpu_to_le16(length_option);
1475 		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
1476 		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
1477 
1478 		dma += len;
1479 		txaddr_info++;
1480 	}
1481 
1482 	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
1483 		  remain, total_len);
1484 
1485 	*add_info_nr = n;
1486 
1487 	return n * sizeof(*txaddr_info);
1488 }
1489 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
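/* Illustrative chunking for the v1 address-info format above (the maximum
 * chunk size is assumed here): if TXADDR_INFO_LENTHG_V1_MAX were 2048, a
 * 5000-byte buffer would be split into entries of 2048, 2048 and 904 bytes,
 * the LS bit set only on the last entry, with each entry advancing the DMA
 * address by the previous length and storing the low 32 bits of the address
 * in the dma_low_lsb/dma_low_msb halves.
 */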
1490 
1491 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
1492 				 struct rtw89_pci_tx_ring *tx_ring,
1493 				 struct rtw89_pci_tx_wd *txwd,
1494 				 struct rtw89_core_tx_request *tx_req)
1495 {
1496 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1497 	const struct rtw89_chip_info *chip = rtwdev->chip;
1498 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1499 	struct rtw89_pci_tx_wp_info *txwp_info;
1500 	void *txaddr_info_addr;
1501 	struct pci_dev *pdev = rtwpci->pdev;
1502 	struct sk_buff *skb = tx_req->skb;
1503 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1504 	bool en_wd_info = desc_info->en_wd_info;
1505 	u32 txwd_len;
1506 	u32 txwp_len;
1507 	u32 txaddr_info_len;
1508 	dma_addr_t dma;
1509 	int ret;
1510 
1511 	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1512 	if (dma_mapping_error(&pdev->dev, dma)) {
1513 		rtw89_err(rtwdev, "failed to map skb dma data\n");
1514 		ret = -EBUSY;
1515 		goto err;
1516 	}
1517 
1518 	tx_data->dma = dma;
1519 
1520 	txwp_len = sizeof(*txwp_info);
1521 	txwd_len = chip->txwd_body_size;
1522 	txwd_len += en_wd_info ? chip->txwd_info_size : 0;
1523 
1524 #if defined(__linux__)
1525 	txwp_info = txwd->vaddr + txwd_len;
1526 #elif defined(__FreeBSD__)
1527 	txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len);
1528 #endif
1529 	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
1530 	txwp_info->seq1 = 0;
1531 	txwp_info->seq2 = 0;
1532 	txwp_info->seq3 = 0;
1533 
1534 	tx_ring->tx_cnt++;
1535 #if defined(__linux__)
1536 	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
1537 #elif defined(__FreeBSD__)
1538 	txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len;
1539 #endif
1540 	txaddr_info_len =
1541 		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
1542 					    dma, &desc_info->addr_info_nr);
1543 
1544 	txwd->len = txwd_len + txwp_len + txaddr_info_len;
1545 
1546 	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1547 
1548 	skb_queue_tail(&txwd->queue, skb);
1549 
1550 	return 0;
1551 
1552 err:
1553 	return ret;
1554 }
1555 
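/* H2C (FW command) TX path: no WD page is used.  The txdesc is pushed in
 * front of the skb data, the whole skb is DMA-mapped, and the TX BD points
 * straight at it (LS flag set, upper DMA address bits carried in ->opt).
 * The skb stays on rtwpci->h2c_queue until rtw89_pci_release_fwcmd()
 * reclaims it.
 */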
rtw89_pci_fwcmd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_bd_32 * txbd,struct rtw89_core_tx_request * tx_req)1556 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1557 				  struct rtw89_pci_tx_ring *tx_ring,
1558 				  struct rtw89_pci_tx_bd_32 *txbd,
1559 				  struct rtw89_core_tx_request *tx_req)
1560 {
1561 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1562 	const struct rtw89_chip_info *chip = rtwdev->chip;
1563 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1564 	void *txdesc;
1565 	int txdesc_size = chip->h2c_desc_size;
1566 	struct pci_dev *pdev = rtwpci->pdev;
1567 	struct sk_buff *skb = tx_req->skb;
1568 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1569 	dma_addr_t dma;
1570 	__le16 opt;
1571 
1572 	txdesc = skb_push(skb, txdesc_size);
1573 	memset(txdesc, 0, txdesc_size);
1574 	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
1575 
1576 	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1577 	if (dma_mapping_error(&pdev->dev, dma)) {
1578 		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1579 		return -EBUSY;
1580 	}
1581 
1582 	tx_data->dma = dma;
1583 	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
1584 	opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
1585 	txbd->opt = opt;
1586 	txbd->length = cpu_to_le16(skb->len);
1587 	txbd->dma = cpu_to_le32(tx_data->dma);
1588 	skb_queue_tail(&rtwpci->h2c_queue, skb);
1589 
1590 	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1591 
1592 	return 0;
1593 }
1594 
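/* Fill one TX BD.  CH12 (FW command) is routed to rtw89_pci_fwcmd_submit();
 * every other channel takes a free WD page, fills it through
 * rtw89_pci_txwd_submit(), moves it to busy_pages, and points the BD at the
 * WD page (txwd->paddr / txwd->len) rather than at the skb itself.
 */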
rtw89_pci_txbd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_bd_32 * txbd,struct rtw89_core_tx_request * tx_req)1595 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1596 				 struct rtw89_pci_tx_ring *tx_ring,
1597 				 struct rtw89_pci_tx_bd_32 *txbd,
1598 				 struct rtw89_core_tx_request *tx_req)
1599 {
1600 	struct rtw89_pci_tx_wd *txwd;
1601 	__le16 opt;
1602 	int ret;
1603 
1604 	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
1605 	 * buffer with WD BODY only. So here we don't need to check the free
1606 	 * pages of the wd ring.
1607 	 */
1608 	if (tx_ring->txch == RTW89_TXCH_CH12)
1609 		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1610 
1611 	txwd = rtw89_pci_dequeue_txwd(tx_ring);
1612 	if (!txwd) {
1613 		rtw89_err(rtwdev, "no available TXWD\n");
1614 		ret = -ENOSPC;
1615 		goto err;
1616 	}
1617 
1618 	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1619 	if (ret) {
1620 		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1621 		goto err_enqueue_wd;
1622 	}
1623 
1624 	list_add_tail(&txwd->list, &tx_ring->busy_pages);
1625 
1626 	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
1627 	opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
1628 	txbd->opt = opt;
1629 	txbd->length = cpu_to_le16(txwd->len);
1630 	txbd->dma = cpu_to_le32(txwd->paddr);
1631 
1632 	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1633 
1634 	return 0;
1635 
1636 err_enqueue_wd:
1637 	rtw89_pci_enqueue_txwd(tx_ring, txwd);
1638 err:
1639 	return ret;
1640 }
1641 
rtw89_pci_tx_write(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req,u8 txch)1642 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1643 			      u8 txch)
1644 {
1645 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1646 	struct rtw89_pci_tx_ring *tx_ring;
1647 	struct rtw89_pci_tx_bd_32 *txbd;
1648 	u32 n_avail_txbd;
1649 	int ret = 0;
1650 
1651 	/* FW CMD must use DMA channel 12, and channel 12 only carries FW CMD */
1652 	if ((txch == RTW89_TXCH_CH12 ||
1653 	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1654 	    (txch != RTW89_TXCH_CH12 ||
1655 	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1656 		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1657 		return -EINVAL;
1658 	}
1659 
1660 	tx_ring = &rtwpci->tx.rings[txch];
1661 	spin_lock_bh(&rtwpci->trx_lock);
1662 
1663 	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1664 	if (n_avail_txbd == 0) {
1665 		rtw89_err(rtwdev, "no available TXBD\n");
1666 		ret = -ENOSPC;
1667 		goto err_unlock;
1668 	}
1669 
1670 	txbd = rtw89_pci_get_next_txbd(tx_ring);
1671 	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1672 	if (ret) {
1673 		rtw89_err(rtwdev, "failed to submit TXBD\n");
1674 		goto err_unlock;
1675 	}
1676 
1677 	spin_unlock_bh(&rtwpci->trx_lock);
1678 	return 0;
1679 
1680 err_unlock:
1681 	spin_unlock_bh(&rtwpci->trx_lock);
1682 	return ret;
1683 }
1684 
rtw89_pci_ops_tx_write(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)1685 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1686 {
1687 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1688 	int ret;
1689 
1690 	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1691 	if (ret) {
1692 		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
1693 		return ret;
1694 	}
1695 
1696 	return 0;
1697 }
1698 
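/* BD RAM layout tables.  Each entry is programmed into the per-channel
 * BDRAM register in rtw89_pci_reset_trx_rings(): start_idx is the first BD
 * RAM cell assigned to the channel, max_num/min_num bound how many cells it
 * may occupy.  The "dual" table covers the full ACH0-ACH7 + CH8-CH12
 * layout, the "single" table the reduced ACH0-ACH3 + CH8/CH9/CH12 set.
 */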
1699 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
1700 	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1701 	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1702 	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1703 	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1704 	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1705 	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1706 	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1707 	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1708 	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
1709 	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
1710 	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1711 	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1712 	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1713 };
1714 EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
1715 
1716 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
1717 	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1718 	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1719 	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1720 	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1721 	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
1722 	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
1723 	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
1724 };
1725 EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1726 
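/* Program the 16 write-pointer selection bytes on chips that provide
 * wp_sel_addr: the loop below writes an identity mapping (selector i gets
 * value i), packed four bytes per 32-bit register.
 */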
rtw89_pci_init_wp_16sel(struct rtw89_dev * rtwdev)1727 static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
1728 {
1729 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1730 	u32 addr = info->wp_sel_addr;
1731 	u32 val;
1732 	int i;
1733 
1734 	if (!info->wp_sel_addr)
1735 		return;
1736 
1737 	for (i = 0; i < 16; i += 4) {
1738 		val = u32_encode_bits(i + 0, MASKBYTE0) |
1739 		      u32_encode_bits(i + 1, MASKBYTE1) |
1740 		      u32_encode_bits(i + 2, MASKBYTE2) |
1741 		      u32_encode_bits(i + 3, MASKBYTE3);
1742 		rtw89_write32(rtwdev, addr + i, val);
1743 	}
1744 }
1745 
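/* Encode a BD count and DMA offset for chips that use grouped BD registers.
 * Illustrative example of the mapping documented in the function body:
 * bd_num = 256 encodes as num_sel = ilog2(256) - 5 = 3, and
 * dma_offset = 2048 encodes as 2048 / 512 = 4.
 */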
rtw89_pci_enc_bd_cfg(struct rtw89_dev * rtwdev,u16 bd_num,u32 dma_offset)1746 static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num,
1747 				u32 dma_offset)
1748 {
1749 	u16 dma_offset_sel;
1750 	u16 num_sel;
1751 
1752 	/* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK:
1753 	 *  0 -> 0
1754 	 *  1 -> 64 = 2^6
1755 	 *  2 -> 128 = 2^7
1756 	 *    ...
1757 	 *  7 -> 4096 = 2^12
1758 	 */
1759 	num_sel = ilog2(bd_num) - 5;
1760 
1761 	if (hweight16(bd_num) != 1)
1762 		rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num);
1763 
1764 	/* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK:
1765 	 *  0 -> 0    = 0 * 2^9
1766 	 *  1 -> 512  = 1 * 2^9
1767 	 *  2 -> 1024 = 2 * 2^9
1768 	 *  3 -> 1536 = 3 * 2^9
1769 	 *    ...
1770 	 *  255 -> 130560 = 255 * 2^9
1771 	 */
1772 	dma_offset_sel = dma_offset >> 9;
1773 
1774 	if (dma_offset % 512)
1775 		rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset);
1776 
1777 	return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) |
1778 	       u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK);
1779 }
1780 
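/* Re-program every TX/RX ring after reset.  Two addressing models are
 * handled: normally the BD count goes to addr.num and the ring DMA base to
 * desa_l/desa_l+4; with info->group_bd_addr a ring that has a desa_l
 * register establishes the group base, and the num register instead holds
 * the rtw89_pci_enc_bd_cfg() encoding of BD count plus offset from that
 * base.  RX rings on chips with rx_ring_eq_is_full start with
 * wp = len - 1 and the idx register primed to that value.
 */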
rtw89_pci_reset_trx_rings(struct rtw89_dev * rtwdev)1781 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1782 {
1783 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1784 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1785 	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
1786 	struct rtw89_pci_tx_ring *tx_ring;
1787 	struct rtw89_pci_rx_ring *rx_ring;
1788 	struct rtw89_pci_dma_ring *bd_ring;
1789 	const struct rtw89_pci_bd_ram *bd_ram;
1790 	dma_addr_t group_dma_base = 0;
1791 	u16 num_or_offset;
1792 	u32 addr_desa_l;
1793 	u32 addr_bdram;
1794 	u32 addr_num;
1795 	u32 addr_idx;
1796 	u32 val32;
1797 	int i;
1798 
1799 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1800 		if (info->tx_dma_ch_mask & BIT(i))
1801 			continue;
1802 
1803 		tx_ring = &rtwpci->tx.rings[i];
1804 		bd_ring = &tx_ring->bd_ring;
1805 		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
1806 		addr_num = bd_ring->addr.num;
1807 		addr_bdram = bd_ring->addr.bdram;
1808 		addr_desa_l = bd_ring->addr.desa_l;
1809 		bd_ring->wp = 0;
1810 		bd_ring->rp = 0;
1811 
1812 		if (info->group_bd_addr) {
1813 			if (addr_desa_l)
1814 				group_dma_base = bd_ring->dma;
1815 
1816 			num_or_offset =
1817 				rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
1818 						     bd_ring->dma - group_dma_base);
1819 		} else {
1820 			num_or_offset = bd_ring->len;
1821 		}
1822 		rtw89_write16(rtwdev, addr_num, num_or_offset);
1823 
1824 		if (addr_bdram && bd_ram) {
1825 			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1826 				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1827 				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1828 
1829 			rtw89_write32(rtwdev, addr_bdram, val32);
1830 		}
1831 		if (addr_desa_l) {
1832 			rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1833 			rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
1834 		}
1835 	}
1836 
1837 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1838 		rx_ring = &rtwpci->rx.rings[i];
1839 		bd_ring = &rx_ring->bd_ring;
1840 		addr_num = bd_ring->addr.num;
1841 		addr_idx = bd_ring->addr.idx;
1842 		addr_desa_l = bd_ring->addr.desa_l;
1843 		if (info->rx_ring_eq_is_full)
1844 			bd_ring->wp = bd_ring->len - 1;
1845 		else
1846 			bd_ring->wp = 0;
1847 		bd_ring->rp = 0;
1848 		rx_ring->diliver_skb = NULL;
1849 		rx_ring->diliver_desc.ready = false;
1850 		rx_ring->target_rx_tag = 0;
1851 
1852 		if (info->group_bd_addr) {
1853 			if (addr_desa_l)
1854 				group_dma_base = bd_ring->dma;
1855 
1856 			num_or_offset =
1857 				rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
1858 						     bd_ring->dma - group_dma_base);
1859 		} else {
1860 			num_or_offset = bd_ring->len;
1861 		}
1862 		rtw89_write16(rtwdev, addr_num, num_or_offset);
1863 
1864 		if (addr_desa_l) {
1865 			rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1866 			rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
1867 		}
1868 
1869 		if (info->rx_ring_eq_is_full)
1870 			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
1871 	}
1872 
1873 	rtw89_pci_init_wp_16sel(rtwdev);
1874 }
1875 
rtw89_pci_release_tx_ring(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring)1876 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1877 				      struct rtw89_pci_tx_ring *tx_ring)
1878 {
1879 	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1880 	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1881 }
1882 
rtw89_pci_ops_reset(struct rtw89_dev * rtwdev)1883 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1884 {
1885 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1886 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1887 	int txch;
1888 
1889 	rtw89_pci_reset_trx_rings(rtwdev);
1890 
1891 	spin_lock_bh(&rtwpci->trx_lock);
1892 	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1893 		if (info->tx_dma_ch_mask & BIT(txch))
1894 			continue;
1895 		if (txch == RTW89_TXCH_CH12) {
1896 			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1897 						skb_queue_len(&rtwpci->h2c_queue), true);
1898 			continue;
1899 		}
1900 		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]);
1901 	}
1902 	spin_unlock_bh(&rtwpci->trx_lock);
1903 }
1904 
rtw89_pci_enable_intr_lock(struct rtw89_dev * rtwdev)1905 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1906 {
1907 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1908 	unsigned long flags;
1909 
1910 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1911 	rtwpci->running = true;
1912 	rtw89_chip_enable_intr(rtwdev, rtwpci);
1913 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1914 }
1915 
rtw89_pci_disable_intr_lock(struct rtw89_dev * rtwdev)1916 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1917 {
1918 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1919 	unsigned long flags;
1920 
1921 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1922 	rtwpci->running = false;
1923 	rtw89_chip_disable_intr(rtwdev, rtwpci);
1924 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1925 }
1926 
rtw89_pci_ops_start(struct rtw89_dev * rtwdev)1927 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1928 {
1929 	rtw89_core_napi_start(rtwdev);
1930 	rtw89_pci_enable_intr_lock(rtwdev);
1931 
1932 	return 0;
1933 }
1934 
rtw89_pci_ops_stop(struct rtw89_dev * rtwdev)1935 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1936 {
1937 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1938 	struct pci_dev *pdev = rtwpci->pdev;
1939 
1940 	rtw89_pci_disable_intr_lock(rtwdev);
1941 	synchronize_irq(pdev->irq);
1942 	rtw89_core_napi_stop(rtwdev);
1943 }
1944 
rtw89_pci_ops_pause(struct rtw89_dev * rtwdev,bool pause)1945 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1946 {
1947 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1948 	struct pci_dev *pdev = rtwpci->pdev;
1949 
1950 	if (pause) {
1951 		rtw89_pci_disable_intr_lock(rtwdev);
1952 		synchronize_irq(pdev->irq);
1953 		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1954 			napi_synchronize(&rtwdev->napi);
1955 	} else {
1956 		rtw89_pci_enable_intr_lock(rtwdev);
1957 		rtw89_pci_tx_kick_off_pending(rtwdev);
1958 	}
1959 }
1960 
1961 static
rtw89_pci_switch_bd_idx_addr(struct rtw89_dev * rtwdev,bool low_power)1962 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1963 {
1964 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1965 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1966 	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1967 	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1968 	struct rtw89_pci_tx_ring *tx_ring;
1969 	struct rtw89_pci_rx_ring *rx_ring;
1970 	int i;
1971 
1972 	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1973 		return;
1974 
1975 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1976 		tx_ring = &rtwpci->tx.rings[i];
1977 		tx_ring->bd_ring.addr.idx = low_power ?
1978 					    bd_idx_addr->tx_bd_addrs[i] :
1979 					    dma_addr_set->tx[i].idx;
1980 	}
1981 
1982 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1983 		rx_ring = &rtwpci->rx.rings[i];
1984 		rx_ring->bd_ring.addr.idx = low_power ?
1985 					    bd_idx_addr->rx_bd_addrs[i] :
1986 					    dma_addr_set->rx[i].idx;
1987 	}
1988 }
1989 
rtw89_pci_ops_switch_mode(struct rtw89_dev * rtwdev,bool low_power)1990 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1991 {
1992 	enum rtw89_pci_intr_mask_cfg cfg;
1993 
1994 	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1995 
1996 	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1997 	rtw89_chip_config_intr_mask(rtwdev, cfg);
1998 	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1999 }
2000 
2001 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
2002 
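/* CMAC register reads can return RTW89_R32_DEAD, presumably while the CMAC
 * clocks are gated.  The helper below retries the read up to
 * MAC_REG_POOL_COUNT times, forcing all CMAC clocks on through
 * R_AX_CK_EN / B_AX_CMAC_ALLCKEN before each retry.
 */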
rtw89_pci_ops_read32_cmac(struct rtw89_dev * rtwdev,u32 addr)2003 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
2004 {
2005 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2006 #if defined(__linux__)
2007 	u32 val = readl(rtwpci->mmap + addr);
2008 #elif defined(__FreeBSD__)
2009 	u32 val;
2010 
2011 	val = bus_read_4((struct resource *)rtwpci->mmap, addr);
2012 	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
2013 #endif
2014 	int count;
2015 
2016 	for (count = 0; ; count++) {
2017 		if (val != RTW89_R32_DEAD)
2018 			return val;
2019 		if (count >= MAC_REG_POOL_COUNT) {
2020 			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
2021 			return RTW89_R32_DEAD;
2022 		}
2023 		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
2024 #if defined(__linux__)
2025 		val = readl(rtwpci->mmap + addr);
2026 #elif defined(__FreeBSD__)
2027 		val = bus_read_4((struct resource *)rtwpci->mmap, addr);
2028 		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
2029 #endif
2030 	}
2031 
2032 	return val;
2033 }
2034 
rtw89_pci_ops_read8(struct rtw89_dev * rtwdev,u32 addr)2035 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
2036 {
2037 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2038 	u32 addr32, val32, shift;
2039 
2040 	if (!ACCESS_CMAC(addr))
2041 #if defined(__linux__)
2042 		return readb(rtwpci->mmap + addr);
2043 #elif defined(__FreeBSD__)
2044 	{
2045 		u8 val;
2046 
2047 		val = bus_read_1((struct resource *)rtwpci->mmap, addr);
2048 		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
2049 		return (val);
2050 	}
2051 #endif
2052 
2053 	addr32 = addr & ~0x3;
2054 	shift = (addr & 0x3) * 8;
2055 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
2056 	return val32 >> shift;
2057 }
2058 
rtw89_pci_ops_read16(struct rtw89_dev * rtwdev,u32 addr)2059 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
2060 {
2061 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2062 	u32 addr32, val32, shift;
2063 
2064 	if (!ACCESS_CMAC(addr))
2065 #if defined(__linux__)
2066 		return readw(rtwpci->mmap + addr);
2067 #elif defined(__FreeBSD__)
2068 	{
2069 		u16 val;
2070 
2071 		val = bus_read_2((struct resource *)rtwpci->mmap, addr);
2072 		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
2073 		return (val);
2074 	}
2075 #endif
2076 
2077 	addr32 = addr & ~0x3;
2078 	shift = (addr & 0x3) * 8;
2079 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
2080 	return val32 >> shift;
2081 }
2082 
rtw89_pci_ops_read32(struct rtw89_dev * rtwdev,u32 addr)2083 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
2084 {
2085 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2086 
2087 	if (!ACCESS_CMAC(addr))
2088 #if defined(__linux__)
2089 		return readl(rtwpci->mmap + addr);
2090 #elif defined(__FreeBSD__)
2091 	{
2092 		u32 val;
2093 
2094 		val = bus_read_4((struct resource *)rtwpci->mmap, addr);
2095 		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
2096 		return (val);
2097 	}
2098 #endif
2099 
2100 	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
2101 }
2102 
rtw89_pci_ops_write8(struct rtw89_dev * rtwdev,u32 addr,u8 data)2103 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
2104 {
2105 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2106 
2107 #if defined(__linux__)
2108 	writeb(data, rtwpci->mmap + addr);
2109 #elif defined(__FreeBSD__)
2110 	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data);
2111 	return (bus_write_1((struct resource *)rtwpci->mmap, addr, data));
2112 #endif
2113 }
2114 
rtw89_pci_ops_write16(struct rtw89_dev * rtwdev,u32 addr,u16 data)2115 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
2116 {
2117 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2118 
2119 #if defined(__linux__)
2120 	writew(data, rtwpci->mmap + addr);
2121 #elif defined(__FreeBSD__)
2122 	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data);
2123 	return (bus_write_2((struct resource *)rtwpci->mmap, addr, data));
2124 #endif
2125 }
2126 
rtw89_pci_ops_write32(struct rtw89_dev * rtwdev,u32 addr,u32 data)2127 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
2128 {
2129 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2130 
2131 #if defined(__linux__)
2132 	writel(data, rtwpci->mmap + addr);
2133 #elif defined(__FreeBSD__)
2134 	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data);
2135 	return (bus_write_4((struct resource *)rtwpci->mmap, addr, data));
2136 #endif
2137 }
2138 
rtw89_pci_ops_read32_pci_cfg(struct rtw89_dev * rtwdev,u32 addr)2139 static u32 rtw89_pci_ops_read32_pci_cfg(struct rtw89_dev *rtwdev, u32 addr)
2140 {
2141 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2142 	struct pci_dev *pdev = rtwpci->pdev;
2143 	u32 value;
2144 	int ret;
2145 
2146 	ret = pci_read_config_dword(pdev, addr, &value);
2147 	if (ret)
2148 		return RTW89_R32_EA;
2149 
2150 	return value;
2151 }
2152 
rtw89_pci_ctrl_dma_trx(struct rtw89_dev * rtwdev,bool enable)2153 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
2154 {
2155 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2156 
2157 	if (enable)
2158 		rtw89_write32_set(rtwdev, info->init_cfg_reg,
2159 				  info->rxhci_en_bit | info->txhci_en_bit);
2160 	else
2161 		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
2162 				  info->rxhci_en_bit | info->txhci_en_bit);
2163 }
2164 
rtw89_pci_ctrl_dma_io(struct rtw89_dev * rtwdev,bool enable)2165 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
2166 {
2167 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2168 	const struct rtw89_reg_def *reg = &info->dma_io_stop;
2169 
2170 	if (enable)
2171 		rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
2172 	else
2173 		rtw89_write32_set(rtwdev, reg->addr, reg->mask);
2174 }
2175 
rtw89_pci_ctrl_dma_all(struct rtw89_dev * rtwdev,bool enable)2176 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
2177 {
2178 	rtw89_pci_ctrl_dma_io(rtwdev, enable);
2179 	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
2180 }
2181 
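/* MDIO access to the PCIe PHY: the 5-bit register address plus a page
 * select (PG0 for addr < 0x20, PG1 otherwise, chosen per Gen1/Gen2 link
 * speed) are written to R_AX_MDIO_CFG, then the read or write flag bit is
 * set and polled until hardware clears it.
 */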
rtw89_pci_check_mdio(struct rtw89_dev * rtwdev,u8 addr,u8 speed,u16 rw_bit)2182 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
2183 {
2184 	u16 val;
2185 
2186 	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
2187 
2188 	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
2189 	switch (speed) {
2190 	case PCIE_PHY_GEN1:
2191 		if (addr < 0x20)
2192 			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
2193 		else
2194 			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
2195 		break;
2196 	case PCIE_PHY_GEN2:
2197 		if (addr < 0x20)
2198 			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
2199 		else
2200 			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
2201 		break;
2202 	default:
2203 		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
2204 		return -EINVAL;
2205 	}
2206 	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
2207 	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
2208 
2209 	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
2210 				 false, rtwdev, R_AX_MDIO_CFG);
2211 }
2212 
2213 static int
rtw89_read16_mdio(struct rtw89_dev * rtwdev,u8 addr,u8 speed,u16 * val)2214 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
2215 {
2216 	int ret;
2217 
2218 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
2219 	if (ret) {
2220 		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
2221 		return ret;
2222 	}
2223 	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
2224 
2225 	return 0;
2226 }
2227 
2228 static int
rtw89_write16_mdio(struct rtw89_dev * rtwdev,u8 addr,u16 data,u8 speed)2229 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
2230 {
2231 	int ret;
2232 
2233 	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
2234 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
2235 	if (ret) {
2236 		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
2237 		return ret;
2238 	}
2239 
2240 	return 0;
2241 }
2242 
2243 static int
rtw89_write16_mdio_mask(struct rtw89_dev * rtwdev,u8 addr,u16 mask,u16 data,u8 speed)2244 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
2245 {
2246 	u32 shift;
2247 	int ret;
2248 	u16 val;
2249 
2250 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2251 	if (ret)
2252 		return ret;
2253 
2254 	shift = __ffs(mask);
2255 	val &= ~mask;
2256 	val |= ((data << shift) & mask);
2257 
2258 	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
2259 	if (ret)
2260 		return ret;
2261 
2262 	return 0;
2263 }
2264 
rtw89_write16_mdio_set(struct rtw89_dev * rtwdev,u8 addr,u16 mask,u8 speed)2265 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
2266 {
2267 	int ret;
2268 	u16 val;
2269 
2270 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2271 	if (ret)
2272 		return ret;
2273 	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
2274 	if (ret)
2275 		return ret;
2276 
2277 	return 0;
2278 }
2279 
rtw89_write16_mdio_clr(struct rtw89_dev * rtwdev,u8 addr,u16 mask,u8 speed)2280 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
2281 {
2282 	int ret;
2283 	u16 val;
2284 
2285 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2286 	if (ret)
2287 		return ret;
2288 	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
2289 	if (ret)
2290 		return ret;
2291 
2292 	return 0;
2293 }
2294 
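/* DBI accessors: an indirect window into the PCIe configuration space via
 * MAC registers.  A byte is written by placing it in the proper lane of
 * R_AX_DBI_WDATA, programming the address and write-enable into
 * R_AX_DBI_FLAG, setting the write flag and polling until it clears.
 * rtw89_pci_{read,write}_config_byte() below fall back to these helpers
 * when the regular pci_{read,write}_config_byte() calls fail.
 */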
rtw89_dbi_write8(struct rtw89_dev * rtwdev,u16 addr,u8 data)2295 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
2296 {
2297 	u16 addr_2lsb = addr & B_AX_DBI_2LSB;
2298 	u16 write_addr;
2299 	u8 flag;
2300 	int ret;
2301 
2302 	write_addr = addr & B_AX_DBI_ADDR_MSK;
2303 	write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
2304 	rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
2305 	rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
2306 	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
2307 
2308 	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2309 				       10 * RTW89_PCI_WR_RETRY_CNT, false,
2310 				       rtwdev, R_AX_DBI_FLAG + 2);
2311 	if (ret)
2312 		rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
2313 			  addr);
2314 
2315 	return ret;
2316 }
2317 
rtw89_dbi_read8(struct rtw89_dev * rtwdev,u16 addr,u8 * value)2318 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
2319 {
2320 	u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
2321 	u8 flag;
2322 	int ret;
2323 
2324 	rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
2325 	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
2326 
2327 	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2328 				       10 * RTW89_PCI_WR_RETRY_CNT, false,
2329 				       rtwdev, R_AX_DBI_FLAG + 2);
2330 	if (ret) {
2331 		rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
2332 			  addr);
2333 		return ret;
2334 	}
2335 
2336 	read_addr = R_AX_DBI_RDATA + (addr & 3);
2337 	*value = rtw89_read8(rtwdev, read_addr);
2338 
2339 	return 0;
2340 }
2341 
rtw89_pci_write_config_byte(struct rtw89_dev * rtwdev,u16 addr,u8 data)2342 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2343 				       u8 data)
2344 {
2345 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2346 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2347 	struct pci_dev *pdev = rtwpci->pdev;
2348 	int ret;
2349 
2350 	ret = pci_write_config_byte(pdev, addr, data);
2351 	if (!ret)
2352 		return 0;
2353 
2354 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2355 		ret = rtw89_dbi_write8(rtwdev, addr, data);
2356 
2357 	return ret;
2358 }
2359 
rtw89_pci_read_config_byte(struct rtw89_dev * rtwdev,u16 addr,u8 * value)2360 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2361 				      u8 *value)
2362 {
2363 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2364 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2365 	struct pci_dev *pdev = rtwpci->pdev;
2366 	int ret;
2367 
2368 	ret = pci_read_config_byte(pdev, addr, value);
2369 	if (!ret)
2370 		return 0;
2371 
2372 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2373 		ret = rtw89_dbi_read8(rtwdev, addr, value);
2374 
2375 	return ret;
2376 }
2377 
rtw89_pci_config_byte_set(struct rtw89_dev * rtwdev,u16 addr,u8 bit)2378 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
2379 				     u8 bit)
2380 {
2381 	u8 value;
2382 	int ret;
2383 
2384 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2385 	if (ret)
2386 		return ret;
2387 
2388 	value |= bit;
2389 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2390 
2391 	return ret;
2392 }
2393 
rtw89_pci_config_byte_clr(struct rtw89_dev * rtwdev,u16 addr,u8 bit)2394 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
2395 				     u8 bit)
2396 {
2397 	u8 value;
2398 	int ret;
2399 
2400 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2401 	if (ret)
2402 		return ret;
2403 
2404 	value &= ~bit;
2405 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2406 
2407 	return ret;
2408 }
2409 
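/* Measure the reference-clock calibration target: restart the counter by
 * clearing and then setting B_AX_CLK_CALIB_EN, wait 300us, and read the
 * 12-bit result; 0 or 0xFFF is treated as a failed measurement.
 */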
2410 static int
__get_target(struct rtw89_dev * rtwdev,u16 * target,enum rtw89_pcie_phy phy_rate)2411 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
2412 {
2413 	u16 val, tar;
2414 	int ret;
2415 
2416 	/* Restart the calibration counter: clear then set B_AX_CLK_CALIB_EN */
2417 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
2418 	if (ret)
2419 		return ret;
2420 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2421 				 phy_rate);
2422 	if (ret)
2423 		return ret;
2424 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
2425 				 phy_rate);
2426 	if (ret)
2427 		return ret;
2428 
2429 	fsleep(300);
2430 
2431 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
2432 	if (ret)
2433 		return ret;
2434 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2435 				 phy_rate);
2436 	if (ret)
2437 		return ret;
2438 
2439 	tar = tar & 0x0FFF;
2440 	if (tar == 0 || tar == 0x0FFF) {
2441 		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
2442 		return -EINVAL;
2443 	}
2444 
2445 	*target = tar;
2446 
2447 	return 0;
2448 }
2449 
rtw89_pci_autok_x(struct rtw89_dev * rtwdev)2450 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2451 {
2452 	int ret;
2453 
2454 	if (!rtw89_is_rtl885xb(rtwdev))
2455 		return 0;
2456 
2457 	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2458 				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
2459 	return ret;
2460 }
2461 
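/* Automatic reference clock calibration (RTL885xB only).  L1BD is
 * temporarily disabled and the calibration counter stopped; when autook_en
 * is set, a target is measured, a margin derived as
 * tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar, and a divider
 * chosen so the margin fits in 4 bits, before the (target, margin) pair is
 * programmed into RAC_SET_PPR_V1 and calibration is re-enabled.
 */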
rtw89_pci_auto_refclk_cal(struct rtw89_dev * rtwdev,bool autook_en)2462 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2463 {
2464 	enum rtw89_pcie_phy phy_rate;
2465 	u16 val16, mgn_set, div_set, tar;
2466 	u8 val8, bdr_ori;
2467 	bool l1_flag = false;
2468 	int ret = 0;
2469 
2470 	if (!rtw89_is_rtl885xb(rtwdev))
2471 		return 0;
2472 
2473 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2474 	if (ret) {
2475 		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2476 			  RTW89_PCIE_PHY_RATE);
2477 		return ret;
2478 	}
2479 
2480 	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2481 		phy_rate = PCIE_PHY_GEN1;
2482 	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2483 		phy_rate = PCIE_PHY_GEN2;
2484 	} else {
2485 		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
2486 		return -EOPNOTSUPP;
2487 	}
2488 	/* Disable L1BD */
2489 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2490 	if (ret) {
2491 		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2492 		return ret;
2493 	}
2494 
2495 	if (bdr_ori & RTW89_PCIE_BIT_L1) {
2496 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2497 						  bdr_ori & ~RTW89_PCIE_BIT_L1);
2498 		if (ret) {
2499 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2500 				  RTW89_PCIE_L1_CTRL);
2501 			return ret;
2502 		}
2503 		l1_flag = true;
2504 	}
2505 
2506 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2507 	if (ret) {
2508 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2509 		goto end;
2510 	}
2511 
2512 	if (val16 & B_AX_CALIB_EN) {
2513 		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2514 					 val16 & ~B_AX_CALIB_EN, phy_rate);
2515 		if (ret) {
2516 			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2517 			goto end;
2518 		}
2519 	}
2520 
2521 	if (!autook_en)
2522 		goto end;
2523 	/* Set div */
2524 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2525 	if (ret) {
2526 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2527 		goto end;
2528 	}
2529 
2530 	/* Obtain div and margin */
2531 	ret = __get_target(rtwdev, &tar, phy_rate);
2532 	if (ret) {
2533 		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2534 		goto end;
2535 	}
2536 
2537 	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2538 
2539 	if (mgn_set >= 128) {
2540 		div_set = 0x0003;
2541 		mgn_set = 0x000F;
2542 	} else if (mgn_set >= 64) {
2543 		div_set = 0x0003;
2544 		mgn_set >>= 3;
2545 	} else if (mgn_set >= 32) {
2546 		div_set = 0x0002;
2547 		mgn_set >>= 2;
2548 	} else if (mgn_set >= 16) {
2549 		div_set = 0x0001;
2550 		mgn_set >>= 1;
2551 	} else if (mgn_set == 0) {
2552 		rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
2553 		goto end;
2554 	} else {
2555 		div_set = 0x0000;
2556 	}
2557 
2558 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2559 	if (ret) {
2560 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2561 		goto end;
2562 	}
2563 
2564 	val16 |= u16_encode_bits(div_set, B_AX_DIV);
2565 
2566 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2567 	if (ret) {
2568 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2569 		goto end;
2570 	}
2571 
2572 	ret = __get_target(rtwdev, &tar, phy_rate);
2573 	if (ret) {
2574 		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2575 		goto end;
2576 	}
2577 
2578 	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2579 		    tar, div_set, mgn_set);
2580 	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2581 				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2582 	if (ret) {
2583 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2584 		goto end;
2585 	}
2586 
2587 	/* Enable the calibration function */
2588 	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2589 	if (ret) {
2590 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2591 		goto end;
2592 	}
2593 
2594 	/* CLK delay = 0 */
2595 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2596 					  PCIE_CLKDLY_HW_0);
2597 
2598 end:
2599 	/* Restore L1BD to its original value */
2600 	if (l1_flag) {
2601 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2602 						  bdr_ori);
2603 		if (ret) {
2604 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2605 				  RTW89_PCIE_L1_CTRL);
2606 			return ret;
2607 		}
2608 	}
2609 
2610 	return ret;
2611 }
2612 
rtw89_pci_deglitch_setting(struct rtw89_dev * rtwdev)2613 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2614 {
2615 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2616 	int ret;
2617 
2618 	if (chip_id == RTL8852A) {
2619 		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2620 					     PCIE_PHY_GEN1);
2621 		if (ret)
2622 			return ret;
2623 		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2624 					     PCIE_PHY_GEN2);
2625 		if (ret)
2626 			return ret;
2627 	} else if (chip_id == RTL8852C) {
2628 		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2629 				  B_AX_DEGLITCH);
2630 		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2631 				  B_AX_DEGLITCH);
2632 	}
2633 
2634 	return 0;
2635 }
2636 
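/* RTL8852C only.  Roughly: when OOBS is not yet selected on both the Gen1
 * and Gen2 PHY banks, sample the OOBS level from the active link's PHY bank
 * and apply it (with BAC_OOBS_SEL) to both banks, then read the
 * offset-calibration result and program it as the manual level on both
 * banks, clearing OFFSET_CAL_MODE.  ASPM is masked off for the duration and
 * restored afterwards.
 */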
rtw89_pci_disable_eq_ax(struct rtw89_dev * rtwdev)2637 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev)
2638 {
2639 	u16 g1_oobs, g2_oobs;
2640 	u32 backup_aspm;
2641 	u32 phy_offset;
2642 	u16 offset_cal;
2643 	u16 oobs_val;
2644 	int ret;
2645 	u8 gen;
2646 
2647 	if (rtwdev->chip->chip_id != RTL8852C)
2648 		return;
2649 
2650 	g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2651 					    RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2652 	g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
2653 					    RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2654 	if (g1_oobs && g2_oobs)
2655 		return;
2656 
2657 	backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
2658 	rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
2659 
2660 	ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
2661 	if (ret)
2662 		goto out;
2663 
2664 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
2665 	rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
2666 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
2667 
2668 	oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
2669 				     OOBS_LEVEL_MASK);
2670 
2671 	rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT,
2672 			   OOBS_SEN_MASK, oobs_val);
2673 	rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
2674 			  BAC_OOBS_SEL);
2675 
2676 	rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT,
2677 			   OOBS_SEN_MASK, oobs_val);
2678 	rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
2679 			  BAC_OOBS_SEL);
2680 
2681 	/* offset calibration (offset K) */
2682 	for (gen = 1; gen <= 2; gen++) {
2683 		phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2684 					R_RAC_DIRECT_OFFSET_G2;
2685 
2686 		rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
2687 				  B_PCIE_BIT_RD_SEL);
2688 	}
2689 
2690 	offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2691 					       RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK);
2692 
2693 	for (gen = 1; gen <= 2; gen++) {
2694 		phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2695 					R_RAC_DIRECT_OFFSET_G2;
2696 
2697 		rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT,
2698 				   MANUAL_LVL_MASK, offset_cal);
2699 		rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT,
2700 				  OFFSET_CAL_MODE);
2701 	}
2702 
2703 out:
2704 	rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
2705 }
2706 
rtw89_pci_ber(struct rtw89_dev * rtwdev)2707 static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
2708 {
2709 	u32 phy_offset;
2710 
2711 	if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
2712 		return;
2713 
2714 	phy_offset = R_RAC_DIRECT_OFFSET_G1;
2715 	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
2716 	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2717 
2718 	phy_offset = R_RAC_DIRECT_OFFSET_G2;
2719 	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
2720 	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2721 }
2722 
rtw89_pci_rxdma_prefth(struct rtw89_dev * rtwdev)2723 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2724 {
2725 	if (rtwdev->chip->chip_id != RTL8852A)
2726 		return;
2727 
2728 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2729 }
2730 
rtw89_pci_l1off_pwroff(struct rtw89_dev * rtwdev)2731 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2732 {
2733 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2734 
2735 	if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2736 		return;
2737 
2738 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2739 }
2740 
rtw89_pci_l2_rxen_lat(struct rtw89_dev * rtwdev)2741 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2742 {
2743 	int ret;
2744 
2745 	if (rtwdev->chip->chip_id != RTL8852A)
2746 		return 0;
2747 
2748 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2749 				     PCIE_PHY_GEN1);
2750 	if (ret)
2751 		return ret;
2752 
2753 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2754 				     PCIE_PHY_GEN2);
2755 	if (ret)
2756 		return ret;
2757 
2758 	return 0;
2759 }
2760 
rtw89_pci_aphy_pwrcut(struct rtw89_dev * rtwdev)2761 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2762 {
2763 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2764 
2765 	if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2766 		return;
2767 
2768 	rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2769 }
2770 
rtw89_pci_hci_ldo(struct rtw89_dev * rtwdev)2771 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2772 {
2773 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2774 
2775 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2776 		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2777 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2778 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2779 				  B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2780 	} else if (rtwdev->chip->chip_id == RTL8852C) {
2781 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2782 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2783 	}
2784 }
2785 
rtw89_pci_dphy_delay(struct rtw89_dev * rtwdev)2786 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2787 {
2788 	if (!rtw89_is_rtl885xb(rtwdev))
2789 		return 0;
2790 
2791 	return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2792 				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2793 }
2794 
rtw89_pci_power_wake_ax(struct rtw89_dev * rtwdev,bool pwr_up)2795 static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
2796 {
2797 	if (pwr_up)
2798 		rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2799 	else
2800 		rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2801 }
2802 
rtw89_pci_autoload_hang(struct rtw89_dev * rtwdev)2803 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2804 {
2805 	if (rtwdev->chip->chip_id != RTL8852C)
2806 		return;
2807 
2808 	rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2809 	rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2810 }
2811 
rtw89_pci_l12_vmain(struct rtw89_dev * rtwdev)2812 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2813 {
2814 	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2815 		return;
2816 
2817 	rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2818 }
2819 
rtw89_pci_gen2_force_ib(struct rtw89_dev * rtwdev)2820 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2821 {
2822 	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2823 		return;
2824 
2825 	rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2826 			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2827 	rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2828 	rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2829 			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2830 }
2831 
rtw89_pci_l1_ent_lat(struct rtw89_dev * rtwdev)2832 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2833 {
2834 	if (rtwdev->chip->chip_id != RTL8852C)
2835 		return;
2836 
2837 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2838 }
2839 
rtw89_pci_wd_exit_l1(struct rtw89_dev * rtwdev)2840 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2841 {
2842 	if (rtwdev->chip->chip_id != RTL8852C)
2843 		return;
2844 
2845 	rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2846 }
2847 
rtw89_pci_set_sic(struct rtw89_dev * rtwdev)2848 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2849 {
2850 	if (rtwdev->chip->chip_id == RTL8852C)
2851 		return;
2852 
2853 	rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2854 			  B_AX_SIC_EN_FORCE_CLKREQ);
2855 }
2856 
rtw89_pci_set_lbc(struct rtw89_dev * rtwdev)2857 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2858 {
2859 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2860 	u32 lbc;
2861 
2862 	if (rtwdev->chip->chip_id == RTL8852C)
2863 		return;
2864 
2865 	lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2866 	if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2867 		lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2868 		lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2869 		rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2870 	} else {
2871 		lbc &= ~B_AX_LBC_EN;
2872 	}
2873 	rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2874 }
2875 
rtw89_pci_set_io_rcy(struct rtw89_dev * rtwdev)2876 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2877 {
2878 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2879 	u32 val32;
2880 
2881 	if (rtwdev->chip->chip_id != RTL8852C)
2882 		return;
2883 
2884 	if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2885 		val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2886 				   info->io_rcy_tmr);
2887 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2888 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2889 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2890 
2891 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2892 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2893 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2894 	} else {
2895 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2896 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2897 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2898 	}
2899 
2900 	rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2901 }
2902 
rtw89_pci_set_dbg(struct rtw89_dev * rtwdev)2903 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2904 {
2905 	if (rtwdev->chip->chip_id == RTL8852C)
2906 		return;
2907 
2908 	rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2909 			  B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2910 
2911 	rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL,
2912 			   B_AX_EN_STUCK_DBG | B_AX_ASFF_FULL_NO_STK,
2913 			   B_AX_EN_STUCK_DBG);
2914 
2915 	if (rtwdev->chip->chip_id == RTL8852A)
2916 		rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2917 				  B_AX_EN_CHKDSC_NO_RX_STUCK);
2918 }
2919 
rtw89_pci_set_keep_reg(struct rtw89_dev * rtwdev)2920 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2921 {
2922 	if (rtwdev->chip->chip_id == RTL8852C)
2923 		return;
2924 
2925 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2926 			  B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2927 }
2928 
rtw89_pci_clr_idx_all_ax(struct rtw89_dev * rtwdev)2929 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
2930 {
2931 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2932 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2933 	u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2934 		  B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2935 		  B_AX_CLR_CH12_IDX;
2936 	u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2937 	u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2938 
2939 	if (chip_id == RTL8852A || chip_id == RTL8852C)
2940 		val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2941 		       B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2942 	/* clear DMA indexes */
2943 	rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2944 	if (chip_id == RTL8852A || chip_id == RTL8852C)
2945 		rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2946 				  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2947 	rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2948 			  B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2949 }
2950 
rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev * rtwdev)2951 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2952 {
2953 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2954 	u32 dma_busy1 = info->dma_busy1.addr;
2955 	u32 dma_busy2 = info->dma_busy2_reg;
2956 	u32 check, dma_busy;
2957 	int ret;
2958 
2959 	check = info->dma_busy1.mask;
2960 
2961 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2962 				10, 100, false, rtwdev, dma_busy1);
2963 	if (ret)
2964 		return ret;
2965 
2966 	if (!dma_busy2)
2967 		return 0;
2968 
2969 	check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2970 
2971 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2972 				10, 100, false, rtwdev, dma_busy2);
2973 	if (ret)
2974 		return ret;
2975 
2976 	return 0;
2977 }
2978 
rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev * rtwdev)2979 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2980 {
2981 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2982 	u32 dma_busy3 = info->dma_busy3_reg;
2983 	u32 check, dma_busy;
2984 	int ret;
2985 
2986 	check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2987 
2988 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2989 				10, 100, false, rtwdev, dma_busy3);
2990 	if (ret)
2991 		return ret;
2992 
2993 	return 0;
2994 }
2995 
rtw89_pci_poll_dma_all_idle(struct rtw89_dev * rtwdev)2996 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2997 {
2998 	int ret;
2999 
3000 	ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
3001 	if (ret) {
3002 		rtw89_err(rtwdev, "txdma ch busy\n");
3003 		return ret;
3004 	}
3005 
3006 	ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
3007 	if (ret) {
3008 		rtw89_err(rtwdev, "rxdma ch busy\n");
3009 		return ret;
3010 	}
3011 
3012 	return 0;
3013 }
3014 
rtw89_pci_mode_op(struct rtw89_dev * rtwdev)3015 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
3016 {
3017 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3018 	enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
3019 	enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
3020 	enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
3021 	enum mac_ax_tag_mode tag_mode = info->tag_mode;
3022 	enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
3023 	enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
3024 	enum mac_ax_tx_burst tx_burst = info->tx_burst;
3025 	enum mac_ax_rx_burst rx_burst = info->rx_burst;
3026 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3027 	u8 cv = rtwdev->hal.cv;
3028 	u32 val32;
3029 
3030 	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
3031 		if (chip_id == RTL8852A && cv == CHIP_CBV)
3032 			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
3033 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
3034 		if (chip_id == RTL8852A || chip_id == RTL8852B)
3035 			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
3036 	}
3037 
3038 	if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
3039 		if (chip_id == RTL8852A && cv == CHIP_CBV)
3040 			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
3041 	} else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
3042 		if (chip_id == RTL8852A || chip_id == RTL8852B)
3043 			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
3044 	}
3045 
3046 	if (rxbd_mode == MAC_AX_RXBD_PKT) {
3047 		rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
3048 	} else if (rxbd_mode == MAC_AX_RXBD_SEP) {
3049 		rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
3050 
3051 		if (chip_id == RTL8852A || chip_id == RTL8852B)
3052 			rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
3053 					   B_AX_PCIE_RX_APPLEN_MASK, 0);
3054 	}
3055 
3056 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3057 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
3058 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
3059 	} else if (chip_id == RTL8852C) {
3060 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
3061 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
3062 	}
3063 
3064 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3065 		if (tag_mode == MAC_AX_TAG_SGL) {
3066 			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
3067 					    ~B_AX_LATENCY_CONTROL;
3068 			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
3069 		} else if (tag_mode == MAC_AX_TAG_MULTI) {
3070 			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
3071 					    B_AX_LATENCY_CONTROL;
3072 			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
3073 		}
3074 	}
3075 
3076 	rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
3077 			   info->multi_tag_num);
3078 
3079 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3080 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
3081 				   wd_dma_idle_intvl);
3082 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
3083 				   wd_dma_act_intvl);
3084 	} else if (chip_id == RTL8852C) {
3085 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
3086 				   wd_dma_idle_intvl);
3087 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
3088 				   wd_dma_act_intvl);
3089 	}
3090 
3091 	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
3092 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3093 				  B_AX_HOST_ADDR_INFO_8B_SEL);
3094 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3095 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
3096 		rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3097 				  B_AX_HOST_ADDR_INFO_8B_SEL);
3098 		rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3099 	}
3100 
3101 	return 0;
3102 }
3103 
rtw89_pci_ops_deinit(struct rtw89_dev * rtwdev)3104 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
3105 {
3106 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3107 
3108 	rtw89_pci_power_wake(rtwdev, false);
3109 
3110 	if (rtwdev->chip->chip_id == RTL8852A) {
3111 		/* LTR software trigger */
3112 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
3113 	}
3114 	info->ltr_set(rtwdev, false);
3115 	rtw89_pci_ctrl_dma_all(rtwdev, false);
3116 	rtw89_pci_clr_idx_all(rtwdev);
3117 
3118 	return 0;
3119 }
3120 
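/* PCI/MAC pre-init for AX chips, in the order used below: electrical and
 * PHY tweaks first (BER quirk, RXDMA prefetch, deglitch, L2 RX enable
 * latency, aPHY power cut, HCI LDO, DPHY delay, autok and reference clock
 * calibration), then power-wake and chip-specific register setup, stop
 * WPDMA and all DMA, wait for the engines to idle, clear the ring indexes,
 * program the rings (rtw89_pci_ops_reset), reset BD RAM, and finally
 * re-enable DMA with only the FW CMD TX channel active so that firmware can
 * be downloaded.
 */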
rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev * rtwdev)3121 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
3122 {
3123 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3124 	int ret;
3125 
3126 	rtw89_pci_ber(rtwdev);
3127 	rtw89_pci_rxdma_prefth(rtwdev);
3128 	rtw89_pci_l1off_pwroff(rtwdev);
3129 	rtw89_pci_deglitch_setting(rtwdev);
3130 	ret = rtw89_pci_l2_rxen_lat(rtwdev);
3131 	if (ret) {
3132 		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
3133 		return ret;
3134 	}
3135 
3136 	rtw89_pci_aphy_pwrcut(rtwdev);
3137 	rtw89_pci_hci_ldo(rtwdev);
3138 	rtw89_pci_dphy_delay(rtwdev);
3139 
3140 	ret = rtw89_pci_autok_x(rtwdev);
3141 	if (ret) {
3142 		rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
3143 		return ret;
3144 	}
3145 
3146 	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
3147 	if (ret) {
3148 		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
3149 		return ret;
3150 	}
3151 
3152 	rtw89_pci_power_wake_ax(rtwdev, true);
3153 	rtw89_pci_autoload_hang(rtwdev);
3154 	rtw89_pci_l12_vmain(rtwdev);
3155 	rtw89_pci_gen2_force_ib(rtwdev);
3156 	rtw89_pci_l1_ent_lat(rtwdev);
3157 	rtw89_pci_wd_exit_l1(rtwdev);
3158 	rtw89_pci_set_sic(rtwdev);
3159 	rtw89_pci_set_lbc(rtwdev);
3160 	rtw89_pci_set_io_rcy(rtwdev);
3161 	rtw89_pci_set_dbg(rtwdev);
3162 	rtw89_pci_set_keep_reg(rtwdev);
3163 
3164 	rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
3165 
3166 	/* stop DMA activities */
3167 	rtw89_pci_ctrl_dma_all(rtwdev, false);
3168 
3169 	ret = rtw89_pci_poll_dma_all_idle(rtwdev);
3170 	if (ret) {
3171 		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
3172 		return ret;
3173 	}
3174 
3175 	rtw89_pci_clr_idx_all(rtwdev);
3176 	rtw89_pci_mode_op(rtwdev);
3177 
3178 	/* fill TRX BD indexes */
3179 	rtw89_pci_ops_reset(rtwdev);
3180 
3181 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
3182 	if (ret) {
3183 		rtw89_warn(rtwdev, "reset bdram busy\n");
3184 		return ret;
3185 	}
3186 
3187 	/* disable all channels except the FW CMD channel to download firmware */
3188 	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
3189 	rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
3190 
3191 	/* start DMA activities */
3192 	rtw89_pci_ctrl_dma_all(rtwdev, true);
3193 
3194 	return 0;
3195 }
3196 
3197 static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
3198 {
3199 	rtw89_pci_power_wake_ax(rtwdev, false);
3200 
3201 	return 0;
3202 }
3203 
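/* Program the AX-generation LTR (Latency Tolerance Reporting) block. Bail out
 * if any LTR register reads back as an error value; otherwise enable HW LTR
 * with a 500us space index, 3.2ms idle timer, RX thresholds and the fixed
 * idle/active latency values below. Disabling requires no programming here.
 */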
3204 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
3205 {
3206 	u32 val;
3207 
3208 	if (!en)
3209 		return 0;
3210 
3211 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
3212 	if (rtw89_pci_ltr_is_err_reg_val(val))
3213 		return -EINVAL;
3214 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
3215 	if (rtw89_pci_ltr_is_err_reg_val(val))
3216 		return -EINVAL;
3217 	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
3218 	if (rtw89_pci_ltr_is_err_reg_val(val))
3219 		return -EINVAL;
3220 	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
3221 	if (rtw89_pci_ltr_is_err_reg_val(val))
3222 		return -EINVAL;
3223 
3224 	rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
3225 						   B_AX_LTR_WD_NOEMP_CHK);
3226 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
3227 			   PCI_LTR_SPC_500US);
3228 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
3229 			   PCI_LTR_IDLE_TIMER_3_2MS);
3230 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
3231 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
3232 	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
3233 	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
3234 
3235 	return 0;
3236 }
3237 EXPORT_SYMBOL(rtw89_pci_ltr_set);
3238 
3239 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
3240 {
3241 	u32 dec_ctrl;
3242 	u32 val32;
3243 
3244 	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
3245 	if (rtw89_pci_ltr_is_err_reg_val(val32))
3246 		return -EINVAL;
3247 	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
3248 	if (rtw89_pci_ltr_is_err_reg_val(val32))
3249 		return -EINVAL;
3250 	dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
3251 	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
3252 		return -EINVAL;
3253 	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
3254 	if (rtw89_pci_ltr_is_err_reg_val(val32))
3255 		return -EINVAL;
3256 	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
3257 	if (rtw89_pci_ltr_is_err_reg_val(val32))
3258 		return -EINVAL;
3259 
3260 	if (!en) {
3261 		dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
3262 		dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
3263 			    B_AX_LTR_REQ_DRV;
3264 	} else {
3265 		dec_ctrl |= B_AX_LTR_HW_DEC_EN;
3266 	}
3267 
3268 	dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
3269 	dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
3270 
3271 	if (en)
3272 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
3273 				  B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
3274 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
3275 			   PCI_LTR_IDLE_TIMER_3_2MS);
3276 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
3277 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
3278 	rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
3279 	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
3280 	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
3281 
3282 	return 0;
3283 }
3284 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
3285 
3286 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
3287 {
3288 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3289 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3290 	int ret;
3291 
3292 	ret = info->ltr_set(rtwdev, true);
3293 	if (ret) {
3294 		rtw89_err(rtwdev, "pci ltr set fail\n");
3295 		return ret;
3296 	}
3297 	if (chip_id == RTL8852A) {
3298 		/* ltr sw trigger */
3299 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
3300 	}
3301 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3302 		/* ADDR info 8-byte mode */
3303 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3304 				  B_AX_HOST_ADDR_INFO_8B_SEL);
3305 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3306 	}
3307 
3308 	/* enable DMA for all queues */
3309 	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
3310 
3311 	/* Release PCI IO */
3312 	rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
3313 			  B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
3314 
3315 	return 0;
3316 }
3317 
3318 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
3319 				  struct pci_dev *pdev)
3320 {
3321 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3322 	int ret;
3323 
3324 	ret = pci_enable_device(pdev);
3325 	if (ret) {
3326 		rtw89_err(rtwdev, "failed to enable pci device\n");
3327 		return ret;
3328 	}
3329 
3330 	pci_set_master(pdev);
3331 	pci_set_drvdata(pdev, rtwdev->hw);
3332 
3333 	rtwpci->pdev = pdev;
3334 
3335 	return 0;
3336 }
3337 
3338 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
3339 				     struct pci_dev *pdev)
3340 {
3341 	pci_disable_device(pdev);
3342 }
3343 
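/* These chips require the driver to set the DAC enable bit manually via
 * rtw89_pci_cfg_dac() before DMA addressing above 32 bits can be used.
 */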
3344 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev)
3345 {
3346 	const struct rtw89_chip_info *chip = rtwdev->chip;
3347 
3348 	switch (chip->chip_id) {
3349 	case RTL8852A:
3350 	case RTL8852B:
3351 	case RTL8851B:
3352 	case RTL8852BT:
3353 		return true;
3354 	default:
3355 		return false;
3356 	}
3357 }
3358 
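/* For chips that need manual DAC setup, only allow it when the upstream
 * bridge is known to cope: any Intel bridge, or the ASMedia 0x2806.
 * Other chips are always considered compatible.
 */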
3359 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
3360 {
3361 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3362 	struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev);
3363 
3364 	if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3365 		return true;
3366 
3367 	if (!bridge)
3368 		return false;
3369 
3370 	switch (bridge->vendor) {
3371 	case PCI_VENDOR_ID_INTEL:
3372 		return true;
3373 	case PCI_VENDOR_ID_ASMEDIA:
3374 		if (bridge->device == 0x2806)
3375 			return true;
3376 		break;
3377 	}
3378 
3379 	return false;
3380 }
3381 
3382 static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force)
3383 {
3384 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3385 	struct pci_dev *pdev = rtwpci->pdev;
3386 	int ret;
3387 	u8 val;
3388 
3389 	if (!rtwpci->enable_dac && !force)
3390 		return 0;
3391 
3392 	if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3393 		return 0;
3394 
3395 	/* Configure DAC only via PCI config API, not DBI interfaces */
3396 	ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val);
3397 	if (ret)
3398 		return ret;
3399 
3400 	val |= RTW89_PCIE_BIT_EN_64BITS;
3401 	return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val);
3402 }
3403 
3404 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
3405 				   struct pci_dev *pdev)
3406 {
3407 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3408 	unsigned long resource_len;
3409 	u8 bar_id = 2;
3410 	int ret;
3411 
3412 	ret = pci_request_regions(pdev, KBUILD_MODNAME);
3413 	if (ret) {
3414 		rtw89_err(rtwdev, "failed to request pci regions\n");
3415 		goto err;
3416 	}
3417 
3418 	if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
3419 		goto try_dac_done;
3420 
3421 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
3422 	if (!ret) {
3423 		ret = rtw89_pci_cfg_dac(rtwdev, true);
3424 		if (!ret) {
3425 			rtwpci->enable_dac = true;
3426 			goto try_dac_done;
3427 		}
3428 
3429 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3430 		if (ret) {
3431 			rtw89_err(rtwdev,
3432 				  "failed to set dma and consistent mask to 32/36-bit\n");
3433 			goto err_release_regions;
3434 		}
3435 	}
3436 try_dac_done:
3437 
3438 #if defined(__FreeBSD__)
3439 	linuxkpi_pcim_want_to_use_bus_functions(pdev);
3440 #endif
3441 	resource_len = pci_resource_len(pdev, bar_id);
3442 	rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
3443 	if (!rtwpci->mmap) {
3444 		rtw89_err(rtwdev, "failed to map pci io\n");
3445 		ret = -EIO;
3446 		goto err_release_regions;
3447 	}
3448 
3449 	return 0;
3450 
3451 err_release_regions:
3452 	pci_release_regions(pdev);
3453 err:
3454 	return ret;
3455 }
3456 
3457 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
3458 				    struct pci_dev *pdev)
3459 {
3460 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3461 
3462 	if (rtwpci->mmap) {
3463 		pci_iounmap(pdev, rtwpci->mmap);
3464 		pci_release_regions(pdev);
3465 	}
3466 }
3467 
3468 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
3469 				      struct pci_dev *pdev,
3470 				      struct rtw89_pci_tx_ring *tx_ring)
3471 {
3472 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3473 	u8 *head = wd_ring->head;
3474 	dma_addr_t dma = wd_ring->dma;
3475 	u32 page_size = wd_ring->page_size;
3476 	u32 page_num = wd_ring->page_num;
3477 	u32 ring_sz = page_size * page_num;
3478 
3479 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3480 	wd_ring->head = NULL;
3481 }
3482 
3483 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
3484 				   struct pci_dev *pdev,
3485 				   struct rtw89_pci_tx_ring *tx_ring)
3486 {
3487 	tx_ring->bd_ring.head = NULL;
3488 }
3489 
3490 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
3491 				    struct pci_dev *pdev)
3492 {
3493 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3494 	struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
3495 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3496 	struct rtw89_pci_tx_ring *tx_ring;
3497 	int i;
3498 
3499 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3500 		if (info->tx_dma_ch_mask & BIT(i))
3501 			continue;
3502 		tx_ring = &rtwpci->tx.rings[i];
3503 		rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3504 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3505 	}
3506 
3507 	dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
3508 }
3509 
3510 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
3511 				   struct pci_dev *pdev,
3512 				   struct rtw89_pci_rx_ring *rx_ring)
3513 {
3514 	struct rtw89_pci_rx_info *rx_info;
3515 	struct sk_buff *skb;
3516 	dma_addr_t dma;
3517 	u32 buf_sz;
3518 	int i;
3519 
3520 	buf_sz = rx_ring->buf_sz;
3521 	for (i = 0; i < rx_ring->bd_ring.len; i++) {
3522 		skb = rx_ring->buf[i];
3523 		if (!skb)
3524 			continue;
3525 
3526 		rx_info = RTW89_PCI_RX_SKB_CB(skb);
3527 		dma = rx_info->dma;
3528 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3529 		dev_kfree_skb(skb);
3530 		rx_ring->buf[i] = NULL;
3531 	}
3532 
3533 	rx_ring->bd_ring.head = NULL;
3534 }
3535 
3536 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
3537 				    struct pci_dev *pdev)
3538 {
3539 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3540 	struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
3541 	struct rtw89_pci_rx_ring *rx_ring;
3542 	int i;
3543 
3544 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3545 		rx_ring = &rtwpci->rx.rings[i];
3546 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3547 	}
3548 
3549 	dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
3550 }
3551 
3552 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
3553 				     struct pci_dev *pdev)
3554 {
3555 	rtw89_pci_free_rx_rings(rtwdev, pdev);
3556 	rtw89_pci_free_tx_rings(rtwdev, pdev);
3557 }
3558 
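/* Initialize a single RX buffer descriptor: DMA-map the skb data buffer and
 * fill in the buffer size, the low 32 bits of the address, and the upper
 * address bits in the 'opt' field.
 */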
3559 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
3560 				struct rtw89_pci_rx_ring *rx_ring,
3561 				struct sk_buff *skb, int buf_sz, u32 idx)
3562 {
3563 	struct rtw89_pci_rx_info *rx_info;
3564 	struct rtw89_pci_rx_bd_32 *rx_bd;
3565 	dma_addr_t dma;
3566 
3567 	if (!skb)
3568 		return -EINVAL;
3569 
3570 	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
3571 	if (dma_mapping_error(&pdev->dev, dma))
3572 		return -EBUSY;
3573 
3574 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
3575 	rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
3576 
3577 	memset(rx_bd, 0, sizeof(*rx_bd));
3578 	rx_bd->buf_size = cpu_to_le16(buf_sz);
3579 	rx_bd->dma = cpu_to_le32(dma);
3580 	rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI);
3581 	rx_info->dma = dma;
3582 
3583 	return 0;
3584 }
3585 
3586 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
3587 				      struct pci_dev *pdev,
3588 				      struct rtw89_pci_tx_ring *tx_ring,
3589 				      enum rtw89_tx_channel txch)
3590 {
3591 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3592 	struct rtw89_pci_tx_wd *txwd;
3593 	dma_addr_t dma;
3594 	dma_addr_t cur_paddr;
3595 	u8 *head;
3596 	u8 *cur_vaddr;
3597 	u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
3598 	u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
3599 	u32 ring_sz = page_size * page_num;
3600 	u32 page_offset;
3601 	int i;
3602 
3603 	/* FWCMD queue doesn't use txwd as pages */
3604 	if (txch == RTW89_TXCH_CH12)
3605 		return 0;
3606 
3607 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3608 	if (!head)
3609 		return -ENOMEM;
3610 
3611 	INIT_LIST_HEAD(&wd_ring->free_pages);
3612 	wd_ring->head = head;
3613 	wd_ring->dma = dma;
3614 	wd_ring->page_size = page_size;
3615 	wd_ring->page_num = page_num;
3616 
3617 	page_offset = 0;
3618 	for (i = 0; i < page_num; i++) {
3619 		txwd = &wd_ring->pages[i];
3620 		cur_paddr = dma + page_offset;
3621 		cur_vaddr = head + page_offset;
3622 
3623 		skb_queue_head_init(&txwd->queue);
3624 		INIT_LIST_HEAD(&txwd->list);
3625 		txwd->paddr = cur_paddr;
3626 		txwd->vaddr = cur_vaddr;
3627 		txwd->len = page_size;
3628 		txwd->seq = i;
3629 		rtw89_pci_enqueue_txwd(tx_ring, txwd);
3630 
3631 		page_offset += page_size;
3632 	}
3633 
3634 	return 0;
3635 }
3636 
3637 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3638 				   struct pci_dev *pdev,
3639 				   struct rtw89_pci_tx_ring *tx_ring,
3640 				   u32 desc_size, u32 len,
3641 				   enum rtw89_tx_channel txch,
3642 				   void *head, dma_addr_t dma)
3643 {
3644 	const struct rtw89_pci_ch_dma_addr *txch_addr;
3645 	int ret;
3646 
3647 	ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3648 	if (ret) {
3649 		rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3650 		goto err;
3651 	}
3652 
3653 	ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
3654 	if (ret) {
3655 		rtw89_err(rtwdev, "failed to get address of txch %d", txch);
3656 		goto err_free_wd_ring;
3657 	}
3658 
3659 	INIT_LIST_HEAD(&tx_ring->busy_pages);
3660 	tx_ring->bd_ring.head = head;
3661 	tx_ring->bd_ring.dma = dma;
3662 	tx_ring->bd_ring.len = len;
3663 	tx_ring->bd_ring.desc_size = desc_size;
3664 	tx_ring->bd_ring.addr = *txch_addr;
3665 	tx_ring->bd_ring.wp = 0;
3666 	tx_ring->bd_ring.rp = 0;
3667 	tx_ring->txch = txch;
3668 
3669 	return 0;
3670 
3671 err_free_wd_ring:
3672 	rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3673 err:
3674 	return ret;
3675 }
3676 
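/* All TX BD rings are carved out of one coherent DMA pool: each enabled TX
 * channel gets a ring_sz slice, with head/dma advanced as rings are set up.
 */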
3677 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3678 				    struct pci_dev *pdev)
3679 {
3680 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3681 	struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
3682 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3683 	struct rtw89_pci_tx_ring *tx_ring;
3684 	u32 i, tx_allocated;
3685 	dma_addr_t dma;
3686 	u32 desc_size;
3687 	u32 ring_sz;
3688 	u32 pool_sz;
3689 	u32 ch_num;
3690 #if defined(__linux__)
3691 	void *head;
3692 #elif defined(__FreeBSD__)
3693 	u8 *head;
3694 #endif
3695 	u32 len;
3696 	int ret;
3697 
3698 	BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16);
3699 
3700 	desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3701 	len = RTW89_PCI_TXBD_NUM_MAX;
3702 	ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask);
3703 	ring_sz = desc_size * len;
3704 	pool_sz = ring_sz * ch_num;
3705 
3706 	head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
3707 	if (!head)
3708 		return -ENOMEM;
3709 
3710 	bd_pool->head = head;
3711 	bd_pool->dma = dma;
3712 	bd_pool->size = pool_sz;
3713 
3714 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3715 		if (info->tx_dma_ch_mask & BIT(i))
3716 			continue;
3717 		tx_ring = &rtwpci->tx.rings[i];
3718 		ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3719 					      desc_size, len, i, head, dma);
3720 		if (ret) {
3721 #if defined(__linux__)
3722 			rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3723 #elif defined(__FreeBSD__)
3724 			rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret);
3725 #endif
3726 			goto err_free;
3727 		}
3728 
3729 		head += ring_sz;
3730 		dma += ring_sz;
3731 	}
3732 
3733 	return 0;
3734 
3735 err_free:
3736 	tx_allocated = i;
3737 	for (i = 0; i < tx_allocated; i++) {
3738 		tx_ring = &rtwpci->tx.rings[i];
3739 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3740 	}
3741 
3742 	dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
3743 
3744 	return ret;
3745 }
3746 
3747 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3748 				   struct pci_dev *pdev,
3749 				   struct rtw89_pci_rx_ring *rx_ring,
3750 				   u32 desc_size, u32 len, u32 rxch,
3751 				   void *head, dma_addr_t dma)
3752 {
3753 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3754 	const struct rtw89_pci_ch_dma_addr *rxch_addr;
3755 	struct sk_buff *skb;
3756 	int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3757 	int i, allocated;
3758 	int ret;
3759 
3760 	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3761 	if (ret) {
3762 		rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
3763 		return ret;
3764 	}
3765 
3766 	rx_ring->bd_ring.head = head;
3767 	rx_ring->bd_ring.dma = dma;
3768 	rx_ring->bd_ring.len = len;
3769 	rx_ring->bd_ring.desc_size = desc_size;
3770 	rx_ring->bd_ring.addr = *rxch_addr;
3771 	if (info->rx_ring_eq_is_full)
3772 		rx_ring->bd_ring.wp = len - 1;
3773 	else
3774 		rx_ring->bd_ring.wp = 0;
3775 	rx_ring->bd_ring.rp = 0;
3776 	rx_ring->buf_sz = buf_sz;
3777 	rx_ring->diliver_skb = NULL;
3778 	rx_ring->diliver_desc.ready = false;
3779 	rx_ring->target_rx_tag = 0;
3780 
3781 	for (i = 0; i < len; i++) {
3782 		skb = dev_alloc_skb(buf_sz);
3783 		if (!skb) {
3784 			ret = -ENOMEM;
3785 			goto err_free;
3786 		}
3787 
3788 		memset(skb->data, 0, buf_sz);
3789 		rx_ring->buf[i] = skb;
3790 		ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3791 					   buf_sz, i);
3792 		if (ret) {
3793 #if defined(__linux__)
3794 			rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3795 #elif defined(__FreeBSD__)
3796 			rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret);
3797 #endif
3798 			dev_kfree_skb_any(skb);
3799 			rx_ring->buf[i] = NULL;
3800 			goto err_free;
3801 		}
3802 	}
3803 
3804 	return 0;
3805 
3806 err_free:
3807 	allocated = i;
3808 	for (i = 0; i < allocated; i++) {
3809 		skb = rx_ring->buf[i];
3810 		if (!skb)
3811 			continue;
3812 		dma = *((dma_addr_t *)skb->cb);
3813 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3814 		dev_kfree_skb(skb);
3815 		rx_ring->buf[i] = NULL;
3816 	}
3817 
3818 	rx_ring->bd_ring.head = NULL;
3819 
3820 	return ret;
3821 }
3822 
3823 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3824 				    struct pci_dev *pdev)
3825 {
3826 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3827 	struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
3828 	struct rtw89_pci_rx_ring *rx_ring;
3829 	int i, rx_allocated;
3830 	dma_addr_t dma;
3831 	u32 desc_size;
3832 	u32 ring_sz;
3833 	u32 pool_sz;
3834 #if defined(__linux__)
3835 	void *head;
3836 #elif defined(__FreeBSD__)
3837 	u8 *head;
3838 #endif
3839 	u32 len;
3840 	int ret;
3841 
3842 	desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3843 	len = RTW89_PCI_RXBD_NUM_MAX;
3844 	ring_sz = desc_size * len;
3845 	pool_sz = ring_sz * RTW89_RXCH_NUM;
3846 
3847 	head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
3848 	if (!head)
3849 		return -ENOMEM;
3850 
3851 	bd_pool->head = head;
3852 	bd_pool->dma = dma;
3853 	bd_pool->size = pool_sz;
3854 
3855 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3856 		rx_ring = &rtwpci->rx.rings[i];
3857 
3858 		ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3859 					      desc_size, len, i,
3860 					      head, dma);
3861 		if (ret) {
3862 			rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3863 			goto err_free;
3864 		}
3865 
3866 		head += ring_sz;
3867 		dma += ring_sz;
3868 	}
3869 
3870 	return 0;
3871 
3872 err_free:
3873 	rx_allocated = i;
3874 	for (i = 0; i < rx_allocated; i++) {
3875 		rx_ring = &rtwpci->rx.rings[i];
3876 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3877 	}
3878 
3879 	dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
3880 
3881 	return ret;
3882 }
3883 
3884 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3885 				     struct pci_dev *pdev)
3886 {
3887 	int ret;
3888 
3889 	ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3890 	if (ret) {
3891 		rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3892 		goto err;
3893 	}
3894 
3895 	ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3896 	if (ret) {
3897 		rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3898 		goto err_free_tx_rings;
3899 	}
3900 
3901 	return 0;
3902 
3903 err_free_tx_rings:
3904 	rtw89_pci_free_tx_rings(rtwdev, pdev);
3905 err:
3906 	return ret;
3907 }
3908 
3909 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3910 			       struct rtw89_pci *rtwpci)
3911 {
3912 	skb_queue_head_init(&rtwpci->h2c_queue);
3913 	skb_queue_head_init(&rtwpci->h2c_release_queue);
3914 }
3915 
3916 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3917 				    struct pci_dev *pdev)
3918 {
3919 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3920 	int ret;
3921 
3922 	ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3923 	if (ret) {
3924 		rtw89_err(rtwdev, "failed to setup pci mapping\n");
3925 		goto err;
3926 	}
3927 
3928 	ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3929 	if (ret) {
3930 		rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3931 		goto err_pci_unmap;
3932 	}
3933 
3934 	rtw89_pci_h2c_init(rtwdev, rtwpci);
3935 
3936 	spin_lock_init(&rtwpci->irq_lock);
3937 	spin_lock_init(&rtwpci->trx_lock);
3938 
3939 	return 0;
3940 
3941 err_pci_unmap:
3942 	rtw89_pci_clear_mapping(rtwdev, pdev);
3943 err:
3944 	return ret;
3945 }
3946 
3947 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3948 				     struct pci_dev *pdev)
3949 {
3950 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3951 
3952 	rtw89_pci_free_trx_rings(rtwdev, pdev);
3953 	rtw89_pci_clear_mapping(rtwdev, pdev);
3954 	rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3955 				skb_queue_len(&rtwpci->h2c_queue), true);
3956 }
3957 
3958 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3959 {
3960 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3961 	const struct rtw89_chip_info *chip = rtwdev->chip;
3962 	u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3963 
3964 	if (chip->chip_id == RTL8851B)
3965 		hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3966 
3967 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
3968 
3969 	if (rtwpci->under_recovery) {
3970 		rtwpci->intrs[0] = hs0isr_ind_int_en;
3971 		rtwpci->intrs[1] = 0;
3972 	} else {
3973 		rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3974 				   B_AX_RXDMA_INT_EN |
3975 				   B_AX_RXP1DMA_INT_EN |
3976 				   B_AX_RPQDMA_INT_EN |
3977 				   B_AX_RXDMA_STUCK_INT_EN |
3978 				   B_AX_RDU_INT_EN |
3979 				   B_AX_RPQBD_FULL_INT_EN |
3980 				   hs0isr_ind_int_en;
3981 
3982 		rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3983 	}
3984 }
3985 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3986 
3987 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3988 {
3989 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3990 
3991 	rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3992 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3993 	rtwpci->intrs[0] = 0;
3994 	rtwpci->intrs[1] = 0;
3995 }
3996 
3997 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3998 {
3999 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4000 
4001 	rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
4002 			    B_AX_HS1ISR_IND_INT_EN |
4003 			    B_AX_HS0ISR_IND_INT_EN;
4004 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
4005 	rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
4006 			   B_AX_RXDMA_INT_EN |
4007 			   B_AX_RXP1DMA_INT_EN |
4008 			   B_AX_RPQDMA_INT_EN |
4009 			   B_AX_RXDMA_STUCK_INT_EN |
4010 			   B_AX_RDU_INT_EN |
4011 			   B_AX_RPQBD_FULL_INT_EN;
4012 	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
4013 }
4014 
4015 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
4016 {
4017 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4018 
4019 	rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
4020 			    B_AX_HS0ISR_IND_INT_EN;
4021 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
4022 	rtwpci->intrs[0] = 0;
4023 	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
4024 }
4025 
4026 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
4027 {
4028 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4029 
4030 	if (rtwpci->under_recovery)
4031 		rtw89_pci_recovery_intr_mask_v1(rtwdev);
4032 	else if (rtwpci->low_power)
4033 		rtw89_pci_low_power_intr_mask_v1(rtwdev);
4034 	else
4035 		rtw89_pci_default_intr_mask_v1(rtwdev);
4036 }
4037 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
4038 
4039 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
4040 {
4041 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4042 
4043 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
4044 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4045 	rtwpci->intrs[0] = 0;
4046 	rtwpci->intrs[1] = 0;
4047 }
4048 
4049 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
4050 {
4051 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4052 
4053 	rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
4054 			    B_BE_HS0_IND_INT_EN0;
4055 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4056 	rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
4057 			   B_BE_RDU_CH0_INT_IMR_V1;
4058 	rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
4059 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
4060 }
4061 
4062 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
4063 {
4064 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4065 
4066 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
4067 			    B_BE_HS1_IND_INT_EN0;
4068 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4069 	rtwpci->intrs[0] = 0;
4070 	rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
4071 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
4072 }
4073 
4074 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
4075 {
4076 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4077 
4078 	if (rtwpci->under_recovery)
4079 		rtw89_pci_recovery_intr_mask_v2(rtwdev);
4080 	else if (rtwpci->low_power)
4081 		rtw89_pci_low_power_intr_mask_v2(rtwdev);
4082 	else
4083 		rtw89_pci_default_intr_mask_v2(rtwdev);
4084 }
4085 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
4086 
4087 static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev)
4088 {
4089 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4090 
4091 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
4092 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4093 	rtwpci->intrs[0] = 0;
4094 	rtwpci->intrs[1] = 0;
4095 }
4096 
4097 static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev)
4098 {
4099 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4100 
4101 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
4102 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4103 	rtwpci->intrs[0] = 0;
4104 	rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR |
4105 			   B_BE_PCIE_RDU_CH0_IMR |
4106 			   B_BE_PCIE_RX_RX0P2_IMR0_V1 |
4107 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
4108 }
4109 
4110 void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev)
4111 {
4112 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4113 
4114 	if (rtwpci->under_recovery)
4115 		rtw89_pci_recovery_intr_mask_v3(rtwdev);
4116 	else
4117 		rtw89_pci_default_intr_mask_v3(rtwdev);
4118 }
4119 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3);
4120 
4121 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
4122 				 struct pci_dev *pdev)
4123 {
4124 	unsigned long flags = 0;
4125 	int ret;
4126 
4127 	flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
4128 	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
4129 	if (ret < 0) {
4130 		rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
4131 		goto err;
4132 	}
4133 
4134 	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
4135 					rtw89_pci_interrupt_handler,
4136 					rtw89_pci_interrupt_threadfn,
4137 					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
4138 	if (ret) {
4139 		rtw89_err(rtwdev, "failed to request threaded irq\n");
4140 		goto err_free_vector;
4141 	}
4142 
4143 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
4144 
4145 	return 0;
4146 
4147 err_free_vector:
4148 	pci_free_irq_vectors(pdev);
4149 err:
4150 	return ret;
4151 }
4152 
4153 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
4154 			       struct pci_dev *pdev)
4155 {
4156 	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
4157 	pci_free_irq_vectors(pdev);
4158 }
4159 
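/* Convert a Gray-coded value read from the PCIe PHY into plain binary by
 * repeatedly shifting the code right and XOR-folding it into the result.
 */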
4160 static u16 gray_code_to_bin(u16 gray_code)
4161 {
4162 	u16 binary = gray_code;
4163 
4164 	while (gray_code) {
4165 		gray_code >>= 1;
4166 		binary ^= gray_code;
4167 	}
4168 
4169 	return binary;
4170 }
4171 
4172 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
4173 {
4174 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4175 	struct pci_dev *pdev = rtwpci->pdev;
4176 	u16 val16, filter_out_val;
4177 	u32 val, phy_offset;
4178 	int ret;
4179 
4180 	if (rtwdev->chip->chip_id != RTL8852C)
4181 		return 0;
4182 
4183 	val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
4184 	if (val == B_AX_ASPM_CTRL_L1)
4185 		return 0;
4186 
4187 	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
4188 	if (ret)
4189 		return ret;
4190 
4191 	val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
4192 	if (val == RTW89_PCIE_GEN1_SPEED) {
4193 		phy_offset = R_RAC_DIRECT_OFFSET_G1;
4194 	} else if (val == RTW89_PCIE_GEN2_SPEED) {
4195 		phy_offset = R_RAC_DIRECT_OFFSET_G2;
4196 		val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
4197 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
4198 				  val16 | B_PCIE_BIT_PINOUT_DIS);
4199 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
4200 				  val16 & ~B_PCIE_BIT_RD_SEL);
4201 
4202 		val16 = rtw89_read16_mask(rtwdev,
4203 					  phy_offset + RAC_ANA1F * RAC_MULT,
4204 					  FILTER_OUT_EQ_MASK);
4205 		val16 = gray_code_to_bin(val16);
4206 		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
4207 					      RAC_MULT);
4208 		filter_out_val &= ~REG_FILTER_OUT_MASK;
4209 		filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
4210 
4211 		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
4212 			      filter_out_val);
4213 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
4214 				  B_BAC_EQ_SEL);
4215 		rtw89_write16_set(rtwdev,
4216 				  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
4217 				  B_PCIE_BIT_PSAVE);
4218 	} else {
4219 		return -EOPNOTSUPP;
4220 	}
4221 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
4222 			  B_PCIE_BIT_PSAVE);
4223 
4224 	return 0;
4225 }
4226 
4227 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
4228 {
4229 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4230 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4231 
4232 	if (rtw89_pci_disable_clkreq)
4233 		return;
4234 
4235 	gen_def->clkreq_set(rtwdev, enable);
4236 }
4237 
4238 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
4239 {
4240 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4241 	int ret;
4242 
4243 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
4244 					  PCIE_CLKDLY_HW_30US);
4245 	if (ret)
4246 		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
4247 
4248 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4249 		if (enable)
4250 			ret = rtw89_pci_config_byte_set(rtwdev,
4251 							RTW89_PCIE_L1_CTRL,
4252 							RTW89_PCIE_BIT_CLK);
4253 		else
4254 			ret = rtw89_pci_config_byte_clr(rtwdev,
4255 							RTW89_PCIE_L1_CTRL,
4256 							RTW89_PCIE_BIT_CLK);
4257 		if (ret)
4258 			rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
4259 				  enable ? "set" : "unset", ret);
4260 	} else if (chip_id == RTL8852C) {
4261 		rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
4262 				  B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
4263 		if (enable)
4264 			rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
4265 					  B_AX_CLK_REQ_N);
4266 		else
4267 			rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
4268 					  B_AX_CLK_REQ_N);
4269 	}
4270 }
4271 
4272 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
4273 {
4274 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4275 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4276 
4277 	if (rtw89_pci_disable_aspm_l1)
4278 		return;
4279 
4280 	gen_def->aspm_set(rtwdev, enable);
4281 }
4282 
4283 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
4284 {
4285 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4286 	u8 value = 0;
4287 	int ret;
4288 
4289 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
4290 	if (ret)
4291 		rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
4292 
4293 	u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
4294 	u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
4295 
4296 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
4297 	if (ret)
4298 		rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
4299 
4300 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4301 		if (enable)
4302 			ret = rtw89_pci_config_byte_set(rtwdev,
4303 							RTW89_PCIE_L1_CTRL,
4304 							RTW89_PCIE_BIT_L1);
4305 		else
4306 			ret = rtw89_pci_config_byte_clr(rtwdev,
4307 							RTW89_PCIE_L1_CTRL,
4308 							RTW89_PCIE_BIT_L1);
4309 	} else if (chip_id == RTL8852C) {
4310 		if (enable)
4311 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4312 					  B_AX_ASPM_CTRL_L1);
4313 		else
4314 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4315 					  B_AX_ASPM_CTRL_L1);
4316 	}
4317 	if (ret)
4318 		rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
4319 			  enable ? "set" : "unset", ret);
4320 }
4321 
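/* Recalculate interrupt mitigation: keep RX coalescing off while scanning or
 * under light traffic; under high TX/RX load, batch RX interrupts by packet
 * count and timer before writing the result to the mitigation register.
 */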
4322 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
4323 {
4324 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
4325 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4326 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
4327 	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
4328 	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
4329 	u32 val = 0;
4330 
4331 	if (rtwdev->scanning ||
4332 	    (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
4333 		goto out;
4334 
4335 	if (chip_gen == RTW89_CHIP_BE)
4336 		val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
4337 	else
4338 		val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
4339 		      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
4340 		      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
4341 		      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
4342 
4343 out:
4344 	rtw89_write32(rtwdev, info->mit_addr, val);
4345 }
4346 
4347 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
4348 {
4349 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4350 	struct pci_dev *pdev = rtwpci->pdev;
4351 	u16 link_ctrl;
4352 	int ret;
4353 
4354 	/* Although there is a standard PCIe configuration space register to
4355 	 * control the link, by Realtek's design the driver should check
4356 	 * whether the host supports CLKREQ/ASPM before enabling the HW module.
4357 	 *
4358 	 * These functions are implemented by two associated HW modules:
4359 	 * one is responsible for accessing PCIe configuration space to
4360 	 * follow the host settings, and the other carries out the
4361 	 * CLKREQ/ASPM mechanisms and is disabled by default. If the host
4362 	 * does not support them, or is configured incorrectly
4363 	 * (e.g. CLKREQ# is not bi-directional), enabling them could lead to
4364 	 * losing the device if the HW misbehaves on the link.
4365 	 *
4366 	 * Hence the driver first checks that the PCIe configuration space
4367 	 * settings are synced and enabled, and only then turns on the module
4368 	 * that actually implements the mechanism.
4369 	 */
4370 	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
4371 	if (ret) {
4372 		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
4373 		return;
4374 	}
4375 
4376 	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
4377 		rtw89_pci_clkreq_set(rtwdev, true);
4378 
4379 	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
4380 		rtw89_pci_aspm_set(rtwdev, true);
4381 }
4382 
4383 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
4384 {
4385 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4386 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4387 
4388 	if (rtw89_pci_disable_l1ss)
4389 		return;
4390 
4391 	gen_def->l1ss_set(rtwdev, enable);
4392 }
4393 
4394 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
4395 {
4396 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4397 	int ret;
4398 
4399 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4400 		if (enable)
4401 			ret = rtw89_pci_config_byte_set(rtwdev,
4402 							RTW89_PCIE_TIMER_CTRL,
4403 							RTW89_PCIE_BIT_L1SUB);
4404 		else
4405 			ret = rtw89_pci_config_byte_clr(rtwdev,
4406 							RTW89_PCIE_TIMER_CTRL,
4407 							RTW89_PCIE_BIT_L1SUB);
4408 		if (ret)
4409 			rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
4410 				  enable ? "set" : "unset", ret);
4411 	} else if (chip_id == RTL8852C) {
4412 		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
4413 						RTW89_PCIE_BIT_ASPM_L11 |
4414 						RTW89_PCIE_BIT_PCI_L11);
4415 		if (ret)
4416 			rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
4417 		if (enable)
4418 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4419 					  B_AX_L1SUB_DISABLE);
4420 		else
4421 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4422 					  B_AX_L1SUB_DISABLE);
4423 	}
4424 }
4425 
4426 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
4427 {
4428 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4429 	struct pci_dev *pdev = rtwpci->pdev;
4430 	u32 l1ss_cap_ptr, l1ss_ctrl;
4431 
4432 	if (rtw89_pci_disable_l1ss)
4433 		return;
4434 
4435 	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
4436 	if (!l1ss_cap_ptr)
4437 		return;
4438 
4439 	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
4440 
4441 	if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
4442 		rtw89_pci_l1ss_set(rtwdev, true);
4443 }
4444 
4445 static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
4446 {
4447 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4448 	struct pci_dev *pdev = rtwpci->pdev;
4449 
4450 	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
4451 				 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
4452 }
4453 
4454 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
4455 {
4456 	int ret = 0;
4457 	u32 sts;
4458 	u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
4459 
4460 	ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
4461 				       10, 1000, false, rtwdev,
4462 				       R_AX_PCIE_DMA_BUSY1);
4463 	if (ret) {
4464 		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
4465 			  rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
4466 		return -EINVAL;
4467 	}
4468 	return ret;
4469 }
4470 
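/* Level-1 recovery, stop phase: halt all PCIe DMA and poll for IO idle; if
 * TX/RX are reported stuck, disable the stuck direction, re-enable HCI DMA,
 * and poll once more.
 */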
4471 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
4472 {
4473 	u32 val;
4474 	int ret;
4475 
4476 	if (rtwdev->chip->chip_id == RTL8852C)
4477 		return 0;
4478 
4479 	rtw89_pci_ctrl_dma_all(rtwdev, false);
4480 	ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4481 	if (ret) {
4482 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4483 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
4484 			    "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
4485 			    R_AX_DBG_ERR_FLAG, val);
4486 		if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
4487 			rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
4488 		if (val & B_AX_RX_STUCK)
4489 			rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
4490 		rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4491 		ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4492 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4493 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
4494 			    "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
4495 			    R_AX_DBG_ERR_FLAG, val);
4496 	}
4497 
4498 	return ret;
4499 }
4500 
4501 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
4502 {
4503 	int ret;
4504 
4505 	if (rtwdev->chip->chip_id == RTL8852C)
4506 		return 0;
4507 
4508 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
4509 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4510 	rtw89_pci_clr_idx_all(rtwdev);
4511 
4512 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
4513 	if (ret)
4514 		return ret;
4515 
4516 	rtw89_pci_ctrl_dma_all(rtwdev, true);
4517 	return 0;
4518 }
4519 
4520 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
4521 					  enum rtw89_lv1_rcvy_step step)
4522 {
4523 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4524 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4525 	int ret;
4526 
4527 	switch (step) {
4528 	case RTW89_LV1_RCVY_STEP_1:
4529 		ret = gen_def->lv1rst_stop_dma(rtwdev);
4530 		if (ret)
4531 			rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
4532 
4533 		break;
4534 
4535 	case RTW89_LV1_RCVY_STEP_2:
4536 		ret = gen_def->lv1rst_start_dma(rtwdev);
4537 		if (ret)
4538 			rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
4539 		break;
4540 
4541 	default:
4542 		return -EINVAL;
4543 	}
4544 
4545 	return ret;
4546 }
4547 
4548 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
4549 {
4550 	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
4551 		return;
4552 
4553 	if (rtwdev->chip->chip_id == RTL8852C) {
4554 		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4555 			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
4556 		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4557 			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
4558 	} else {
4559 		rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
4560 			   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
4561 		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4562 			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
4563 		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4564 			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
4565 	}
4566 }
4567 
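/* NAPI poll: clear and service the RPQ ring first, then the RXQ ring; when
 * the budget is not exhausted, complete NAPI and re-enable interrupts if the
 * interface is still running.
 */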
4568 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
4569 {
4570 	struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
4571 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4572 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4573 	const struct rtw89_pci_isr_def *isr_def = info->isr_def;
4574 	unsigned long flags;
4575 	int work_done;
4576 
4577 	rtwdev->napi_budget_countdown = budget;
4578 
4579 	rtw89_write32(rtwdev, isr_def->isr_clear_rpq.addr, isr_def->isr_clear_rpq.data);
4580 	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4581 	if (work_done == budget)
4582 		return budget;
4583 
4584 	rtw89_write32(rtwdev, isr_def->isr_clear_rxq.addr, isr_def->isr_clear_rxq.data);
4585 	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4586 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4587 		spin_lock_irqsave(&rtwpci->irq_lock, flags);
4588 		if (likely(rtwpci->running))
4589 			rtw89_chip_enable_intr(rtwdev, rtwpci);
4590 		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
4591 	}
4592 
4593 	return work_done;
4594 }
4595 
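/* Match this device's subsystem IDs against the quirk table (terminated by a
 * zero vendor/device entry) and apply the matching entry's quirk bitmap and
 * customer ID.
 */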
4596 static
4597 void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev,
4598 				 struct pci_dev *pdev,
4599 				 const struct rtw89_pci_ssid_quirk *ssid_quirks)
4600 {
4601 	int i;
4602 
4603 	if (!ssid_quirks)
4604 		return;
4605 
4606 	for (i = 0; i < 200; i++, ssid_quirks++) {
4607 		if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0)
4608 			break;
4609 
4610 		if (ssid_quirks->vendor != pdev->vendor ||
4611 		    ssid_quirks->device != pdev->device ||
4612 		    ssid_quirks->subsystem_vendor != pdev->subsystem_vendor ||
4613 		    ssid_quirks->subsystem_device != pdev->subsystem_device)
4614 			continue;
4615 
4616 		bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap,
4617 			  NUM_OF_RTW89_QUIRKS);
4618 		rtwdev->custid = ssid_quirks->custid;
4619 		break;
4620 	}
4621 
4622 	rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n",
4623 		    (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid);
4624 }
4625 
4626 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
4627 {
4628 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
4629 	struct rtw89_dev *rtwdev = hw->priv;
4630 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4631 
4632 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4633 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4634 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4635 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4636 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
4637 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4638 		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
4639 				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4640 	} else {
4641 		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4642 				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4643 	}
4644 
4645 	return 0;
4646 }
4647 
4648 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
4649 {
4650 	if (rtwdev->chip->chip_id == RTL8852C)
4651 		return;
4652 
4653 	/* The hardware needs this register written twice for the setting to take effect */
4654 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4655 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
4656 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4657 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
4658 }
4659 
4660 void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
4661 {
4662 	if (resume)
4663 		rtw89_pci_cfg_dac(rtwdev, false);
4664 
4665 	rtw89_pci_disable_eq(rtwdev);
4666 	rtw89_pci_filter_out(rtwdev);
4667 	rtw89_pci_cpl_timeout_cfg(rtwdev);
4668 	rtw89_pci_link_cfg(rtwdev);
4669 	rtw89_pci_l1ss_cfg(rtwdev);
4670 }
4671 
4672 static int __maybe_unused rtw89_pci_resume(struct device *dev)
4673 {
4674 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
4675 	struct rtw89_dev *rtwdev = hw->priv;
4676 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4677 
4678 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4679 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4680 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4681 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4682 		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
4683 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4684 		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
4685 				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4686 	} else {
4687 		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4688 				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4689 		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4690 				  B_AX_SEL_REQ_ENTR_L1);
4691 	}
4692 	rtw89_pci_l2_hci_ldo(rtwdev);
4693 
4694 	rtw89_pci_basic_cfg(rtwdev, true);
4695 
4696 	return 0;
4697 }
4698 
4699 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
4700 EXPORT_SYMBOL(rtw89_pm_ops);
4701 
4702 static pci_ers_result_t rtw89_pci_io_error_detected(struct pci_dev *pdev,
4703 						    pci_channel_state_t state)
4704 {
4705 	struct net_device *netdev = pci_get_drvdata(pdev);
4706 
4707 	netif_device_detach(netdev);
4708 
4709 	return PCI_ERS_RESULT_NEED_RESET;
4710 }
4711 
4712 static pci_ers_result_t rtw89_pci_io_slot_reset(struct pci_dev *pdev)
4713 {
4714 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4715 	struct rtw89_dev *rtwdev = hw->priv;
4716 
4717 	rtw89_ser_notify(rtwdev, MAC_AX_ERR_ASSERTION);
4718 
4719 	return PCI_ERS_RESULT_RECOVERED;
4720 }
4721 
4722 static void rtw89_pci_io_resume(struct pci_dev *pdev)
4723 {
4724 	struct net_device *netdev = pci_get_drvdata(pdev);
4725 
4726 	/* ack any pending wake events, disable PME */
4727 	pci_enable_wake(pdev, PCI_D0, 0);
4728 
4729 	netif_device_attach(netdev);
4730 }
4731 
4732 const struct pci_error_handlers rtw89_pci_err_handler = {
4733 	.error_detected = rtw89_pci_io_error_detected,
4734 	.slot_reset = rtw89_pci_io_slot_reset,
4735 	.resume = rtw89_pci_io_resume,
4736 };
4737 EXPORT_SYMBOL(rtw89_pci_err_handler);
4738 
4739 const struct rtw89_pci_isr_def rtw89_pci_isr_ax = {
4740 	.isr_rdu = B_AX_RDU_INT,
4741 	.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
4742 	.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
4743 	.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
4744 	.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
4745 					    B_AX_RDU_INT},
4746 };
4747 EXPORT_SYMBOL(rtw89_pci_isr_ax);
4748 
4749 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
4750 	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
4751 	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
4752 	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
4753 
4754 	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
4755 	.rst_bdram = rtw89_pci_rst_bdram_ax,
4756 
4757 	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
4758 	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
4759 
4760 	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
4761 	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
4762 	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
4763 
4764 	.aspm_set = rtw89_pci_aspm_set_ax,
4765 	.clkreq_set = rtw89_pci_clkreq_set_ax,
4766 	.l1ss_set = rtw89_pci_l1ss_set_ax,
4767 
4768 	.disable_eq = rtw89_pci_disable_eq_ax,
4769 	.power_wake = rtw89_pci_power_wake_ax,
4770 };
4771 EXPORT_SYMBOL(rtw89_pci_gen_ax);
4772 
static const struct rtw89_hci_ops rtw89_pci_ops = {
	.tx_write	= rtw89_pci_ops_tx_write,
	.tx_kick_off	= rtw89_pci_ops_tx_kick_off,
	.flush_queues	= rtw89_pci_ops_flush_queues,
	.reset		= rtw89_pci_ops_reset,
	.start		= rtw89_pci_ops_start,
	.stop		= rtw89_pci_ops_stop,
	.pause		= rtw89_pci_ops_pause,
	.switch_mode	= rtw89_pci_ops_switch_mode,
	.recalc_int_mit = rtw89_pci_recalc_int_mit,

	.read8		= rtw89_pci_ops_read8,
	.read16		= rtw89_pci_ops_read16,
	.read32		= rtw89_pci_ops_read32,
	.write8		= rtw89_pci_ops_write8,
	.write16	= rtw89_pci_ops_write16,
	.write32	= rtw89_pci_ops_write32,

	.read32_pci_cfg	= rtw89_pci_ops_read32_pci_cfg,

	.mac_pre_init	= rtw89_pci_ops_mac_pre_init,
	.mac_pre_deinit	= rtw89_pci_ops_mac_pre_deinit,
	.mac_post_init	= rtw89_pci_ops_mac_post_init,
	.deinit		= rtw89_pci_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy	= rtw89_pci_ops_mac_lv1_recovery,
	.dump_err_status = rtw89_pci_ops_dump_err_status,
	.napi_poll	= rtw89_pci_napi_poll,

	.recovery_start = rtw89_pci_ops_recovery_start,
	.recovery_complete = rtw89_pci_ops_recovery_complete,

	.ctrl_txdma_ch	= rtw89_pci_ctrl_txdma_ch,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
	.ctrl_trxhci	= rtw89_pci_ctrl_dma_trx,
	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,

	.clr_idx_all	= rtw89_pci_clr_idx_all,
	.clear		= rtw89_pci_clear_resource,
	.disable_intr	= rtw89_pci_disable_intr_lock,
	.enable_intr	= rtw89_pci_enable_intr_lock,
	.rst_bdram	= rtw89_pci_reset_bdram,
};

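/* Probe sequence: allocate the ieee80211 hw, initialise the core, claim the
 * PCI device and its resources, read chip information, set up NAPI and the
 * IRQ, then register with the core. The error labels unwind these steps in
 * reverse order.
 */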
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rtw89_dev *rtwdev;
	const struct rtw89_driver_info *info;
	const struct rtw89_pci_info *pci_info;
	int ret;

	info = (const struct rtw89_driver_info *)id->driver_data;

	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
					  sizeof(struct rtw89_pci),
					  info->chip, info->variant);
	if (!rtwdev) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	pci_info = info->bus.pci;

	rtwdev->pci_info = info->bus.pci;
	rtwdev->hci.ops = &rtw89_pci_ops;
	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
	rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_PCIE;
	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

	rtw89_check_quirks(rtwdev, info->quirks);
	rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks);

	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	ret = rtw89_core_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise core\n");
		goto err_release_hw;
	}

	ret = rtw89_pci_claim_device(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to claim pci device\n");
		goto err_core_deinit;
	}

	ret = rtw89_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci resource\n");
		goto err_declaim_pci;
	}

	ret = rtw89_chip_info_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup chip information\n");
		goto err_clear_resource;
	}

	rtw89_pci_basic_cfg(rtwdev, false);

	ret = rtw89_core_napi_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to init napi\n");
		goto err_clear_resource;
	}

	ret = rtw89_pci_request_irq(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to request pci irq\n");
		goto err_deinit_napi;
	}

	ret = rtw89_core_register(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core\n");
		goto err_free_irq;
	}

	set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);

	return 0;

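/* Unwind in reverse order of the setup steps above. */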
err_free_irq:
	rtw89_pci_free_irq(rtwdev, pdev);
err_deinit_napi:
	rtw89_core_napi_deinit(rtwdev);
err_clear_resource:
	rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
	rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
	rtw89_core_deinit(rtwdev);
err_release_hw:
	rtw89_free_ieee80211_hw(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

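/* Release everything acquired by rtw89_pci_probe() for this device. */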
void rtw89_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw89_dev *rtwdev;

	rtwdev = hw->priv;

	rtw89_pci_free_irq(rtwdev, pdev);
	rtw89_core_napi_deinit(rtwdev);
	rtw89_core_unregister(rtwdev);
	rtw89_pci_clear_resource(rtwdev, pdev);
	rtw89_pci_declaim_device(rtwdev, pdev);
	rtw89_core_deinit(rtwdev);
	rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
MODULE_LICENSE("Dual BSD/GPL");