// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include "mt76.h"
#include "dma.h"

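/**
 * mt76_wed_release_rx_buf - release all rx buffers handed over to WED
 * @wed: wed device
 *
 * Walk the rx token table, return every page-pool buffer still referenced
 * by a token and free the associated rxwi cache entries.
 */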
void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	int i;

	for (i = 0; i < dev->rx_token_size; i++) {
		struct mt76_txwi_cache *t;

		t = mt76_rx_token_release(dev, i);
		if (!t || !t->ptr)
			continue;

		mt76_put_page_pool_buf(t->ptr, false);
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
	}

	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
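/**
 * mt76_wed_init_rx_buf - pre-allocate rx buffers for the WED buffer manager
 * @wed: wed device
 * @size: number of descriptors to populate
 *
 * Fill the WED rx buffer ring with page-pool buffers: each descriptor gets
 * the DMA address of a freshly allocated buffer together with an rx token
 * the driver can later use to reclaim it. On failure all buffers allocated
 * so far are released again.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */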
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct mt76_txwi_cache *t = NULL;
	int i;

	for (i = 0; i < size; i++) {
		dma_addr_t addr;
		u32 offset;
		int token;
		void *buf;

		t = mt76_get_rxwi(dev);
		if (!t)
			goto unmap;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			goto unmap;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		desc->buf0 = cpu_to_le32(addr);
		token = mt76_rx_token_consume(dev, buf, t, addr);
		if (token < 0) {
			mt76_put_page_pool_buf(buf, false);
			goto unmap;
		}

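		/* the descriptor token field carries the rx token in its
		 * low bits and, with 64-bit DMA addressing, the high dword
		 * of the buffer address on top of it
		 */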
		token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
		desc->token |= cpu_to_le32(token);
		desc++;
	}

	return 0;

unmap:
	if (t)
		mt76_put_rxwi(dev, t);
	mt76_wed_release_rx_buf(wed);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);

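/**
 * mt76_wed_offload_enable - hand the upper tx token range over to WED
 * @wed: wed device
 *
 * Limit driver-allocated tx tokens to ids below wed->wlan.token_start and
 * wait up to one second for tokens still accounted to WED to drain.
 *
 * Return: 0 on success, 1 if the wait timed out.
 */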
int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = wed->wlan.token_start;
	spin_unlock_bh(&dev->token_lock);

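	/* wait for any tx tokens still accounted to WED to be released */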
	return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_wed_offload_enable);

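/**
 * mt76_wed_dma_setup - attach a mt76 queue to the WED block
 * @dev: mt76 device
 * @q: queue to attach
 * @reset: whether this is a reset of an already configured ring
 *
 * Dispatch on the WED queue type encoded in q->flags and program the
 * matching WED ring (tx, txfree, rx or one of the RRO rings). The WED
 * flag bits are temporarily cleared while the ring is initialized and
 * restored before returning.
 *
 * Return: 0 on success or if the queue is not WED-enabled, a negative
 * error code otherwise.
 */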
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
	int ret = 0, type, ring;
	u16 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!q->wed || !mtk_wed_device_active(q->wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);

		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
		if (!ret)
			q->wed_regs = q->wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->rx_ring[ring].reg_base;
		break;
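	/* RRO data and msdu_pg rings are filled by the WED block itself;
	 * mark them as full so the host never refills them
	 */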
	case MT76_WED_RRO_Q_DATA:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_MSDU_PG:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_IND:
		q->flags &= ~MT_QFLAG_WED;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	q->flags = flags;

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

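/**
 * mt76_wed_offload_disable - give the full tx token range back to the driver
 * @wed: wed device
 *
 * Restore dev->token_size to the driver default once WED offload is
 * turned off.
 */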
void mt76_wed_offload_disable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = dev->drv->token_size;
	spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);

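/**
 * mt76_wed_reset_complete - signal that a WED reset has finished
 * @wed: wed device
 *
 * Completion counterpart of mt76_wed_dma_reset(), which blocks until this
 * is called.
 */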
void mt76_wed_reset_complete(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_wed_reset_complete);

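/**
 * mt76_wed_net_setup_tc - forward a tc offload request to the WED block
 * @hw: mac80211 hardware
 * @vif: virtual interface
 * @netdev: net device the tc request is for
 * @type: tc setup type
 * @type_data: type-specific payload
 *
 * Return: -EOPNOTSUPP when WED is inactive, otherwise the result of
 * mtk_wed_device_setup_tc().
 */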
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mtk_wed_device *wed = &phy->dev->mmio.wed;

	if (!mtk_wed_device_active(wed))
		return -EOPNOTSUPP;

	return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
}
EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc);

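/**
 * mt76_wed_dma_reset - kick a pending WED reset and wait for completion
 * @dev: mt76 device
 *
 * No-op unless MT76_STATE_WED_RESET is set. Signals the wed_reset
 * completion, then waits up to three seconds for
 * mt76_wed_reset_complete() to report that the reset has finished.
 */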
void mt76_wed_dma_reset(struct mt76_dev *dev)
{
	struct mt76_mmio *mmio = &dev->mmio;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
		return;

	complete(&mmio->wed_reset);

	if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
		dev_err(dev->dev, "wed reset complete timeout\n");
}
EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);