// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_reg.h"

#define HBG_HW_EVENT_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US	(10 * 1000)
#define HBG_MAC_LINK_WAIT_TIMEOUT_US	(500 * 1000)
#define HBG_MAC_LINK_WAIT_INTERVAL_US	(5 * 1000)
/* Endianness selection: little endian or big endian.
 * "ctrl" means the packet descriptors, "data" means the skb packet data.
 */
#define HBG_ENDIAN_CTRL_LE_DATA_BE	0x0
#define HBG_PCU_FRAME_LEN_PLUS 4

#define HBG_FIFO_TX_FULL_THRSLD		0x3F0
#define HBG_FIFO_TX_EMPTY_THRSLD	0x1F0
#define HBG_FIFO_RX_FULL_THRSLD		0x240
#define HBG_FIFO_RX_EMPTY_THRSLD	0x190
#define HBG_CFG_FIFO_FULL_THRSLD	0x10
#define HBG_CFG_FIFO_EMPTY_THRSLD	0x01

static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
	return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
	       !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
}

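/* Post an event to the hardware and poll until it is handled (spec valid
 * again and the event request register cleared). Returns -EBUSY if another
 * event is still being handled, or -ETIMEDOUT if the hardware does not
 * complete the event within 2 seconds.
 */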
int hbg_hw_event_notify(struct hbg_priv *priv,
			enum hbg_hw_event_type event_type)
{
	bool is_valid;
	int ret;

	if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
		return -EBUSY;

	/* notify hardware of the event */
	hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);

	ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
				HBG_HW_EVENT_WAIT_INTERVAL_US,
				HBG_HW_EVENT_WAIT_TIMEOUT_US,
				HBG_HW_EVENT_WAIT_INTERVAL_US, priv);

	clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);

	if (ret)
		dev_err(&priv->pdev->dev,
			"event %d wait timeout\n", event_type);

	return ret;
}

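/* Read the device specifications (MAC id, PHY address, MTU limits, FIFO
 * depths, MAC address, ...) from the spec registers into priv->dev_specs.
 */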
static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *specs = &priv->dev_specs;
	u64 mac_addr;

	if (!hbg_hw_spec_is_valid(priv)) {
		dev_err(&priv->pdev->dev, "dev_specs not init\n");
		return -EINVAL;
	}

	specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
	specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
	specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
	specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
	specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
	specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
	specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
	specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
	specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);

	mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
	u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);

	if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
		return -EADDRNOTAVAIL;

	specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
	specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
	return 0;
}

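/* Combine the common interrupt status register with the dedicated TX and RX
 * indication status bits into one status word.
 */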
u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
{
	u32 status;

	status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);

	hbg_field_modify(status, HBG_INT_MSK_TX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
	hbg_field_modify(status, HBG_INT_MSK_RX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));

	return status;
}

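/* TX and RX interrupts are cleared through their dedicated indication
 * registers; all other sources go through the common interrupt clear
 * register.
 */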
void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);

	return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
}

bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);

	return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
}

void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
{
	u32 value;

	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);

	value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
	if (enable)
		value |= mask;
	else
		value &= ~mask;

	hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}

void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
	u32 addr;

	/* mac addr is u64, so the addr offset is 0x8 */
	addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
	hbg_reg_write64(priv, addr, mac_addr);
}

static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_len)
{
	max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);

	/* lower two bits of value must be set to 0 */
	max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);
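	/* e.g. a frame length of 1518 is rounded up to 1520 */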

	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
}

static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_size)
{
	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
}

void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
	/* Setting burst_len BIT(29) to 1 improves TX performance,
	 * but packets are dropped when mtu > 2000.
	 * So clear BIT(29) when mtu > 2000.
	 */
	u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
	u32 frame_len;

	frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
		    ETH_HLEN + ETH_FCS_LEN;
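	/* e.g. with an mtu of 1500 and two VLAN layers:
	 * 1500 + 2 * 4 + 14 + 4 = 1526 bytes
	 */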

	hbg_hw_set_pcu_max_frame_len(priv, frame_len);
	hbg_hw_set_mac_max_frame_len(priv, frame_len);

	hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
			    HBG_REG_BRUST_LENGTH_B, burst_len_bit);
}

void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_TX_B, enable);
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_RX_B, enable);
}

u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
{
	if (dir & HBG_DIR_TX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);

	if (dir & HBG_DIR_RX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);

	return 0;
}

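/* Queue one TX descriptor by writing its four words to the TX CFF
 * address registers.
 */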
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
{
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
}

void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
{
	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
}

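/* Reconfigure the port for a new speed/duplex: disable the MAC, program the
 * port mode and duplex, issue a core reset event, re-enable the MAC and wait
 * for the MAC to report link up. On timeout, the np link fail task is
 * scheduled.
 */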
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
	u32 link_status;
	int ret;

	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);

	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);

	hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);

	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);

	/* wait for MAC link up */
	ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
				 link_status,
				 FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
					   link_status),
				 HBG_MAC_LINK_WAIT_INTERVAL_US,
				 HBG_MAC_LINK_WAIT_TIMEOUT_US);
	if (ret)
		hbg_np_link_fail_task_schedule(priv);
}

/* only unicast filtering is supported */
void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
			    HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);

	/* only uc filter is supported, so set all bits of mc mask reg to 1 */
	hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
	hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
}

void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
{
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_RX_B, rx_en);

	hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
			    HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
}

void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
{
	*tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_TX_B);
	*rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_RX_B);
}

void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
{
	hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}

static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
				   u32 full, u32 empty, enum hbg_dir dir)
{
	u32 value = 0;

	value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
	value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);

	if (dir & HBG_DIR_TX)
		hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);

	if (dir & HBG_DIR_RX)
		hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
}

static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
				       u32 full, u32 empty, enum hbg_dir dir)
{
	u32 value;

	value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);

	if (dir & HBG_DIR_TX) {
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
	}

	if (dir & HBG_DIR_RX) {
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
	}

	hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
}

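/* Enable auto-negotiation, CRC insertion and padding of short frames on the
 * transmit path.
 */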
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);

	hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
}

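/* Program the RX buffer handling: address mode, header skip sizes,
 * IP alignment and the owning MAC port.
 */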
static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
			   HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
			   HBG_RX_SKIP2);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);

	hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_control(struct hbg_priv *priv)
{
	hbg_hw_init_rx_ctrl(priv);

	/* parse from L2 layer */
	hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
			    HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);

	hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
			    HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
			    HBG_STATUS_ENABLE);
	hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
			    HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
	hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
			    HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
}

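/* One-time hardware init: read the device specs, select bus endianness,
 * enable mode change, set up RX/TX control and program the FIFO thresholds.
 */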
int hbg_hw_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_dev_specs_init(priv);
	if (ret)
		return ret;

	hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
			    HBG_REG_BUS_CTRL_ENDIAN_M,
			    HBG_ENDIAN_CTRL_LE_DATA_BE);
	hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
			    HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);

	hbg_hw_init_rx_control(priv);
	hbg_hw_init_transmit_ctrl(priv);

	hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
			       HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
	hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
			       HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
	hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
				   HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
	return 0;
}