xref: /src/sys/dev/aq/aq_hw.c (revision 668423f75b4d9006f16847b415c861defb8267d7)
1 /*
2  * aQuantia Corporation Network Driver
3  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  *   (1) Redistributions of source code must retain the above
10  *   copyright notice, this list of conditions and the following
11  *   disclaimer.
12  *
13  *   (2) Redistributions in binary form must reproduce the above
14  *   copyright notice, this list of conditions and the following
15  *   disclaimer in the documentation and/or other materials provided
16  *   with the distribution.
17  *
18  *   (3)The name of the author may not be used to endorse or promote
19  *   products derived from this software without specific prior
20  *   written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
26  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
28  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/endian.h>
38 #include <sys/socket.h>
39 #include <machine/cpu.h>
40 #include <net/if.h>
41 
42 #include "aq_hw.h"
43 #include "aq_dbg.h"
44 #include "aq_hw_llh.h"
45 #include "aq_fw.h"
46 
47 #define AQ_HW_FW_SM_RAM        0x2U
48 #define AQ_CFG_FW_MIN_VER_EXPECTED 0x01050006U
49 
50 
/*
 * Translate accumulated hardware error flags into an errno-style code.
 * Currently a stub: no flag sources are wired up yet, so it always
 * reports success.
 */
int
aq_hw_err_from_flags(struct aq_hw *hw)
{
	int err = 0;

	return (err);
}
56 
57 static void
aq_hw_chip_features_init(struct aq_hw * hw,uint32_t * p)58 aq_hw_chip_features_init(struct aq_hw *hw, uint32_t *p)
59 {
60 	uint32_t chip_features = 0U;
61 	uint32_t val = reg_glb_mif_id_get(hw);
62 	uint32_t mif_rev = val & 0xFFU;
63 
64 	if ((0xFU & mif_rev) == 1U) {
65 		chip_features |= AQ_HW_CHIP_REVISION_A0 | AQ_HW_CHIP_MPI_AQ |
66 		     AQ_HW_CHIP_MIPS;
67 	} else if ((0xFU & mif_rev) == 2U) {
68 		chip_features |= AQ_HW_CHIP_REVISION_B0 | AQ_HW_CHIP_MPI_AQ |
69 		    AQ_HW_CHIP_MIPS | AQ_HW_CHIP_TPO2 | AQ_HW_CHIP_RPF2;
70 	} else if ((0xFU & mif_rev) == 0xAU) {
71 		chip_features |= AQ_HW_CHIP_REVISION_B1 | AQ_HW_CHIP_MPI_AQ |
72 		    AQ_HW_CHIP_MIPS | AQ_HW_CHIP_TPO2 | AQ_HW_CHIP_RPF2;
73 	}
74 
75 	*p = chip_features;
76 }
77 
/*
 * Read 'cnt' 32-bit words from firmware/MCP RAM at address 'a' into 'p'
 * via the MIF mailbox.  Returns 0 on success or a negative errno
 * (-ETIME if the RAM semaphore cannot be acquired).
 *
 * NOTE(review): AQ_HW_WAIT_FOR (aq_hw.h) is assumed to poll its
 * condition and assign the local 'err' on timeout — confirm against
 * the macro definition.
 */
int
aq_hw_fw_downld_dwords(struct aq_hw *hw, uint32_t a, uint32_t *p, uint32_t cnt)
{
	int err = 0;

	/* Acquire the firmware RAM semaphore (reads 1 once it is ours). */
	AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM) == 1U, 1U,
	     10000U);

	if (err < 0) {
		bool is_locked;

		/* Timed out: force-release the semaphore and retry once. */
		reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
		is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	/* Point the mailbox at the source address, then pull word by word. */
	mif_mcp_up_mailbox_addr_set(hw, a);

	for (++cnt; --cnt && !err;) {
		mif_mcp_up_mailbox_execute_operation_set(hw, 1);

		/*
		 * B1 silicon advances the mailbox address when the read
		 * completes; earlier revisions expose a busy bit instead.
		 */
		if (IS_CHIP_FEATURE(hw, REVISION_B1))
			AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw),
			    1U, 1000U);
		else
			AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1,
			     1000U);

		*(p++) = mif_mcp_up_mailbox_data_get(hw);
	}

	/* Release the RAM semaphore. */
	reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);

err_exit:
	return (err);
}
119 
120 int
aq_hw_ver_match(const aq_hw_fw_version * ver_expected,const aq_hw_fw_version * ver_actual)121 aq_hw_ver_match(const aq_hw_fw_version* ver_expected,
122     const aq_hw_fw_version* ver_actual)
123 {
124 	AQ_DBG_ENTER();
125 
126 	if (ver_actual->major_version >= ver_expected->major_version)
127 		return (true);
128 	if (ver_actual->minor_version >= ver_expected->minor_version)
129 		return (true);
130 	if (ver_actual->build_number >= ver_expected->build_number)
131 		return (true);
132 
133 	return (false);
134 }
135 
/*
 * Bring up communication with the firmware microprocessor (uCP):
 * reset the F/W, detect chip features, bind the F/W ops table, seed
 * the FW-1.x host-id scratchpad, and wait for the mailbox address.
 * Returns 0 on success or a negative errno.
 */
static int
aq_hw_init_ucp(struct aq_hw *hw)
{
	int err = 0;
	AQ_DBG_ENTER();

	hw->fw_version.raw = 0;

	err = aq_fw_reset(hw);
	if (err != EOK) {
		aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err);
		return (err);
	}

	aq_hw_chip_features_init(hw, &hw->chip_features);
	/* Selects hw->fw_ops and is expected to fill hw->fw_version. */
	err = aq_fw_ops_init(hw);
	if (err < 0) {
		aq_log_error("could not initialize F/W ops, err %d", err);
		return (-1);
	}

	if (hw->fw_version.major_version == 1) {
		/*
		 * FW 1.x: if scratchpad 0x370 is still clear, seed it with
		 * a random host id.  The masks force bit1 set and bit0
		 * clear in every byte, so no byte is ever zero.
		 */
		if (!AQ_READ_REG(hw, 0x370)) {
			unsigned int rnd = 0;
			unsigned int ucp_0x370 = 0;

			rnd = arc4random();

			ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
			AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370);
		}

		reg_glb_cpu_scratch_scp_set(hw, 0, 25);
	}

	/*
	 * Poll register 0x360 until the firmware publishes its mailbox
	 * address; AQ_HW_WAIT_FOR sets 'err' negative on timeout.
	 */
	AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20);

	/* Warn (but do not fail) when the firmware is older than expected. */
	aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED };
	if (!aq_hw_ver_match(&ver_expected, &hw->fw_version))
	        aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x",
		    AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw);

	AQ_DBG_EXIT(err);
	return (err);
}
182 
/*
 * Create the MPI (host <-> firmware) communication channel.
 * Thin wrapper around aq_hw_init_ucp(); returns its errno-style result.
 */
int
aq_hw_mpi_create(struct aq_hw *hw)
{
	int err;

	AQ_DBG_ENTER();
	err = aq_hw_init_ucp(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
197 
198 int
aq_hw_mpi_read_stats(struct aq_hw * hw,struct aq_hw_fw_mbox * pmbox)199 aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox)
200 {
201 	int err = 0;
202 //    AQ_DBG_ENTER();
203 
204 	if (hw->fw_ops && hw->fw_ops->get_stats) {
205 		err = hw->fw_ops->get_stats(hw, &pmbox->stats);
206 	} else {
207 		err = -ENOTSUP;
208 		aq_log_error("get_stats() not supported by F/W");
209 	}
210 
211 	if (err == EOK) {
212 		pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw);
213 		pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw);
214 	}
215 
216 //    AQ_DBG_EXIT(err);
217 	return (err);
218 }
219 
220 static int
aq_hw_mpi_set(struct aq_hw * hw,enum aq_hw_fw_mpi_state_e state,uint32_t speed)221 aq_hw_mpi_set(struct aq_hw *hw, enum aq_hw_fw_mpi_state_e state, uint32_t speed)
222 {
223 	int err = -ENOTSUP;
224 	AQ_DBG_ENTERA("speed %d", speed);
225 
226 	if (hw->fw_ops && hw->fw_ops->set_mode) {
227 		err = hw->fw_ops->set_mode(hw, state, speed);
228 	} else {
229 		aq_log_error("set_mode() not supported by F/W");
230 	}
231 
232 	AQ_DBG_EXIT(err);
233 	return (err);
234 }
235 
236 int
aq_hw_set_link_speed(struct aq_hw * hw,uint32_t speed)237 aq_hw_set_link_speed(struct aq_hw *hw, uint32_t speed)
238 {
239 	return aq_hw_mpi_set(hw, MPI_INIT, speed);
240 }
241 
/*
 * Query the firmware for the current MPI state and negotiated link.
 * On success *link_speed is set in Mbit/s (0 = no link) and fc_neg
 * receives the negotiated flow-control directions.
 * Returns 0, or a negative errno when the F/W query is missing/fails.
 */
int
aq_hw_get_link_state(struct aq_hw *hw, uint32_t *link_speed, struct aq_hw_fc_info *fc_neg)
{
	int err = EOK;

	enum aq_hw_fw_mpi_state_e mode;
	aq_fw_link_speed_t speed = aq_fw_none;
	aq_fw_link_fc_t fc;

	if (hw->fw_ops && hw->fw_ops->get_mode) {
		err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc);
	} else {
		aq_log_error("get_mode() not supported by F/W");
		AQ_DBG_EXIT(-ENOTSUP);
		return (-ENOTSUP);
	}

	if (err < 0) {
		aq_log_error("get_mode() failed, err %d", err);
		AQ_DBG_EXIT(err);
		return (err);
	}
	*link_speed = 0;
	/*
	 * Link data is only meaningful while the MPI is running (INIT).
	 * NOTE(review): fc_neg is left untouched on this early return —
	 * callers must not rely on it unless 0 is returned with MPI_INIT.
	 */
	if (mode != MPI_INIT)
		return (0);

	/* Map the firmware speed enum to Mbit/s. */
	switch (speed) {
	case aq_fw_10G:
		*link_speed = 10000U;
		break;
	case aq_fw_5G:
		*link_speed = 5000U;
		break;
	case aq_fw_2G5:
		*link_speed = 2500U;
		break;
	case aq_fw_1G:
		*link_speed = 1000U;
		break;
	case aq_fw_100M:
		*link_speed = 100U;
		break;
	default:
		*link_speed = 0U;
		break;
	}

	/* Report which flow-control directions were negotiated. */
	fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX);
	fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX);

	return (0);
}
297 
298 int
aq_hw_get_mac_permanent(struct aq_hw * hw,uint8_t * mac)299 aq_hw_get_mac_permanent(struct aq_hw *hw,  uint8_t *mac)
300 {
301 	int err = -ENOTSUP;
302 	AQ_DBG_ENTER();
303 
304 	if (hw->fw_ops && hw->fw_ops->get_mac_addr)
305 		err = hw->fw_ops->get_mac_addr(hw, mac);
306 
307 	/* Couldn't get MAC address from HW. Use auto-generated one. */
308 	if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) {
309 		uint16_t rnd;
310 		uint32_t h = 0;
311 		uint32_t l = 0;
312 
313 		printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
314 		printf("atlantic: Use random MAC address");
315 
316 		rnd = arc4random();
317 
318 		/* chip revision */
319 		l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
320 		h = 0x8001300EU;
321 
322 		mac[5] = (uint8_t)(0xFFU & l);
323 		l >>= 8;
324 		mac[4] = (uint8_t)(0xFFU & l);
325 		l >>= 8;
326 		mac[3] = (uint8_t)(0xFFU & l);
327 		l >>= 8;
328 		mac[2] = (uint8_t)(0xFFU & l);
329 		mac[1] = (uint8_t)(0xFFU & h);
330 		h >>= 8;
331 		mac[0] = (uint8_t)(0xFFU & h);
332 
333 		err = EOK;
334 	}
335 
336 	AQ_DBG_EXIT(err);
337 	return (err);
338 }
339 
340 int
aq_hw_deinit(struct aq_hw * hw)341 aq_hw_deinit(struct aq_hw *hw)
342 {
343 	AQ_DBG_ENTER();
344 	aq_hw_mpi_set(hw, MPI_DEINIT, 0);
345 	AQ_DBG_EXIT(0);
346 	return (0);
347 }
348 
349 int
aq_hw_set_power(struct aq_hw * hw,unsigned int power_state)350 aq_hw_set_power(struct aq_hw *hw, unsigned int power_state)
351 {
352 	AQ_DBG_ENTER();
353 	aq_hw_mpi_set(hw, MPI_POWER, 0);
354 	AQ_DBG_EXIT(0);
355 	return (0);
356 }
357 
358 
359 /* HW NIC functions */
360 
/*
 * Full NIC reset: reset the firmware, pulse the interrupt-block reset,
 * then run the F/W-specific reset hook.
 * Returns 0 on success or a negative errno.
 */
int
aq_hw_reset(struct aq_hw *hw)
{
	int err = 0;

	AQ_DBG_ENTER();

	err = aq_fw_reset(hw);
	if (err < 0)
		goto err_exit;

	/* Un-gate register resets, then assert the ITR block reset. */
	itr_irq_reg_res_dis_set(hw, 0);
	itr_res_irq_set(hw, 1);

	/*
	 * Wait for the self-clearing reset bit; AQ_HW_WAIT_FOR sets
	 * 'err' negative on timeout.
	 */
	AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10);
	if (err < 0) {
		printf("atlantic: IRQ reset failed: %d", err);
		goto err_exit;
	}

	if (hw->fw_ops && hw->fw_ops->reset)
		hw->fw_ops->reset(hw);

	err = aq_hw_err_from_flags(hw);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
391 
/*
 * Static QoS setup for a single traffic class: Tx scheduler defaults,
 * Tx/Rx packet-buffer sizes with hi/lo watermarks, and a flat
 * 802.1p-priority -> TC0 mapping.
 */
static int
aq_hw_qos_set(struct aq_hw *hw)
{
	uint32_t tc = 0U;
	uint32_t buff_size = 0U;
	unsigned int i_priority = 0U;
	int err = 0;

	AQ_DBG_ENTER();
	/* TPS Descriptor rate init */
	tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U);
	tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA);

	/* TPS VM init */
	tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U);

	/* TPS TC credits init */
	tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U);
	tps_tx_pkt_shed_data_arb_mode_set(hw, 0U);

	tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U);
	tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U);
	tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U);
	tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U);

	/* Tx buffer: whole buffer to TC0, watermarks at 66% / 50%. */
	buff_size = AQ_HW_TXBUF_MAX;

	tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
	tpb_tx_buff_hi_threshold_per_tc_set(hw,
	    (buff_size * (1024 / 32U) * 66U) / 100U, tc);
	tpb_tx_buff_lo_threshold_per_tc_set(hw,
	    (buff_size * (1024 / 32U) * 50U) / 100U, tc);

	/* QoS Rx buf size per TC: same scheme on the receive side. */
	tc = 0;
	buff_size = AQ_HW_RXBUF_MAX;

	rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
	rpb_rx_buff_hi_threshold_per_tc_set(hw,
	    (buff_size * (1024U / 32U) * 66U) / 100U, tc);
	rpb_rx_buff_lo_threshold_per_tc_set(hw,
	    (buff_size * (1024U / 32U) * 50U) / 100U, tc);

	/* QoS 802.1p priority -> TC mapping: all eight priorities to TC0. */
	for (i_priority = 8U; i_priority--;)
		rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
444 
/*
 * Enable the checksum, LSO, and LRO offload engines.
 * NOTE(review): the intermediate 'if (err < 0)' checks are vestigial —
 * nothing modifies 'err' before them, so they can never fire.
 */
static int
aq_hw_offload_set(struct aq_hw *hw)
{
	int err = 0;

	AQ_DBG_ENTER();
	/* TX checksums offloads*/
	tpo_ipv4header_crc_offload_en_set(hw, 1);
	tpo_tcp_udp_crc_offload_en_set(hw, 1);
	if (err < 0)
		goto err_exit;

	/* RX checksums offloads*/
	rpo_ipv4header_crc_offload_en_set(hw, 1);
	rpo_tcp_udp_crc_offload_en_set(hw, 1);
	if (err < 0)
		goto err_exit;

	/* LSO offloads: one enable bit per ring, all rings on. */
	tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU);
	if (err < 0)
		goto err_exit;

/* LRO offloads */
	{
		uint32_t i = 0;
		/* Per-session descriptor limit, encoded as a 2-bit log2. */
		uint32_t val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
		    ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
		    ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			rpo_lro_max_num_of_descriptors_set(hw, val, i);

		rpo_lro_time_base_divider_set(hw, 0x61AU);
		rpo_lro_inactive_interval_set(hw, 0);
		/* the LRO timebase divider is 5 uS (0x61a),
		 * to get a maximum coalescing interval of 250 uS,
		 * we need to multiply by 50(0x32) to get
		 * the default value 250 uS
		 */
		rpo_lro_max_coalescing_interval_set(hw, 50);

		rpo_lro_qsessions_lim_set(hw, 1U);

		rpo_lro_total_desc_lim_set(hw, 2U);

		rpo_lro_patch_optimization_en_set(hw, 0U);

		rpo_lro_min_pay_of_first_pkt_set(hw, 10U);

		rpo_lro_pkt_lim_set(hw, 1U);

		/* Master LRO switch: every ring on, or everything off. */
		rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U));
	}


	err = aq_hw_err_from_flags(hw);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
507 
/*
 * Static Tx datapath init: TC mode, LSO TCP-flag masks for
 * first/middle/last segments, write-back interrupts, DCA off, and
 * scratchpad insertion enabled.
 */
static int
aq_hw_init_tx_path(struct aq_hw *hw)
{
	int err = 0;

	AQ_DBG_ENTER();

	/* Tx TC/RSS number config */
	tpb_tx_tc_mode_set(hw, 1U);

	/* TCP flag masks applied to LSO-generated segments. */
	thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U);
	thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U);
	thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU);

	/* Tx interrupts */
	tdm_tx_desc_wr_wb_irq_en_set(hw, 1U);

	/* misc: 0x00010000 assumes TPO2-capable silicon (B0/B1). */
	AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U);//IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U);
	tdm_tx_dca_en_set(hw, 0U);
	tdm_tx_dca_mode_set(hw, 0U);

	tpb_tx_path_scp_ins_en_set(hw, 1U);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
536 
/*
 * Static Rx datapath init: TC mode, flow control, RSS ring selection,
 * unicast/multicast/VLAN/broadcast filter defaults, write-back
 * interrupts, and DCA off.
 */
static int
aq_hw_init_rx_path(struct aq_hw *hw)
{
	unsigned int control_reg_val = 0U;
	int i;
	int err;

	AQ_DBG_ENTER();
	/* Rx TC/RSS number config */
	rpb_rpf_rx_traf_class_mode_set(hw, 1U);

	/* Rx flow control */
	rpb_rx_flow_ctl_mode_set(hw, 1U);

	/* RSS Ring selection */
	reg_rx_flr_rss_control1set(hw, 0xB3333333U);

	/* Unicast filters: only slot 0 enabled, all set to host action. */
	for (i = AQ_HW_MAC_MAX; i--;) {
		rpfl2_uc_flr_en_set(hw, (i == 0U) ? 1U : 0U, i);
		rpfl2unicast_flr_act_set(hw, 1U, i);
	}

	reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U);
	reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U);

	/* Vlan filters: S-tag 0x88A8 outer, C-tag 0x8100 inner. */
	rpf_vlan_outer_etht_set(hw, 0x88A8U);
	rpf_vlan_inner_etht_set(hw, 0x8100U);
	rpf_vlan_accept_untagged_packets_set(hw, true);
	rpf_vlan_untagged_act_set(hw, HW_ATL_RX_HOST);

	/* Start VLAN-promiscuous; specific filters are programmed later. */
	rpf_vlan_prom_mode_en_set(hw, 1);

	/* Rx Interrupts */
	rdm_rx_desc_wr_wb_irq_en_set(hw, 1U);

	/* misc */
	control_reg_val = 0x000F0000U; //RPF2

	/* RSS hash type set for IP/TCP */
	control_reg_val |= 0x1EU;

	AQ_WRITE_REG(hw, 0x00005040U, control_reg_val);

	rpfl2broadcast_en_set(hw, 1U);
	rpfl2broadcast_flr_act_set(hw, 1U);
	rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U));

	rdm_rx_dca_en_set(hw, 0U);
	rdm_rx_dca_mode_set(hw, 0U);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
594 
595 int
aq_hw_mac_addr_set(struct aq_hw * hw,uint8_t * mac_addr,uint8_t index)596 aq_hw_mac_addr_set(struct aq_hw *hw, uint8_t *mac_addr, uint8_t index)
597 {
598 	int err = 0;
599 	unsigned int h = 0U;
600 	unsigned int l = 0U;
601 
602 	AQ_DBG_ENTER();
603 	if (!mac_addr) {
604 		err = -EINVAL;
605 		goto err_exit;
606 	}
607 	h = (mac_addr[0] << 8) | (mac_addr[1]);
608 	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) |
609 	    mac_addr[5];
610 
611 	rpfl2_uc_flr_en_set(hw, 0U, index);
612 	rpfl2unicast_dest_addresslsw_set(hw, l, index);
613 	rpfl2unicast_dest_addressmsw_set(hw, h, index);
614 	rpfl2_uc_flr_en_set(hw, 1U, index);
615 
616 	err = aq_hw_err_from_flags(hw);
617 
618 err_exit:
619 	AQ_DBG_EXIT(err);
620 	return (err);
621 }
622 
/*
 * One-time hardware init: PCI request-size tuning, Tx/Rx datapath
 * setup, station MAC filter, link start, QoS, interrupt mapping, and
 * offloads.
 * mac_addr: station address for unicast filter slot AQ_HW_MAC.
 * adm_irq:  vector index used for the admin (link) interrupt.
 * msix:     true selects MSI-X multi-vector mode, false MSI.
 * Returns 0 on success or a negative errno.
 */
int
aq_hw_init(struct aq_hw *hw, uint8_t *mac_addr, uint8_t adm_irq, bool msix)
{

	int err = 0;
	uint32_t val = 0;

	AQ_DBG_ENTER();

	/* Force limit MRRS on RDM/TDM to 2K */
	val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR);
	AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable to
	* handle more than (8K-MRRS) incoming DMA data.
	* Value 24 in 256byte units
	*/
	AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	aq_hw_init_tx_path(hw);
	aq_hw_init_rx_path(hw);

	aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC);

	/* Start link negotiation at the configured rate. */
	aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate);

	aq_hw_qos_set(hw);

	err = aq_hw_err_from_flags(hw);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	//Enable interrupt
	itr_irq_status_cor_en_set(hw, 0); //Disable clear-on-read for status
	itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear.
	if (msix)
		itr_irq_mode_set(hw, 0x6); //MSIX + multi vector
	else
		itr_irq_mode_set(hw, 0x5); //MSI + multi vector

	/* Map the admin interrupt; bit 0x80 enables the mapping entry. */
	reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3);

	aq_hw_offload_set(hw);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
672 
673 
/*
 * Open the Tx and Rx packet buffers; after this call the datapath is
 * live and traffic can flow.  Returns the accumulated HW error state.
 */
int
aq_hw_start(struct aq_hw *hw)
{
	int rc;

	AQ_DBG_ENTER();

	tpb_tx_buff_en_set(hw, 1U);
	rpb_rx_buff_en_set(hw, 1U);

	rc = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(rc);
	return (rc);
}
686 
687 
/*
 * Program per-ring interrupt moderation timers.
 * hw->itr selects the policy: 0 disables moderation (pure per-descriptor
 * interrupts), -1 uses the per-speed defaults below, any other value is
 * a custom max interval (clamped to 0x1FF) with min = max/2.
 */
int
aq_hw_interrupt_moderation_set(struct aq_hw *hw)
{
	/* {min, max} timer pairs indexed by link speed, 10G first. */
	static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = {
	    {80, 120},//{0x6U, 0x38U},/* 10Gbit */
	    {0xCU, 0x70U},/* 5Gbit */
	    {0xCU, 0x70U},/* 5Gbit 5GS */
	    {0x18U, 0xE0U},/* 2.5Gbit */
	    {0x30U, 0x80U},/* 1Gbit */
	    {0x4U, 0x50U},/* 100Mbit */
	};
	static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = {
	    {0x4fU, 0x1ff},//{0xffU, 0xffU}, /* 10Gbit */
	    {0x4fU, 0xffU}, /* 5Gbit */
	    {0x4fU, 0xffU}, /* 5Gbit 5GS */
	    {0x4fU, 0xffU}, /* 2.5Gbit */
	    {0x4fU, 0xffU}, /* 1Gbit */
	    {0x4fU, 0xffU}, /* 100Mbit */
	};

	/* Always uses the 10G row, regardless of the actual link speed. */
	uint32_t speed_index = 0U; //itr settings for 10 g
	uint32_t itr_rx = 2U;
	uint32_t itr_tx = 2U;
	int custom_itr = hw->itr;
	int active = custom_itr != 0;
	int err;


	AQ_DBG_ENTER();

	/* Control word layout: min timer at bits 8..15, max at 16..24. */
	if (custom_itr == -1) {
		/* set min timer value */
		itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U;
		/* set max timer value */
		itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U;

		/* set min timer value */
		itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U;
		/* set max timer value */
		itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U;
	} else {
		if (custom_itr > 0x1FF)
			custom_itr = 0x1FF;

		itr_rx |= (custom_itr/2) << 0x8U; /* set min timer value */
		itr_rx |= custom_itr << 0x10U; /* set max timer value */

		itr_tx |= (custom_itr/2) << 0x8U; /* set min timer value */
		itr_tx |= custom_itr << 0x10U; /* set max timer value */
	}

	/* Moderation and per-descriptor write-back IRQs are exclusive. */
	tdm_tx_desc_wr_wb_irq_en_set(hw, !active);
	tdm_tdm_intr_moder_en_set(hw, active);
	rdm_rx_desc_wr_wb_irq_en_set(hw, !active);
	rdm_rdm_intr_moder_en_set(hw, active);

	/* Apply the same control word to every ring. */
	for (int i = HW_ATL_B0_RINGS_MAX; i--;) {
		reg_tx_intr_moder_ctrl_set(hw,  itr_tx, i);
		reg_rx_intr_moder_ctrl_set(hw,  itr_rx, i);
	}

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
753 
754 /**
755  * @brief Set VLAN filter table
756  * @details Configure VLAN filter table to accept (and assign the queue) traffic
757  *  for the particular vlan ids.
 * Note: call this while VLAN promiscuous mode is enabled so that traffic is not lost.
759  *
760  * @param aq_hw_s
761  * @param aq_rx_filter_vlan VLAN filter configuration
762  * @return 0 - OK, <0 - error
763  */
764 int
hw_atl_b0_hw_vlan_set(struct aq_hw_s * self,struct aq_rx_filter_vlan * aq_vlans)765 hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, struct aq_rx_filter_vlan *aq_vlans)
766 {
767 	int i;
768 
769 	for (i = 0; i < AQ_HW_VLAN_MAX_FILTERS; i++) {
770 		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
771 		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
772 		if (aq_vlans[i].enable) {
773 			hw_atl_rpf_vlan_id_flr_set(self,
774 						   aq_vlans[i].vlan_id,
775 						   i);
776 			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
777 			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
778 			if (aq_vlans[i].queue != 0xFF) {
779 				hw_atl_rpf_vlan_rxq_flr_set(self,
780 							    aq_vlans[i].queue,
781 							    i);
782 				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
783 			}
784 		}
785 	}
786 
787 	return aq_hw_err_from_flags(self);
788 }
789 
/*
 * Toggle VLAN promiscuous mode (accept frames with any VLAN tag).
 * Returns the accumulated HW error state.
 */
int
hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
{
	hw_atl_rpf_vlan_prom_mode_en_set(self, promisc);

	return aq_hw_err_from_flags(self);
}
796 
797 
/*
 * Apply the interface's promiscuity settings to the receive filters:
 * L2 promiscuous, VLAN promiscuous (implied by L2), and all-multicast.
 */
void
aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc,
    bool mc_promisc)
{
	AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc,
	    vlan_promisc, mc_promisc);

	/* Accept every unicast frame when L2-promiscuous. */
	rpfl2promiscuous_mode_en_set(self, l2_promisc);

	/* L2 promiscuity implies VLAN promiscuity as well. */
	hw_atl_b0_hw_vlan_promisc_set(self, l2_promisc | vlan_promisc);

	/* Multicast: accept everything, and mirror into filter slot 0. */
	rpfl2_accept_all_mc_packets_set(self, mc_promisc);
	rpfl2multicast_flr_en_set(self, mc_promisc, 0);

	AQ_DBG_EXIT(0);
}
814 
/*
 * Write the RSS hash key to hardware as 32-bit words, highest word
 * first, byte-swapping each word for the register layout.
 * Returns 0 on success; AQ_HW_WAIT_FOR sets 'err' negative on timeout.
 */
int
aq_hw_rss_hash_set(struct aq_hw_s *self,
    uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
	uint32_t rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
	uint32_t addr = 0U;
	uint32_t i = 0U;
	int err = 0;

	AQ_DBG_ENTER();

	memcpy(rss_key_dw, rss_key, HW_ATL_RSS_HASHKEY_SIZE);

	/* NOTE(review): the literal 10 assumes HW_ATL_RSS_HASHKEY_SIZE
	 * is 40 bytes (ten dwords) — confirm against the header. */
	for (i = 10, addr = 0U; i--; ++addr) {
		uint32_t key_data = bswap32(rss_key_dw[i]);
		rpf_rss_key_wr_data_set(self, key_data);
		rpf_rss_key_addr_set(self, addr);
		rpf_rss_key_wr_en_set(self, 1U);
		/* Wait for the write-enable bit to self-clear. */
		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
845 
/*
 * Read the RSS hash key back from hardware, mirroring the write path:
 * ten 32-bit words, highest word first, byte-swapped per word.
 * Always returns the accumulated HW error state (no timeouts here).
 */
int
aq_hw_rss_hash_get(struct aq_hw_s *self,
    uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
	uint32_t rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
	uint32_t addr = 0U;
	uint32_t i = 0U;
	int err = 0;

	AQ_DBG_ENTER();

	/* NOTE(review): assumes HW_ATL_RSS_HASHKEY_SIZE == 40 (ten dwords). */
	for (i = 10, addr = 0U; i--; ++addr) {
		rpf_rss_key_addr_set(self, addr);
		rss_key_dw[i] = bswap32(rpf_rss_key_rd_data_get(self));
	}
	memcpy(rss_key, rss_key_dw, HW_ATL_RSS_HASHKEY_SIZE);

	err = aq_hw_err_from_flags(self);

	AQ_DBG_EXIT(err);
	return (err);
}
868 
869 int
aq_hw_rss_set(struct aq_hw_s * self,uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])870 aq_hw_rss_set(struct aq_hw_s *self,
871     uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
872 {
873 	uint16_t bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
874 					3 / 16U)];
875 	int err = 0;
876 	uint32_t i = 0U;
877 
878 	memset(bitary, 0, sizeof(bitary));
879 
880 	for (i = HW_ATL_RSS_INDIRECTION_TABLE_MAX; i--;) {
881 		(*(uint32_t *)(bitary + ((i * 3U) / 16U))) |=
882 			((rss_table[i]) << ((i * 3U) & 0xFU));
883 	}
884 
885 	for (i = ARRAY_SIZE(bitary); i--;) {
886 		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
887 		rpf_rss_redir_tbl_addr_set(self, i);
888 		rpf_rss_redir_wr_en_set(self, 1U);
889 		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0,
890 			       1000U, 10U);
891 		if (err < 0)
892 			goto err_exit;
893 	}
894 
895 	err = aq_hw_err_from_flags(self);
896 
897 err_exit:
898 	return (err);
899 }
900 
901 int
aq_hw_udp_rss_enable(struct aq_hw_s * self,bool enable)902 aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable)
903 {
904 	int err = 0;
905 	if (!enable) {
906 		/* HW bug workaround:
907 		 * Disable RSS for UDP using rx flow filter 0.
908 		 * HW does not track RSS stream for fragmenged UDP,
909 		 * 0x5040 control reg does not work.
910 		 */
911 		hw_atl_rpf_l3_l4_enf_set(self, true, 0);
912 		hw_atl_rpf_l4_protf_en_set(self, true, 0);
913 		hw_atl_rpf_l3_l4_rxqf_en_set(self, true, 0);
914 		hw_atl_rpf_l3_l4_actf_set(self, L2_FILTER_ACTION_HOST, 0);
915 		hw_atl_rpf_l3_l4_rxqf_set(self, 0, 0);
916 		hw_atl_rpf_l4_protf_set(self, HW_ATL_RX_UDP, 0);
917 	} else {
918 		hw_atl_rpf_l3_l4_enf_set(self, false, 0);
919 	}
920 
921 	err = aq_hw_err_from_flags(self);
922 	return (err);
923 
924 }
925