xref: /src/sys/dev/qlnx/qlnxe/ecore_l2_api.h (revision 70256d2b86d95a678a63c65b157b9c635f1f4c6a)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 #ifndef __ECORE_L2_API_H__
30 #define __ECORE_L2_API_H__
31 
32 #include "ecore_status.h"
33 #include "ecore_sp_api.h"
34 #include "ecore_int_api.h"
35 
#ifndef __EXTRACT__LINUX__
/* RSS hash-capability flags; OR-able bitmask values carried in
 * ecore_rss_params.rss_caps.
 */
enum ecore_rss_caps {
	ECORE_RSS_IPV4		= 0x1,
	ECORE_RSS_IPV6		= 0x2,
	ECORE_RSS_IPV4_TCP	= 0x4,
	ECORE_RSS_IPV6_TCP	= 0x8,
	ECORE_RSS_IPV4_UDP	= 0x10,
	ECORE_RSS_IPV6_UDP	= 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */

/* Per the name: maximum PHC drift, in parts per billion.
 * NOTE(review): usage not visible in this header - confirm against callers.
 */
#define ECORE_MAX_PHC_DRIFT_PPB	291666666

/* PTP RX filter selection - which PTP message classes (v1/v2, L2/L4
 * transport, event/general) get hardware timestamping.
 */
enum ecore_ptp_filter_type {
	ECORE_PTP_FILTER_NONE,
	ECORE_PTP_FILTER_ALL,
	ECORE_PTP_FILTER_V1_L4_EVENT,
	ECORE_PTP_FILTER_V1_L4_GEN,
	ECORE_PTP_FILTER_V2_L4_EVENT,
	ECORE_PTP_FILTER_V2_L4_GEN,
	ECORE_PTP_FILTER_V2_L2_EVENT,
	ECORE_PTP_FILTER_V2_L2_GEN,
	ECORE_PTP_FILTER_V2_EVENT,
	ECORE_PTP_FILTER_V2_GEN
};

/* Enable/disable hardware timestamping of transmitted PTP packets */
enum ecore_ptp_hwtstamp_tx_type {
	ECORE_PTP_HWTSTAMP_TX_OFF,
	ECORE_PTP_HWTSTAMP_TX_ON,
};
#endif
70 
#ifndef __EXTRACT__LINUX__
/* Parameters shared by the RX- and TX-queue start ramrods. */
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	/* Status block the queue's completions are bound to */
	struct ecore_sb_info *p_sb;
	u8 sb_idx;

	/* Traffic class for this queue */
	u8 tc;
};

/* Outputs of a successful ecore_eth_rx_queue_start() call. */
struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod;	/* producer-update address (IOMEM) */
	void *p_handle;			/* opaque handle; pass to stop/update */
};

/* Outputs of a successful ecore_eth_tx_queue_start() call. */
struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell;	/* doorbell address (IOMEM) */
	void *p_handle;			/* opaque handle; pass to stop */
};
#endif
96 
/* RSS configuration for a vport update. The update_* flags select which
 * sub-fields the ramrod should actually apply.
 */
struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;		/* bitmask of enum ecore_rss_caps */
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consists of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};

/* TPA (aggregation) tuning for a vport update. update_tpa_en_flg gates the
 * *_en_flg group; update_tpa_param_flg gates the remaining parameters.
 */
struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};
130 
/* Operation to perform on an L2 filter. */
enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH, /* Removes all filters */
};

/* What a unicast filter classifies on: MAC, VLAN, inner headers, VNI,
 * or combinations thereof.
 */
enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
};
150 
/* A single unicast filter command; consumed by ecore_filter_ucast_cmd()
 * and ecore_sp_eth_filter_ucast(). Only the fields relevant to 'type'
 * (mac / vlan / vni) need to be valid.
 */
struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;	/* used by REMOVE/MOVE opcodes */
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

/* A multicast filter command; consumed by ecore_filter_mcast_cmd(). */
struct ecore_filter_mcast {
	/* Only REPLACE and FLUSH is supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
};
#define ECORE_MAX_MC_ADDRS	64
172 
/* RX/TX accept-mode configuration for ecore_filter_accept_cmd().
 * The accept-filter fields take ECORE_ACCEPT_* bitmask values; the
 * update_* flags select which direction(s) to modify.
 */
struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define	ECORE_ACCEPT_NONE		0x01
#define ECORE_ACCEPT_UCAST_MATCHED	0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
#define ECORE_ACCEPT_MCAST_MATCHED	0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
#define ECORE_ACCEPT_BCAST		0x20
};

#ifndef __EXTRACT__LINUX__
/* Classification mode for aRFS/ntuple hardware filters. */
enum ecore_filter_config_mode {
	ECORE_FILTER_CONFIG_MODE_DISABLE,
	ECORE_FILTER_CONFIG_MODE_5_TUPLE,
	ECORE_FILTER_CONFIG_MODE_L4_PORT,
	ECORE_FILTER_CONFIG_MODE_IP_DEST,
};
#endif

/* Protocol selection and mode for ecore_arfs_mode_configure(). */
struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	enum ecore_filter_config_mode mode;
};
202 
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so driver should take care...:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 */

enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Add / remove / move multicast MAC filters. */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Set "accept" filters (ECORE_ACCEPT_* flags) for a vport, optionally
 * updating the accept-any-vlan setting as well.
 */
enum _ecore_status_t
ecore_filter_accept_cmd(
	struct ecore_dev		 *p_dev,
	u8				 vport,
	struct ecore_filter_accept_flags accept_flags,
	u8				 update_accept_any_vlan,
	u8				 accept_any_vlan,
	enum spq_mode			 comp_mode,
	struct ecore_spq_comp_cb	 *p_comp_data);
233 
/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params		Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes 		Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr	Physical address of BDs for receive.
 * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
 * @param cqe_pbl_size 		Size of the CQE PBL Table
 * @param p_ret_params		Pointed struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq			Handler of queue to close
 * @param eq_completion_only	If True completion will be on
 *				EQe, if False completion will be
 *				on EQe if p_hwfn opaque
 *				different from the RXQ opaque
 *				otherwise on CQe.
 * @param cqe_completion	If True completion will be
 *				received on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);
280 
/**
 * @brief ecore_eth_tx_queue_start - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params
 * @param tc			traffic class to use with this L2 txq
 * @param pbl_addr		address of the pbl array
 * @param pbl_size 		number of entries in pbl
 * @param p_ret_params		Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle to Tx queue needed to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);
316 
/* Aggregation mode for a vport: disabled, RSC, or GRO. */
enum ecore_tpa_mode	{
	ECORE_TPA_MODE_NONE,
	ECORE_TPA_MODE_RSC,
	ECORE_TPA_MODE_GRO,
	ECORE_TPA_MODE_MAX
};

/* Inputs to ecore_sp_vport_start(). */
struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
};
349 
/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params		VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);
364 
/* Inputs to ecore_sp_vport_update(). Each updatable field is paired with an
 * update_*_flg that must be set for the ramrod to apply it; rss_params and
 * sge_tpa_params may be NULL to leave those areas untouched.
 */
struct ecore_sp_vport_update_params {
	u16			opaque_fid;
	u8			vport_id;
	u8			update_vport_active_rx_flg;
	u8			vport_active_rx_flg;
	u8			update_vport_active_tx_flg;
	u8			vport_active_tx_flg;
	u8			update_inner_vlan_removal_flg;
	u8			inner_vlan_removal_flg;
	u8			silent_vlan_removal_flg;
	u8			update_default_vlan_enable_flg;
	u8			default_vlan_enable_flg;
	u8			update_default_vlan_flg;
	u16			default_vlan;
	u8			update_tx_switching_flg;
	u8			tx_switching_flg;
	u8			update_approx_mcast_flg;
	u8			update_anti_spoofing_en_flg;
	u8			anti_spoofing_en;
	u8			update_accept_any_vlan_flg;
	u8			accept_any_vlan;
	u32			bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct ecore_rss_params	*rss_params;
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params;
};
391 
/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);
/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);

/* Configure a single unicast filter on behalf of the given opaque_fid;
 * see struct ecore_filter_ucast for the command contents.
 */
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);
433 
/**
 * @brief ecore_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers	An array of queue handlers to be updated.
 * @param num_rxqs              number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_eth_rx_queues_set_default -
 *
 * This ramrod sets RSS RX queue as default one.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param p_rxq_handler		queue handler to be updated.
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn,
				   void *p_rxq_handler,
				   enum spq_mode comp_mode,
				   struct ecore_spq_comp_cb *p_comp_data);
482 
/* Read stats for a single statistics bin on one hw-function; when
 * b_get_port_stats is set, port-level stats are gathered as well.
 */
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

/* Fill 'stats' with the device's vport statistics. */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

/* Reset the device's vport statistics counters. */
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
492 
/**
 *@brief ecore_arfs_mode_configure -
 *
 *Enable or disable rfs mode. It must accept at least one of tcp or udp true
 *and at least one of ipv4 or ipv6 true to enable rfs mode.
 *
 *@param p_hwfn
 *@param p_ptt
 *@param p_cfg_params		arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);

#ifndef __EXTRACT__LINUX__
/* Inputs to ecore_configure_rfs_ntuple_filter(). */
struct ecore_ntuple_filter_params {
	/* Physically mapped address containing header of buffer to be used
	 * as filter.
	 */
	dma_addr_t addr;

	/* Length of header in bytes */
	u16 length;

	/* Relative queue-id to receive classified packet */
#define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1)
	u16 qid;

	/* Identifier can either be according to vport-id or vfid */
	bool b_is_vf;
	u8 vport_id;
	u8 vf_id;

	/* true iff this filter is to be added. Else to be removed */
	bool b_is_add;
};
#endif
531 
/**
 * @brief - ecore_configure_rfs_ntuple_filter
 *
 * This ramrod should be used to add or remove arfs hw filter
 *
 * @param p_hwfn
 * @param p_cb		Used for ECORE_SPQ_MODE_CB, where client would
 *			initialize it with cookie and callback function
 *			address; if not using this mode then client must
 *			pass NULL.
 * @param p_params
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  struct ecore_ntuple_filter_params *p_params);
547 #endif
548