// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */

#include <linux/string.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"

/* These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define TXGBE_ATR_COMMON_HASH_KEY \
		(TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY)
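/* Key bits set in both the bucket and signature keys contribute to a shared
 * "common" hash, computed once per iteration and later folded into both the
 * bucket and the signature results.
 */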
#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
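/* A worked expansion, as a sketch: the keys are compile-time constants, so
 * the compiler folds the tests and each iteration reduces to at most two
 * XORs. For instance, if key bit 1 were set only in the bucket key and key
 * bit 17 only in the signature key, iteration 1 would compile down to:
 *
 *	bucket_hash ^= lo_hash_dword >> 1;
 *	sig_hash ^= hi_hash_dword << 15;
 */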

/**
 * txgbe_atr_compute_sig_hash - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 * @hash: pointer to the computed hash
 *
 * This function is an unrolled, optimized version of the signature hash
 * computation: it unwinds all of the loops, lets the compiler resolve the
 * conditional ifs since the keys are static defines, and computes two
 * hashes at once since the hashed dword stream is the same for both keys.
 **/
static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
				       union txgbe_atr_hash_dword common,
				       u32 *hash)
{
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input.dword);

	/* generate common hash dword */
	hi_hash_dword = ntohl(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	TXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/* apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this is
	 * delayed because bit 0 of the stream must not be processed, so the
	 * VLAN is not added until after bit 0 has been handled
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		TXGBE_COMPUTE_SIG_HASH_ITERATION(i);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= TXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= TXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	*hash = sig_hash ^ bucket_hash;
}
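/* Usage sketch (hypothetical tuple): the caller packs the flow type into
 * @input and the XOR-compressed ports/addresses into @common, e.g.
 *
 *	union txgbe_atr_hash_dword input = { .dword = 0 };
 *	union txgbe_atr_hash_dword common = { .dword = 0 };
 *	u32 hash;
 *
 *	input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
 *	common.port.src = th->dest;
 *	common.port.dst = th->source;
 *	txgbe_atr_compute_sig_hash(input, common, &hash);
 *
 * The low word of @hash carries the bucket hash and the high word the
 * signature hash, matching the FDIRHASH register layout used below.
 */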

#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
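/* Unlike the signature variant above, only the bucket key is consulted:
 * perfect filters match on the full masked tuple, so no signature hash is
 * computed.
 */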

/**
 * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes. First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream. This way it will be available for
 * future use without needing to recompute the hash.
 **/
void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
				    union txgbe_atr_input *input_mask)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	__be32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 11; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 10; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = ntohl(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	TXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/* apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this is
	 * delayed because bit 0 of the stream must not be processed, so the
	 * VLAN is not added until after bit 0 has been handled
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		TXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/* Limit hash to 13 bits since max bucket count is 8K.
	 * Store the result at the end of the input stream.
	 */
	input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
}
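/* Usage sketch: callers (e.g. the ethtool ntuple path) build a key and a
 * mask, then hash before programming the filter. The mask is applied to
 * @input in place, so @input should be the caller's own copy:
 *
 *	txgbe_atr_compute_perfect_hash(&filter->filter, &mask);
 *	err = txgbe_fdir_write_perfect_filter(wx, &filter->filter,
 *					      filter->sw_idx, queue);
 */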

static int txgbe_fdir_check_cmd_complete(struct wx *wx)
{
	u32 val;

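	/* Poll FDIRCMD every 10 us, for up to 100 us, until the command
	 * field clears, i.e. the hardware has consumed the command.
	 */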
	return read_poll_timeout_atomic(rd32, val,
					!(val & TXGBE_RDB_FDIR_CMD_CMD_MASK),
					10, 100, false,
					wx, TXGBE_RDB_FDIR_CMD);
}

/**
 * txgbe_fdir_add_signature_filter - Adds a signature hash filter
 * @wx: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Return: 0 on success and negative on failure
 **/
static int txgbe_fdir_add_signature_filter(struct wx *wx,
					   union txgbe_atr_hash_dword input,
					   union txgbe_atr_hash_dword common,
					   u8 queue)
{
	u32 fdirhashcmd, fdircmd;
	u8 flow_type;
	int err;

	/* Get the flow_type in order to program FDIRCMD properly:
	 * the lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER
	 */
	flow_type = input.formatted.flow_type;
	switch (flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
	case TXGBE_ATR_FLOW_TYPE_TCPV6:
	case TXGBE_ATR_FLOW_TYPE_UDPV6:
	case TXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		wx_err(wx, "Error on flow type input\n");
		return -EINVAL;
	}

	/* configure FDIRCMD register */
	fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
		  TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
		  TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
	fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type);
	fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);

	txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd);
	fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID;
	wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd);
	wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);

	wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, fdirhashcmd);

	err = txgbe_fdir_check_cmd_complete(wx);
	if (err)
		wx_err(wx, "Flow Director command did not complete!\n");

	return err;
}

void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
{
	union txgbe_atr_hash_dword common = { .dword = 0 };
	union txgbe_atr_hash_dword input = { .dword = 0 };
	struct wx_q_vector *q_vector = ring->q_vector;
	struct wx_dec_ptype dptype;
	union network_header {
		struct ipv6hdr *ipv6;
		struct iphdr *ipv4;
		void *raw;
	} hdr;
	struct tcphdr *th;

	/* if the ring doesn't have an interrupt vector, we cannot perform ATR */
	if (!q_vector)
		return;

	ring->atr_count++;
	dptype = wx_decode_ptype(ptype);
	if (dptype.etype) {
		if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
			return;
		hdr.raw = (void *)skb_inner_network_header(first->skb);
		th = inner_tcp_hdr(first->skb);
	} else {
		if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP ||
		    WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
			return;
		hdr.raw = (void *)skb_network_header(first->skb);
		th = tcp_hdr(first->skb);
	}

	/* skip this packet since it is invalid or the socket is closing */
	if (!th || th->fin)
		return;

	/* sample on all SYN packets or once every atr sample count */
	if (!th->syn && ring->atr_count < ring->atr_sample_rate)
		return;

	/* reset sample count */
	ring->atr_count = 0;

	/* src and dst are inverted; think of how the receiver sees them
	 *
	 * The input is broken into two sections: a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type, and the rest of the
	 * data, which is XORed together and stored in the compressed dword.
	 */
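	/* note: the decoded packet type, not a VLAN tag, is what gets folded
	 * into the hash through the vlan_id field here; signature filters
	 * match on the hash alone, so the field is free to carry entropy
	 */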
	input.formatted.vlan_id = htons((u16)ptype);

	/* since src port and flex bytes occupy the same word, XOR them
	 * together and write the value to the source port portion of the
	 * compressed dword
	 */
	if (first->tx_flags & WX_TX_FLAGS_SW_VLAN)
		common.port.src ^= th->dest ^ first->skb->protocol;
	else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN)
		common.port.src ^= th->dest ^ first->skb->vlan_proto;
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) {
		input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
	} else {
		input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
	}

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	txgbe_fdir_add_signature_filter(q_vector->wx, input, common,
					ring->queue_index);
}
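/* Called from the transmit path for each outgoing frame; sampling is
 * throttled by ring->atr_sample_rate, except that SYN segments are always
 * sampled so that new flows are steered immediately.
 */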

int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
{
	u32 fdirm = 0, fdirtcpm = 0, flex = 0;
	int index, offset;

	/* Program the relevant mask registers. If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well. L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only. IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify the bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		wx_dbg(wx, "bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
		break;
	case 0x7F:
		break;
	default:
		wx_err(wx, "Error on vm pool mask\n");
		return -EINVAL;
	}

	switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			wx_err(wx, "Error on src/dst port mask\n");
			return -EINVAL;
		}
		break;
	case TXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		wx_err(wx, "Error on flow type mask\n");
		return -EINVAL;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);

	index = VMDQ_P(0) / 4;
	offset = VMDQ_P(0) % 4;
	flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
	flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
	flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
		 TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);

	switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8);
		break;
	case 0xFFFF:
		break;
	default:
		wx_err(wx, "Error on flexible byte mask\n");
		return -EINVAL;
	}
	wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ntohs(input_mask->formatted.dst_port);
	fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
	fdirtcpm |= ntohs(input_mask->formatted.src_port);

	/* write both the same so that UDP and TCP use the same mask */
	wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
	wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
	wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);

	/* store source and destination IP masks (little-endian) */
	wr32(wx, TXGBE_RDB_FDIR_SA4_MSK,
	     ntohl(~input_mask->formatted.src_ip[0]));
	wr32(wx, TXGBE_RDB_FDIR_DA4_MSK,
	     ntohl(~input_mask->formatted.dst_ip[0]));

	return 0;
}
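/* Usage sketch (hypothetical mask): match on L4 type and destination port
 * only, wildcarding every other field; such a mask passes all of the
 * checks above.
 *
 *	union txgbe_atr_input mask = { .dword_stream = { 0 } };
 *
 *	mask.formatted.flow_type = TXGBE_ATR_L4TYPE_MASK;
 *	mask.formatted.dst_port = htons(0xffff);
 *	err = txgbe_fdir_set_input_mask(wx, &mask);
 */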

int txgbe_fdir_write_perfect_filter(struct wx *wx,
				    union txgbe_atr_input *input,
				    u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	int err = 0;

	/* currently IPv6 is not supported, must be programmed with 0 */
	wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0]));
	wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1]));
	wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2]));

	/* record the source address (little-endian) */
	wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0]));

	/* record the first 32 bits of the destination address
	 * (little-endian)
	 */
	wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0]));

	/* record source and destination port (little-endian) */
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport);

	/* record packet type and flex_bytes (little-endian) */
	fdirvlan = ntohs(input->formatted.flex_bytes);
	fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash |
		   TXGBE_RDB_FDIR_HASH_BUCKET_VALID |
		   TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
	wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);

	/* flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	WX_WRITE_FLUSH(wx);

	/* configure FDIRCMD register */
	fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
		  TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
		  TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
	if (queue == TXGBE_RDB_FDIR_DROP_QUEUE)
		fdircmd |= TXGBE_RDB_FDIR_CMD_DROP;
	fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type);
	fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);
	fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool);

	wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);
	err = txgbe_fdir_check_cmd_complete(wx);
	if (err)
		wx_err(wx, "Flow Director command did not complete!\n");

	return err;
}

int txgbe_fdir_erase_perfect_filter(struct wx *wx,
				    union txgbe_atr_input *input,
				    u16 soft_id)
{
	u32 fdirhash, fdircmd;
	int err = 0;

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash;
	fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
	wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);

	/* flush hash to HW */
	WX_WRITE_FLUSH(wx);

	/* Query if the filter is present */
	wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT);

	err = txgbe_fdir_check_cmd_complete(wx);
	if (err) {
		wx_err(wx, "Flow Director command did not complete!\n");
		return err;
	}

	fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD);
	/* if the filter exists in hardware then remove it */
	if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) {
		wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
		WX_WRITE_FLUSH(wx);
		wr32(wx, TXGBE_RDB_FDIR_CMD,
		     TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW);
	}

	return 0;
}
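/* Usage sketch: erase mirrors the add path; the same bkt_hash and soft_id
 * that were passed to txgbe_fdir_write_perfect_filter() must be supplied
 * so the query addresses the same filter slot, e.g.
 *
 *	txgbe_fdir_erase_perfect_filter(wx, &filter->filter,
 *					filter->sw_idx);
 */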

/**
 * txgbe_fdir_enable - Initialize Flow Director control registers
 * @wx: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
{
	u32 val;
	int ret;

	/* Prime the keys for hashing */
	wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY);
	wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);

	wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl);
	WX_WRITE_FLUSH(wx);
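	/* Poll FDIR_CTL every 1 ms, for up to 10 ms, until the hardware
	 * reports that table initialization is done.
	 */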
	ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE,
				1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL);
	if (ret < 0)
		wx_dbg(wx, "Flow Director poll time exceeded!\n");
}

/**
 * txgbe_init_fdir_signature - Initialize Flow Director signature filters
 * @wx: pointer to hardware structure
 **/
static void txgbe_init_fdir_signature(struct wx *wx)
{
	u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
	int index = VMDQ_P(0) / 4;
	int offset = VMDQ_P(0) % 4;
	u32 flex = 0;

	/* Move the flexible bytes to use the ethertype - shift 6 words */
	flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
	flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
	flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
		 TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
	wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);

	/* Continue setup of fdirctrl register bits:
	 * Set the maximum length per hash bucket to 0xA filters
	 * Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
		    TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
		    TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);

	/* write hashes and fdirctrl register, poll for completion */
	txgbe_fdir_enable(wx, fdirctrl);
}

/**
 * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters
 * @wx: pointer to hardware structure
 **/
static void txgbe_init_fdir_perfect(struct wx *wx)
{
	u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;

	/* Continue setup of fdirctrl register bits:
	 * Turn perfect match filtering on
	 * Report hash in RSS field of Rx wb descriptor
	 * Initialize the drop queue
	 * Set the maximum length per hash bucket to 0xA filters
	 * Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH |
		    TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) |
		    TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
		    TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
		    TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);

	/* write hashes and fdirctrl register, poll for completion */
	txgbe_fdir_enable(wx, fdirctrl);
}

static void txgbe_fdir_filter_restore(struct wx *wx)
{
	struct txgbe_fdir_filter *filter;
	struct txgbe *txgbe = wx->priv;
	struct hlist_node *node;
	u8 queue = 0;
	int ret = 0;

	spin_lock(&txgbe->fdir_perfect_lock);

	if (!hlist_empty(&txgbe->fdir_filter_list))
		ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask);

	if (ret)
		goto unlock;

	hlist_for_each_entry_safe(filter, node,
				  &txgbe->fdir_filter_list, fdir_node) {
		if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) {
			queue = TXGBE_RDB_FDIR_DROP_QUEUE;
		} else {
			u32 ring = ethtool_get_flow_spec_ring(filter->action);

			if (ring >= wx->num_rx_queues) {
				wx_err(wx, "FDIR restore failed, ring:%u\n",
				       ring);
				continue;
			}

			/* Map the ring onto the absolute queue index */
			queue = wx->rx_ring[ring]->reg_idx;
		}

		ret = txgbe_fdir_write_perfect_filter(wx,
						      &filter->filter,
						      filter->sw_idx,
						      queue);
		if (ret)
			wx_err(wx, "FDIR restore failed, index:%u\n",
			       filter->sw_idx);
	}

unlock:
	spin_unlock(&txgbe->fdir_perfect_lock);
}

void txgbe_configure_fdir(struct wx *wx)
{
	wx_disable_sec_rx_path(wx);

	if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) {
		txgbe_init_fdir_signature(wx);
	} else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) {
		txgbe_init_fdir_perfect(wx);
		txgbe_fdir_filter_restore(wx);
	}

	wx_enable_sec_rx_path(wx);
}

void txgbe_fdir_filter_exit(struct wx *wx)
{
	struct txgbe_fdir_filter *filter;
	struct txgbe *txgbe = wx->priv;
	struct hlist_node *node;

	spin_lock(&txgbe->fdir_perfect_lock);

	hlist_for_each_entry_safe(filter, node,
				  &txgbe->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	txgbe->fdir_filter_count = 0;

	spin_unlock(&txgbe->fdir_perfect_lock);
}