// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>

#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"

static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
}

static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}

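/* Static PPE configuration: FOE table base address and hashing setup,
 * binding ageing deltas, per-port egress MTU and the SRAM/DRAM table
 * split (the SRAM entries are shared between the two blocks when PPE2 is
 * enabled).
 */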
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_tb_size, sram_num_entries, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i;

	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	for (i = 0; i < PPE_NUM; i++) {
		int p;

		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}

	if (airoha_ppe2_is_enabled(eth)) {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	} else {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	}
}

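/* Apply an Ethernet address pedit action to the cached L2 header data:
 * copy the 16-bit or 32-bit mangled value into the destination/source
 * MAC bytes at the pedit offset, ignoring offsets beyond the MAC
 * address area.
 */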
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act,
				       void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

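/* Extract a mangled L4 port from a 32-bit pedit word: offset 0 addresses
 * the source port (or the destination port when only the lower half of
 * the word is rewritten), offset 2 the destination port.
 */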
static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

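/* Record a mangled IPv4 source or destination address in the flow data,
 * selecting the field from the iphdr offset being rewritten.
 */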
static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

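/* If the netdevice is a DSA user port, swap it for its conduit device and
 * return the switch port index; -ENODEV otherwise (or when CONFIG_NET_DSA
 * is disabled).
 */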
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}

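/* Build the FOE hardware entry template from the parsed flow data: IB1/IB2
 * control words, egress PSE port, L2 addresses, VLAN tags and the DSA tag
 * bits when the egress device sits behind an offloaded switch port.
 */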
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	int dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u32 qdata, ports_pad, val;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
	      AIROHA_FOE_IB2_PSE_QOS;
	if (dsa_port >= 0)
		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);

	if (dev) {
		struct airoha_gdm_port *port = netdev_priv(dev);
		u8 pse_port;

		if (!airoha_is_valid_gdm_port(eth, port))
			return -EINVAL;

		if (dsa_port >= 0)
			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
		else
			pse_port = 2; /* uplink relies on GDM2 loopback */
		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
	if (type == PPE_PKT_TYPE_BRIDGE) {
		hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
		hwe->bridge.dest_mac_lo =
			get_unaligned_be16(data->eth.h_dest + 4);
		hwe->bridge.src_mac_hi =
			get_unaligned_be16(data->eth.h_source);
		hwe->bridge.src_mac_lo =
			get_unaligned_be32(data->eth.h_source + 2);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
	}

	if (data->vlan.num) {
		l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	} else if (dsa_port >= 0) {
		l2->etype = BIT(15) | BIT(dsa_port);
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		l2->etype = ETH_P_IPV6;
	} else {
		l2->etype = ETH_P_IP;
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}

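/* Compute the FOE table hash for an entry, mirroring the hardware hashing
 * of the IPv4/IPv6 tuples, so software flows land in the same bucket the
 * PPE will report.
 */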
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= PPE_NUM_ENTRIES - 1;

	return hash;
}

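/* Return the FOE entry for @hash. Entries kept in PPE internal SRAM are
 * first read back through the RAM access registers into the local FOE
 * buffer.
 */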
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	if (hash < PPE_SRAM_NUM_ENTRIES) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		struct airoha_eth *eth = ppe->eth;
		bool ppe2;
		u32 val;
		int i;

		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
		       hash >= PPE1_SRAM_NUM_ENTRIES;
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}

static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}

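/* Copy a software FOE entry into the DMA-coherent table and refresh its
 * binding timestamp; entries that live in PPE SRAM are also pushed to the
 * hardware through the NPU firmware.
 */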
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;

	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
			    hash >= PPE1_SRAM_NUM_ENTRIES;
		struct airoha_npu *npu;
		int err = -ENODEV;

		rcu_read_lock();
		npu = rcu_dereference(eth->npu);
		if (npu)
			err = npu->ops.ppe_foe_commit_entry(npu, addr,
							    sizeof(*hwe), hash,
							    ppe2);
		rcu_read_unlock();

		return err;
	}

	return 0;
}

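/* RX-path helper for unbound hashes: fetch the FOE entry the hardware
 * reported, look for a matching software flow and, if found, commit and
 * bind it to @hash.
 */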
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_entry *hwe;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry(ppe, hash);
	if (!hwe)
		goto unlock;

	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (airoha_ppe_foe_compare_entry(e, hwe)) {
			airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
			e->hash = hash;
			break;
		}
	}
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);

	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	spin_lock_bh(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
		e->hash = 0xffff;
	}

	spin_unlock_bh(&ppe_lock);
}

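/* FLOW_CLS_REPLACE handler: validate the match keys and actions, build the
 * FOE entry template and track it in the flow table keyed by the tc
 * cookie. Binding to a hardware hash happens later from the RX path.
 */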
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}

static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
	rhashtable_remove_fast(&eth->flow_table, &e->node,
			       airoha_flow_table_params);
	kfree(e);

	return 0;
}

static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
				       struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return airoha_ppe_flow_offload_replace(port, f);
	case FLOW_CLS_DESTROY:
		return airoha_ppe_flow_offload_destroy(port, f);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
					 struct airoha_npu *npu)
{
	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
	struct airoha_foe_entry *hwe = ppe->foe;

	if (airoha_ppe2_is_enabled(ppe->eth))
		sram_num_entries = sram_num_entries / 2;

	for (i = 0; i < sram_num_entries; i++)
		memset(&hwe[i], 0, sizeof(*hwe));

	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
					       PPE_SRAM_NUM_ENTRIES);
}

static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev);

	if (IS_ERR(npu)) {
		request_module("airoha-npu");
		npu = airoha_npu_get(eth->dev);
	}

	return npu;
}

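/* First-use setup of hardware flow offloading: grab the NPU, let its
 * firmware initialize the PPE, program the PPE registers, flush stale
 * SRAM entries and publish the NPU pointer for the datapath.
 */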
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	int err;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_hw_init(eth->ppe);
	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
	if (err)
		goto error_npu_put;

	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}

int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;
	int err = 0;

	if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&flow_offload_mutex);

	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(port, cls);

	mutex_unlock(&flow_offload_mutex);

	return err;
}

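/* Per-packet hook from the RX path: rate-limit checks to one per hash
 * every HZ / 10 jiffies and try to bind a pending software flow to the
 * FOE hash reported by the hardware.
 */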
void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
{
	u16 now, diff;

	if (hash > PPE_HASH_MASK)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, hash);
}

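/* Allocate PPE state: the DMA-coherent FOE table shared with the hardware,
 * the per-hash software flow lists, the tc flow rhashtable and the debugfs
 * interface.
 */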
int airoha_ppe_init(struct airoha_eth *eth)
{
	struct airoha_ppe *ppe;
	int foe_size, err;

	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return -ENOMEM;

	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
				       GFP_KERNEL);
	if (!ppe->foe)
		return -ENOMEM;

	ppe->eth = eth;
	eth->ppe = ppe;

	ppe->foe_flow = devm_kzalloc(eth->dev,
				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
				     GFP_KERNEL);
	if (!ppe->foe_flow)
		return -ENOMEM;

	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
	if (err)
		return err;

	err = airoha_ppe_debugfs_init(ppe);
	if (err)
		rhashtable_destroy(&eth->flow_table);

	return err;
}

void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	rcu_read_lock();
	npu = rcu_dereference(eth->npu);
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}
	rcu_read_unlock();

	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}