// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>

#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"

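/* flow_offload_mutex serializes the tc flow-block callbacks, while
 * ppe_lock guards the FOE table and the foe_flow/l2_flows lists (see
 * the lockdep_assert_held() annotations below).
 */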
static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};

static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
}

static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}

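/* Program per-PPE aging deltas, hash setup, per-port egress MTUs and
 * the SRAM/DRAM table split. With PPE2 enabled, the SRAM data entries
 * are shared between the two engines.
 */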
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_tb_size, sram_num_entries, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i;

	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	for (i = 0; i < PPE_NUM; i++) {
		int p;

		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}

	if (airoha_ppe2_is_enabled(eth)) {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	} else {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	}
}

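/* Apply an ethernet-address pmedit to the cached flow data: a 16bit
 * mangle mask selects a half-word (2 byte) copy, a zero mask copies
 * the full 32bit word; offsets beyond byte 8 are ignored.
 */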
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act,
				       void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

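/* The first 4 bytes of the TCP/UDP header hold the source and
 * destination ports: at offset 0 the mangle mask selects which of the
 * two is rewritten, while offset 2 always targets the destination
 * port.
 */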
static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
				       struct airoha_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

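/* For DSA user ports, return the port index and swap *dev for the
 * conduit device so the FOE entry is built on the GDM port backing
 * the switch; -ENODEV is returned for non-DSA devices.
 */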
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}

static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}

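/* Build a FOE entry from the parsed flow data: IB1 carries the bind
 * state, packet type and VLAN/PPPoE layout, IB2 the egress PSE port
 * and QoS bits, while the L2 info holds the rewritten MAC addresses,
 * VLAN tags and PPPoE session id.
 */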
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	int dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u32 qdata, ports_pad, val;
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
	      AIROHA_FOE_IB2_PSE_QOS;
	if (dsa_port >= 0)
		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);

	if (dev) {
		struct airoha_gdm_port *port = netdev_priv(dev);
		u8 pse_port;

		if (!airoha_is_valid_gdm_port(eth, port))
			return -EINVAL;

		if (dsa_port >= 0)
			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
		else
			pse_port = 2; /* uplink relies on GDM2 loopback */
		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);

		/* For downlink traffic, consume SRAM memory for the hw
		 * forwarding descriptor queue.
		 */
		if (airhoa_is_lan_gdm_port(port))
			val |= AIROHA_FOE_IB2_FAST_PATH;

		smac_id = port->id;
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
		l2->etype = ETH_P_IPV6;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
		l2->etype = ETH_P_IP;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		struct airoha_foe_mac_info *mac_info;

		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);

		mac_info = (struct airoha_foe_mac_info *)l2;
		mac_info->pppoe_id = data->pppoe.sid;
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
					    data->pppoe.sid);
	}

	if (data->vlan.num) {
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	}

	if (dsa_port >= 0) {
		l2->etype = BIT(dsa_port);
		l2->etype |= !data->vlan.num ? BIT(15) : 0;
	} else if (data->pppoe.num) {
		l2->etype = ETH_P_PPP_SES;
	}

	return 0;
}

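/* For IPv4 HNAPT the egress (translated) tuple lives in new_tuple;
 * everything else uses orig_tuple, and pure L3 route entries carry no
 * L4 ports.
 */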
static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}

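/* Fold the entry key into three 32bit words and mix them, mirroring
 * the hw hashing scheme so the sw lookup lands in the same bucket the
 * PPE reports; unsupported packet types map to PPE_HASH_MASK.
 */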
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= PPE_NUM_ENTRIES - 1;

	return hash;
}

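/* When PPE2 is enabled, the stats entries are split between the two
 * engines, so hashes belonging to the second PPE are rebased by the
 * PPE1 share before indexing the stats array.
 */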
static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
{
	if (!airoha_ppe2_is_enabled(ppe->eth))
		return hash;

	return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
					     : hash;
}

static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}

static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int i;

	for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}

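/* Wire the entry up for NPU flow accounting: stash the original
 * CHANNEL/QID in the ACTDP field and the forwarding bits of IB2 in
 * the meter field, then retarget IB2 at PSE port 6 so the flow is
 * steered through the accounting path before being forwarded.
 */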
static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	u8 nbq;

	index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}

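/* Hashes in the SRAM range are not directly CPU-visible: the entry
 * must first be copied into its DRAM shadow slot through the
 * PPE_RAM_CTRL read-back interface before it can be dereferenced.
 */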
static struct airoha_foe_entry *
airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
	lockdep_assert_held(&ppe_lock);

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		struct airoha_eth *eth = ppe->eth;
		bool ppe2;
		u32 val;
		int i;

		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
		       hash >= PPE1_SRAM_NUM_ENTRIES;
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}

struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	struct airoha_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);
	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	spin_unlock_bh(&ppe_lock);

	return hwe;
}

static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}

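/* Copy the entry body first and flip IB1 only after the wmb(), so the
 * hw can never observe a BIND state pointing at a half-written entry;
 * SRAM-backed hashes are additionally flushed through the NPU.
 */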
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
			    hash >= PPE1_SRAM_NUM_ENTRIES;

		err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
						    hash, ppe2);
	}
unlock:
	rcu_read_unlock();

	return err;
}

static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}

static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}

static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2)
		airoha_ppe_foe_remove_l2_flow(ppe, e);
	else
		airoha_ppe_foe_remove_flow(ppe, e);

	spin_unlock_bh(&ppe_lock);
}

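/* Clone the unbound hw entry for a newly observed L2 subflow: keep
 * its packet type and UDP bit, graft in the bridge L2 info and IB2
 * from the parent flow, and link it to the parent so both age out
 * together.
 */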
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e,
				    u32 hash)
{
	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
	struct airoha_foe_entry *hwe_p, hwe;
	struct airoha_flow_table_entry *f;
	int type;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe_p)
		return -EINVAL;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
	f->type = FLOW_TYPE_L2_SUBFLOW;
	f->hash = hash;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);

	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
		hwe.ipv6.ib2 = e->data.bridge.ib2;
		/* Setting smac_id to 0xf instructs the hw to keep the
		 * original source mac address.
		 */
		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
						    0xf);
	} else {
		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
		       sizeof(hwe.bridge.l2));
		hwe.bridge.ib2 = e->data.bridge.ib2;
		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
			       sizeof(hwe.ipv4.new_tuple));
	}

	hwe.bridge.data = e->data.bridge.data;
	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);

	return 0;
}

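/* Called from the RX path for packets carrying an unbound PPE hash:
 * try to bind a pending sw flow matching the hw entry, falling back
 * to an L2 bridge lookup keyed on the skb MAC addresses.
 */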
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe)
		goto unlock;

	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
			e->hash = 0xffff;
			continue;
		}

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
unlock:
	spin_unlock_bh(&ppe_lock);
}

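/* L2 flows are keyed on the MAC address pair: a duplicate insert
 * replaces the existing entry instead of failing.
 */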
static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	struct airoha_flow_table_entry *prev;

	e->type = FLOW_TYPE_L2;
	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
						 airoha_l2_flow_table_params);
	if (!prev)
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &e->l2_node,
				       airoha_l2_flow_table_params);
}

static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	u32 hash;

	if (type == PPE_PKT_TYPE_BRIDGE)
		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);

	hash = airoha_ppe_foe_get_entry_hash(&e->data);
	e->type = FLOW_TYPE_L4;
	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

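/* Timestamps are free-running counters truncated to the IB1 field
 * width: a negative delta indicates a counter wrap and is re-biased
 * back into the field range.
 */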
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	return idle < 0 ? idle + ts_mask + 1 : idle;
}

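/* Refresh an L2 flow from its hw subflows: drop subflows that left
 * BIND state and propagate the freshest bind timestamp to the parent
 * entry.
 */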
static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
				      struct airoha_flow_table_entry *e)
{
	airoha_ppe_foe_flow_entry_update(ppe, e);

	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}

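/* FLOW_CLS_REPLACE handler: parse the tc rule into airoha_flow_data,
 * build the FOE entry and commit it to the sw flow table; bridge (L2)
 * flows skip the L3/L4 tuple and mangle handling.
 */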
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 || data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}

static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
	rhashtable_remove_fast(&eth->flow_table, &e->node,
			       airoha_flow_table_params);
	kfree(e);

	return 0;
}

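/* 64bit counters are split across two memories: the NPU exposes the
 * low 32bit of the packet/byte counts, while ppe->foe_stats holds the
 * high words; combine both halves here.
 */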
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;

	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}

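/* FLOW_CLS_STATS handler: report deltas against the last snapshot so
 * repeated dumps do not double count, and derive lastused from the
 * entry idle time.
 */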
static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
					 struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	u32 idle;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
	f->stats.lastused = jiffies - idle * HZ;

	if (e->hash != 0xffff) {
		struct airoha_foe_stats64 stats = {};

		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
		f->stats.pkts += (stats.packets - e->stats.packets);
		f->stats.bytes += (stats.bytes - e->stats.bytes);
		e->stats = stats;
	}

	return 0;
}

static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
				       struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return airoha_ppe_flow_offload_replace(port, f);
	case FLOW_CLS_DESTROY:
		return airoha_ppe_flow_offload_destroy(port, f);
	case FLOW_CLS_STATS:
		return airoha_ppe_flow_offload_stats(port, f);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
					 struct airoha_npu *npu)
{
	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
	struct airoha_foe_entry *hwe = ppe->foe;

	if (airoha_ppe2_is_enabled(ppe->eth))
		sram_num_entries = sram_num_entries / 2;

	for (i = 0; i < sram_num_entries; i++)
		memset(&hwe[i], 0, sizeof(*hwe));

	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
					       PPE_SRAM_NUM_ENTRIES);
}

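/* The NPU module may not be loaded yet when the first flow shows up:
 * retry the handle lookup after an explicit request_module().
 */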
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev,
						&eth->ppe->foe_stats_dma);

	if (IS_ERR(npu)) {
		request_module("airoha-npu");
		npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
	}

	return npu;
}

static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	int err;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_hw_init(eth->ppe);
	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);

	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}

int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct flow_cls_offload *cls = type_data;
	struct airoha_eth *eth = port->qdma->eth;
	int err = 0;

	mutex_lock(&flow_offload_mutex);

	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(port, cls);

	mutex_unlock(&flow_offload_mutex);

	return err;
}

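/* RX-path hook: rate-limit FOE insertion attempts to one per hash
 * every HZ / 10 jiffies so the PPE is not hammered on every packet.
 */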
void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
			  u16 hash)
{
	u16 now, diff;

	if (hash > PPE_HASH_MASK)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, skb, hash);
}

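/* Program the per-port source-mac table (UPDMEM): the low four bytes
 * of the MAC address are written first, then the two high bytes at
 * word offset 1, both indexed by port id.
 */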
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct net_device *dev = port->dev;
	const u8 *addr = dev->dev_addr;
	u32 val;

	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);

	val = (addr[0] << 8) | addr[1];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}

int airoha_ppe_init(struct airoha_eth *eth)
{
	struct airoha_ppe *ppe;
	int foe_size, err;

	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return -ENOMEM;

	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
				       GFP_KERNEL);
	if (!ppe->foe)
		return -ENOMEM;

	ppe->eth = eth;
	eth->ppe = ppe;

	ppe->foe_flow = devm_kzalloc(eth->dev,
				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
				     GFP_KERNEL);
	if (!ppe->foe_flow)
		return -ENOMEM;

	foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
	if (foe_size) {
		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
						     &ppe->foe_stats_dma,
						     GFP_KERNEL);
		if (!ppe->foe_stats)
			return -ENOMEM;
	}

	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
	if (err)
		return err;

	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
	if (err)
		goto error_flow_table_destroy;

	err = airoha_ppe_debugfs_init(ppe);
	if (err)
		goto error_l2_flow_table_destroy;

	return 0;

error_l2_flow_table_destroy:
	rhashtable_destroy(&ppe->l2_flows);
error_flow_table_destroy:
	rhashtable_destroy(&eth->flow_table);

	return err;
}

void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	rcu_read_lock();
	npu = rcu_dereference(eth->npu);
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}
	rcu_read_unlock();

	rhashtable_destroy(&eth->ppe->l2_flows);
	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}