1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <net/flow_offload.h>
6 #include <net/netfilter/nf_tables.h>
7 #include <net/netfilter/nf_tables_offload.h>
8 #include <net/pkt_cls.h>
9
nft_flow_rule_alloc(int num_actions)10 static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
11 {
12 struct nft_flow_rule *flow;
13
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
15 if (!flow)
16 return NULL;
17
18 flow->rule = flow_rule_alloc(num_actions);
19 if (!flow->rule) {
20 kfree(flow);
21 return NULL;
22 }
23
24 flow->rule->match.dissector = &flow->match.dissector;
25 flow->rule->match.mask = &flow->match.mask;
26 flow->rule->match.key = &flow->match.key;
27
28 return flow;
29 }
30
nft_flow_rule_set_addr_type(struct nft_flow_rule * flow,enum flow_dissector_key_id addr_type)31 void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
32 enum flow_dissector_key_id addr_type)
33 {
34 struct nft_flow_match *match = &flow->match;
35 struct nft_flow_key *mask = &match->mask;
36 struct nft_flow_key *key = &match->key;
37
38 if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))
39 return;
40
41 key->control.addr_type = addr_type;
42 mask->control.addr_type = 0xffff;
43 match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
44 match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
45 offsetof(struct nft_flow_key, control);
46 }
47
/* Temporary value/mask pair used to hold an ethertype while
 * nft_flow_rule_transfer_vlan() shuffles n_proto and vlan_tpid fields.
 */
struct nft_offload_ethertype {
	__be16 value;
	__be16 mask;
};
52
/* Rearrange ethertype information between the basic, vlan and cvlan
 * match keys so it matches the flow dissector's layout: the ethertype
 * is pushed down one level per VLAN tag matched, and the old value is
 * swapped into the vacated slot from the @ethertype stash.
 */
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
					struct nft_flow_rule *flow)
{
	struct nft_flow_match *match = &flow->match;
	/* Stash the basic n_proto before anything is overwritten. */
	struct nft_offload_ethertype ethertype = {
		.value = match->key.basic.n_proto,
		.mask = match->mask.basic.n_proto,
	};

	if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) &&
	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
		/* VLAN key carries an 802.1Q/AD tpid: rotate
		 * cvlan -> basic, vlan -> cvlan, stash -> vlan and
		 * enable the CVLAN dissector key.
		 */
		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
			offsetof(struct nft_flow_key, cvlan);
		match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);
	} else if (match->dissector.used_keys &
		   BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) &&
		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
		/* Basic key carries the tpid: swap it with the VLAN key
		 * and enable the VLAN dissector key.
		 */
		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
			offsetof(struct nft_flow_key, vlan);
		match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);
	}
}
87
/* Translate an nft rule into a flow rule suitable for hardware offload.
 *
 * Walks the rule's expressions twice: a first pass counts the
 * offloadable actions (via ->offload_action()), a second pass invokes
 * each expression's ->offload() callback to populate the match and
 * action list.  Finally, ethertype/VLAN fields are rearranged and the
 * L3 protocol collected by the expressions is recorded in flow->proto.
 *
 * Returns the new flow rule, or an ERR_PTR():
 *  -EOPNOTSUPP if no action is offloadable or an expression has no
 *              ->offload() callback,
 *  -ENOMEM     on allocation failure,
 *  or whatever error an ->offload() callback returned.
 */
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	/* Pass 1: count offloadable actions to size the flow rule. */
	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_action &&
		    expr->ops->offload_action(expr))
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;	/* ctx is NULL; kfree(NULL) is a no-op */
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	/* Pass 2: let each expression fill in match and actions. */
	while (nft_expr_more(rule, expr)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	nft_flow_rule_transfer_vlan(ctx, flow);

	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}
145
nft_flow_rule_destroy(struct nft_flow_rule * flow)146 void nft_flow_rule_destroy(struct nft_flow_rule *flow)
147 {
148 struct flow_action_entry *entry;
149 int i;
150
151 flow_action_for_each(i, entry, &flow->rule->action) {
152 switch (entry->id) {
153 case FLOW_ACTION_REDIRECT:
154 case FLOW_ACTION_MIRRED:
155 dev_put(entry->dev);
156 break;
157 default:
158 break;
159 }
160 }
161 kfree(flow->rule);
162 kfree(flow);
163 }
164
/* Announce which kind of protocol dependency (network/transport) the
 * next nft_offload_update_dependency() call will provide.
 */
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}
170
/* Store protocol dependency data in the offload context: the L3 number
 * (__u16) for a network dependency or the L4 protocol (__u8) for a
 * transport dependency, depending on the type previously set via
 * nft_offload_set_dependency().  The WARN_ON()s flag callers that pass
 * a length not matching the expected field size.  The dependency type
 * is always reset to UNSPEC afterwards.
 */
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}
188
/* Fill in the common part of a flow classifier offload request. */
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}
197
nft_setup_cb_call(enum tc_setup_type type,void * type_data,struct list_head * cb_list)198 static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
199 struct list_head *cb_list)
200 {
201 struct flow_block_cb *block_cb;
202 int err;
203
204 list_for_each_entry(block_cb, cb_list, list) {
205 err = block_cb->cb(type, type_data, block_cb->cb_priv);
206 if (err < 0)
207 return err;
208 }
209 return 0;
210 }
211
nft_chain_offload_priority(const struct nft_base_chain * basechain)212 static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
213 {
214 if (basechain->ops.priority <= 0 ||
215 basechain->ops.priority > USHRT_MAX)
216 return -1;
217
218 return 0;
219 }
220
nft_chain_offload_support(const struct nft_base_chain * basechain)221 bool nft_chain_offload_support(const struct nft_base_chain *basechain)
222 {
223 struct nf_hook_ops *ops;
224 struct net_device *dev;
225 struct nft_hook *hook;
226
227 if (nft_chain_offload_priority(basechain) < 0)
228 return false;
229
230 list_for_each_entry(hook, &basechain->hook_list, list) {
231 list_for_each_entry(ops, &hook->ops_list, list) {
232 if (ops->pf != NFPROTO_NETDEV ||
233 ops->hooknum != NF_NETDEV_INGRESS)
234 return false;
235
236 dev = ops->dev;
237 if (!dev->netdev_ops->ndo_setup_tc &&
238 !flow_indr_dev_exists())
239 return false;
240 }
241 }
242
243 return true;
244 }
245
nft_flow_cls_offload_setup(struct flow_cls_offload * cls_flow,const struct nft_base_chain * basechain,const struct nft_rule * rule,const struct nft_flow_rule * flow,struct netlink_ext_ack * extack,enum flow_cls_command command)246 static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
247 const struct nft_base_chain *basechain,
248 const struct nft_rule *rule,
249 const struct nft_flow_rule *flow,
250 struct netlink_ext_ack *extack,
251 enum flow_cls_command command)
252 {
253 __be16 proto = ETH_P_ALL;
254
255 memset(cls_flow, 0, sizeof(*cls_flow));
256
257 if (flow)
258 proto = flow->proto;
259
260 nft_flow_offload_common_init(&cls_flow->common, proto,
261 basechain->ops.priority, extack);
262 cls_flow->command = command;
263 cls_flow->cookie = (unsigned long) rule;
264 if (flow)
265 cls_flow->rule = flow->rule;
266 }
267
nft_flow_offload_cmd(const struct nft_chain * chain,const struct nft_rule * rule,struct nft_flow_rule * flow,enum flow_cls_command command,struct flow_cls_offload * cls_flow)268 static int nft_flow_offload_cmd(const struct nft_chain *chain,
269 const struct nft_rule *rule,
270 struct nft_flow_rule *flow,
271 enum flow_cls_command command,
272 struct flow_cls_offload *cls_flow)
273 {
274 struct netlink_ext_ack extack = {};
275 struct nft_base_chain *basechain;
276
277 if (!nft_is_base_chain(chain))
278 return -EOPNOTSUPP;
279
280 basechain = nft_base_chain(chain);
281 nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
282 command);
283
284 return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
285 &basechain->flow_block.cb_list);
286 }
287
/* Convenience wrapper around nft_flow_offload_cmd() for callers that do
 * not need to inspect the resulting flow_cls_offload (e.g. REPLACE and
 * DESTROY, where only the return code matters).
 */
static int nft_flow_offload_rule(const struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow;

	return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}
297
nft_flow_rule_stats(const struct nft_chain * chain,const struct nft_rule * rule)298 int nft_flow_rule_stats(const struct nft_chain *chain,
299 const struct nft_rule *rule)
300 {
301 struct flow_cls_offload cls_flow = {};
302 struct nft_expr *expr, *next;
303 int err;
304
305 err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
306 &cls_flow);
307 if (err < 0)
308 return err;
309
310 nft_rule_for_each_expr(expr, next, rule) {
311 if (expr->ops->offload_stats)
312 expr->ops->offload_stats(expr, &cls_flow.stats);
313 }
314
315 return 0;
316 }
317
/* Take ownership of the driver's block callbacks by splicing them onto
 * the basechain's callback list.
 */
static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}
324
/* Tear down the offload binding: first replay FLOW_CLS_DESTROY for
 * every rule in the chain through the callbacks still on @bo->cb_list,
 * then unlink and free those callbacks.  The destroy pass must run
 * before the callbacks are freed.
 */
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		/* Best effort: errors from individual callbacks are ignored. */
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}
349
nft_block_setup(struct nft_base_chain * basechain,struct flow_block_offload * bo,enum flow_block_command cmd)350 static int nft_block_setup(struct nft_base_chain *basechain,
351 struct flow_block_offload *bo,
352 enum flow_block_command cmd)
353 {
354 int err;
355
356 switch (cmd) {
357 case FLOW_BLOCK_BIND:
358 err = nft_flow_offload_bind(bo, basechain);
359 break;
360 case FLOW_BLOCK_UNBIND:
361 err = nft_flow_offload_unbind(bo, basechain);
362 break;
363 default:
364 WARN_ON_ONCE(1);
365 err = -EOPNOTSUPP;
366 }
367
368 return err;
369 }
370
/* Initialize a flow block offload request for @basechain: zero it, set
 * the command and ingress binder type, and prepare an empty callback
 * list for the driver to fill in.
 */
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net = net;
	bo->block = &basechain->flow_block;
	bo->command = cmd;
	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack = extack;
	bo->cb_list_head = &basechain->flow_block.cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}
386
nft_block_offload_cmd(struct nft_base_chain * chain,struct net_device * dev,enum flow_block_command cmd)387 static int nft_block_offload_cmd(struct nft_base_chain *chain,
388 struct net_device *dev,
389 enum flow_block_command cmd)
390 {
391 struct netlink_ext_ack extack = {};
392 struct flow_block_offload bo;
393 int err;
394
395 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
396
397 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
398 if (err < 0)
399 return err;
400
401 return nft_block_setup(chain, &bo, cmd);
402 }
403
/* Cleanup callback run by the indirect flow block infrastructure when
 * an indirect block callback goes away.  Rebuilds an UNBIND request,
 * moves the callback onto its list and lets nft_flow_offload_unbind()
 * destroy the offloaded rules and free it — all under the per-netns
 * commit mutex.
 */
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	/* Detach from the driver's bookkeeping before unbinding. */
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}
422
nft_indr_block_offload_cmd(struct nft_base_chain * basechain,struct net_device * dev,enum flow_block_command cmd)423 static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
424 struct net_device *dev,
425 enum flow_block_command cmd)
426 {
427 struct netlink_ext_ack extack = {};
428 struct flow_block_offload bo;
429 int err;
430
431 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
432
433 err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
434 nft_indr_block_cleanup);
435 if (err < 0)
436 return err;
437
438 if (list_empty(&bo.cb_list))
439 return -EOPNOTSUPP;
440
441 return nft_block_setup(basechain, &bo, cmd);
442 }
443
nft_chain_offload_cmd(struct nft_base_chain * basechain,struct net_device * dev,enum flow_block_command cmd)444 static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
445 struct net_device *dev,
446 enum flow_block_command cmd)
447 {
448 int err;
449
450 if (dev->netdev_ops->ndo_setup_tc)
451 err = nft_block_offload_cmd(basechain, dev, cmd);
452 else
453 err = nft_indr_block_offload_cmd(basechain, dev, cmd);
454
455 return err;
456 }
457
/* Apply @cmd to the devices hooked by @basechain.  When @this_dev is
 * non-NULL, only that device is touched.  On a BIND failure across all
 * devices (this_dev == NULL), the @i devices already bound are unbound
 * again before the error is returned; for a single device the error is
 * returned directly.
 */
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct nf_hook_ops *ops;
	struct nft_hook *hook;
	int err, i = 0;	/* i counts successfully processed devices */

	list_for_each_entry(hook, &basechain->hook_list, list) {
		list_for_each_entry(ops, &hook->ops_list, list) {
			if (this_dev && this_dev != ops->dev)
				continue;

			err = nft_chain_offload_cmd(basechain, ops->dev, cmd);
			if (err < 0 && cmd == FLOW_BLOCK_BIND) {
				if (!this_dev)
					goto err_flow_block;

				return err;
			}
			i++;
		}
	}

	return 0;

err_flow_block:
	/* Roll back: unbind the first i devices that were bound. */
	list_for_each_entry(hook, &basechain->hook_list, list) {
		list_for_each_entry(ops, &hook->ops_list, list) {
			if (i-- <= 0)
				break;

			nft_chain_offload_cmd(basechain, ops->dev,
					      FLOW_BLOCK_UNBIND);
		}
	}
	return err;
}
496
/* Bind or unbind offload for a whole chain.  @ppolicy optionally
 * overrides the basechain's stored policy (used while committing a
 * transaction that also changes the policy).
 */
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 chain_policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	chain_policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && chain_policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}
515
/* Undo offload operations already committed before @trans failed:
 * walk the commit list backwards from the failing transaction and
 * apply the inverse operation of each netdev-family entry (NEWCHAIN ->
 * UNBIND, DELCHAIN -> BIND, NEWRULE -> DESTROY, DELRULE -> REPLACE).
 * A failure while rolling back cannot be recovered; warn and stop.
 */
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
		if (trans->table->family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Re-install the rule that the commit had removed. */
			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}
565
/* Push the pending transaction list to hardware.  Only netdev-family
 * entries on chains flagged NFT_CHAIN_HW_OFFLOAD are considered.  On
 * the first failure, the already-applied operations are rolled back via
 * nft_flow_rule_offload_abort() and the error is returned.
 */
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->table->family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			/* Only plain appends can be offloaded; replace or
			 * insert-at-position is not supported.
			 */
			if (trans->flags & NLM_F_REPLACE ||
			    !(trans->flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	return err;
}
627
__nft_offload_get_chain(const struct nftables_pernet * nft_net,struct net_device * dev)628 static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
629 struct net_device *dev)
630 {
631 struct nft_base_chain *basechain;
632 struct nft_hook *hook, *found;
633 const struct nft_table *table;
634 struct nft_chain *chain;
635
636 list_for_each_entry(table, &nft_net->tables, list) {
637 if (table->family != NFPROTO_NETDEV)
638 continue;
639
640 list_for_each_entry(chain, &table->chains, list) {
641 if (!nft_is_base_chain(chain) ||
642 !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
643 continue;
644
645 found = NULL;
646 basechain = nft_base_chain(chain);
647 list_for_each_entry(hook, &basechain->hook_list, list) {
648 if (!nft_hook_find_ops(hook, dev))
649 continue;
650
651 found = hook;
652 break;
653 }
654 if (!found)
655 continue;
656
657 return chain;
658 }
659 }
660
661 return NULL;
662 }
663
nft_offload_netdev_event(struct notifier_block * this,unsigned long event,void * ptr)664 static int nft_offload_netdev_event(struct notifier_block *this,
665 unsigned long event, void *ptr)
666 {
667 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
668 struct nftables_pernet *nft_net;
669 struct net *net = dev_net(dev);
670 struct nft_chain *chain;
671
672 if (event != NETDEV_UNREGISTER)
673 return NOTIFY_DONE;
674
675 nft_net = nft_pernet(net);
676 mutex_lock(&nft_net->commit_mutex);
677 chain = __nft_offload_get_chain(nft_net, dev);
678 if (chain)
679 nft_flow_block_chain(nft_base_chain(chain), dev,
680 FLOW_BLOCK_UNBIND);
681
682 mutex_unlock(&nft_net->commit_mutex);
683
684 return NOTIFY_DONE;
685 }
686
/* Notifier used to react to net_device unregistration. */
static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};
690
/* Register the netdevice notifier; returns the registration result. */
int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}
695
/* Unregister the netdevice notifier on module teardown. */
void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}
700