// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */
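/* net/sched/act_gate.c - time-gated packet admission (an IEEE 802.1Qci-style
 * stream gate) implemented as a tc action.
 *
 * A gate action runs a cyclic schedule of open/close entries against a
 * reference clock. While the current entry is closed, or once the entry's
 * octet budget is exhausted, matching packets are dropped.
 *
 * A minimal usage sketch from userspace (assuming the iproute2 "gate"
 * action syntax; addresses and times below are illustrative only):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip \
 *		flower src_ip 192.168.0.20 \
 *		action gate index 1 clockid CLOCK_TAI \
 *		sched-entry open  200000000 -1 8000000 \
 *		sched-entry close 100000000 -1 -1
 *
 * Each sched-entry is <open|close> <interval in ns> <internal priority>
 * <max octets>, with -1 meaning "wildcard"/"no limit".
 */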

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
#include <net/tc_wrapper.h>

static struct tc_action_ops act_gate_ops;

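/* Return the current time in the clock domain the schedule runs on.
 * TK_OFFS_MAX is used as a sentinel for CLOCK_MONOTONIC (see the clockid
 * switch in tcf_gate_init()), in which case the raw monotonic time is
 * returned unconverted.
 */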
static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

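/* Compute the start of the first schedule cycle that has not yet begun:
 * the base time itself if it is still in the future, otherwise the base
 * time advanced by a whole number of cycles so that it lands strictly
 * after "now".
 */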
static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

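/* Arm the schedule timer for the earlier of "start" and any expiry that
 * is already programmed; an expiry of 0 means the timer was never armed,
 * so it is treated as "no expiry" (KTIME_MAX).
 */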
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

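/* Timer callback that advances the schedule: it applies the state of the
 * entry that just became current (gate open/closed, octet budget), picks
 * the next entry in the circular list, and re-arms itself for the new
 * close time. If the clock has jumped past the computed close time, the
 * close time is resynchronized to the next cycle boundary.
 */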
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

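/* Per-packet fast path. While the action is still pending its first
 * cycle, the configured verdict is returned unconditionally; otherwise
 * packets are dropped while the gate is closed, and packets that would
 * push the current entry past its octet budget are counted as overlimits
 * and dropped.
 */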
TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);
	int action = READ_ONCE(gact->tcf_action);

	tcf_lastuse_update(&gact->tcf_tm);
	tcf_action_update_bstats(&gact->common, skb);

	spin_lock(&gact->tcf_lock);
	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) {
		spin_unlock(&gact->tcf_lock);
		goto drop;
	}

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			spin_unlock(&gact->tcf_lock);
			goto overlimit;
		}
	}
	spin_unlock(&gact->tcf_lock);

	return action;

overlimit:
	tcf_action_inc_overlimit_qstats(&gact->common);
drop:
	tcf_action_inc_drop_qstats(&gact->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX] = { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE] = { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL] = { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV] = { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS] = { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS] =
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY] = { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST] = { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME] = { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME] = { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT] = { .type = NLA_U64 },
	[TCA_GATE_FLAGS] = { .type = NLA_U32 },
	[TCA_GATE_CLOCKID] = { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);

	entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
					       -1);

	return 0;
}

static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

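/* (Re)program the schedule timer parameters. On a replace whose base
 * time, clock id, and offset are all unchanged there is nothing to do.
 * Otherwise the timer is cancelled before being set up again; tcf_lock
 * must be dropped around hrtimer_cancel() because gate_timer_func()
 * takes the same lock, so holding it here could deadlock against a
 * concurrently running callback.
 */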
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_setup(&gact->hitimer, gate_timer_func, clockid, HRTIMER_MODE_ABS_SOFT);
}

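/* Create or replace a gate action from its netlink attributes: validate
 * the clock id, claim an index in the action IDR, parse the schedule,
 * and arm the timer for the first cycle with GATE_ACT_PENDING set so
 * the fast path passes packets through until the schedule has actually
 * started.
 */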
static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return ACT_P_BOUND;

	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_gate_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

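	/* If no explicit cycle time was given, derive it as the sum of
	 * all entry intervals; a schedule whose total is zero is invalid.
	 */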
	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}

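/* Teardown: stop the schedule timer before freeing the entry list that
 * its callback walks.
 */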
static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

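/* Emit one schedule entry as a TCA_GATE_ONE_ENTRY nest in the dump. */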
static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index = gact->tcf_index,
		.refcnt = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

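/* Fold hardware-offload counters into the software stats and keep the
 * most recent lastuse timestamp.
 */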
static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

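/* Translate the action into a flow_action_entry for hardware offload.
 * On bind the full schedule is copied out (with a destructor so the copy
 * is freed along with the flow entry); otherwise only the action id is
 * filled in.
 */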
static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}

static struct tc_action_ops act_gate_ops = {
	.kind = "gate",
	.id = TCA_ID_GATE,
	.owner = THIS_MODULE,
	.act = tcf_gate_act,
	.dump = tcf_gate_dump,
	.init = tcf_gate_init,
	.cleanup = tcf_gate_cleanup,
	.stats_update = tcf_gate_stats_update,
	.get_fill_size = tcf_gate_get_fill_size,
	.offload_act_setup = tcf_gate_offload_act_setup,
	.size = sizeof(struct tcf_gate),
};
MODULE_ALIAS_NET_ACT("gate");

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");