// SPDX-License-Identifier: GPL-2.0-only
/*
 * xt_hashlimit - Netfilter module to limit the number of packets per time
 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif

#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <uapi/linux/netfilter/xt_hashlimit.h>

#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
			  XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
			  XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\
			  XT_HASHLIMIT_RATE_MATCH)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");

struct hashlimit_net {
	struct hlist_head	htables;
	struct proc_dir_entry	*ipt_hashlimit;
	struct proc_dir_entry	*ip6t_hashlimit;
};

static unsigned int hashlimit_net_id;
static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
{
	return net_generic(net, hashlimit_net_id);
}

/* need to declare this at the top */
static const struct seq_operations dl_seq_ops_v2;
static const struct seq_operations dl_seq_ops_v1;
static const struct seq_operations dl_seq_ops;

/* hash table crap */
struct dsthash_dst {
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ip;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
		struct {
			__be32 src[4];
			__be32 dst[4];
		} ip6;
#endif
	};
	__be16 src_port;
	__be16 dst_port;
};

struct dsthash_ent {
	/* static / read-only parts in the beginning */
	struct hlist_node node;
	struct dsthash_dst dst;

	/* modified structure members in the end */
	spinlock_t lock;
	unsigned long expires;		/* precalculated expiry time */
	struct {
		unsigned long prev;	/* last modification */
		union {
			struct {
				u_int64_t credit;
				u_int64_t credit_cap;
				u_int64_t cost;
			};
			struct {
				u_int32_t interval, prev_window;
				u_int64_t current_rate;
				u_int64_t rate;
				int64_t burst;
			};
		};
	} rateinfo;
	struct rcu_head rcu;
};

struct xt_hashlimit_htable {
	struct hlist_node node;		/* global list of all htables */
	refcount_t use;
	u_int8_t family;
	bool rnd_initialized;

	struct hashlimit_cfg3 cfg;	/* config */

	/* used internally */
	spinlock_t lock;		/* lock for list_head */
	u_int32_t rnd;			/* random seed for hash */
	unsigned int count;		/* number of entries in table */
	struct delayed_work gc_work;

	/* seq_file stuff */
	struct proc_dir_entry *pde;
	const char *name;
	struct net *net;

	struct hlist_head hash[];	/* hashtable itself */
};

static int
cfg_copy(struct hashlimit_cfg3 *to, const void *from, int revision)
{
	if (revision == 1) {
		struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from;

		to->mode = cfg->mode;
		to->avg = cfg->avg;
		to->burst = cfg->burst;
		to->size = cfg->size;
		to->max = cfg->max;
		to->gc_interval = cfg->gc_interval;
		to->expire = cfg->expire;
		to->srcmask = cfg->srcmask;
		to->dstmask = cfg->dstmask;
	} else if (revision == 2) {
		struct hashlimit_cfg2 *cfg = (struct hashlimit_cfg2 *)from;

		to->mode = cfg->mode;
		to->avg = cfg->avg;
		to->burst = cfg->burst;
		to->size = cfg->size;
		to->max = cfg->max;
		to->gc_interval = cfg->gc_interval;
		to->expire = cfg->expire;
		to->srcmask = cfg->srcmask;
		to->dstmask = cfg->dstmask;
	} else if (revision == 3) {
		memcpy(to, from, sizeof(struct hashlimit_cfg3));
	} else {
		return -EINVAL;
	}

	return 0;
}

static DEFINE_MUTEX(hashlimit_mutex);	/* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;

static inline bool dst_cmp(const struct dsthash_ent *ent,
			   const struct dsthash_dst *b)
{
	return !memcmp(&ent->dst, b, sizeof(ent->dst));
}

static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
	u_int32_t hash = jhash2((const u32 *)dst,
				sizeof(*dst)/sizeof(u32),
				ht->rnd);
	/*
	 * Instead of returning hash % ht->cfg.size (implying a divide)
	 * we return the high 32 bits of the (hash * ht->cfg.size) that will
	 * give results between [0 and cfg.size-1] and same hash distribution,
	 * but using a multiply, less expensive than a divide
	 */
	return reciprocal_scale(hash, ht->cfg.size);
}
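/*
 * Worked example (editorial illustration, not from the original source):
 * reciprocal_scale(val, ep_ro) is defined in <linux/kernel.h> as
 * ((u64)val * ep_ro) >> 32.  With cfg.size = 16 buckets, any 32-bit
 * hash lands in [0, 15]; e.g. hash = 0x80000000 maps to bucket
 * (0x80000000ULL * 16) >> 32 = 8, using one multiply instead of a
 * modulo.
 */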
static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
	     const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;
	u_int32_t hash = hash_dst(ht, dst);

	if (!hlist_empty(&ht->hash[hash])) {
		hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
			if (dst_cmp(ent, dst)) {
				spin_lock(&ent->lock);
				return ent;
			}
	}
	return NULL;
}

/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
		   const struct dsthash_dst *dst, bool *race)
{
	struct dsthash_ent *ent;

	spin_lock(&ht->lock);

	/* Two or more packets may race to create the same entry in the
	 * hashtable, double check if this packet lost race.
	 */
	ent = dsthash_find(ht, dst);
	if (ent != NULL) {
		spin_unlock(&ht->lock);
		*race = true;
		return ent;
	}

	/* initialize hash with random val at the time we allocate
	 * the first hashtable entry */
	if (unlikely(!ht->rnd_initialized)) {
		get_random_bytes(&ht->rnd, sizeof(ht->rnd));
		ht->rnd_initialized = true;
	}

	if (ht->cfg.max && ht->count >= ht->cfg.max) {
		/* FIXME: do something. question is what.. */
		net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
		ent = NULL;
	} else
		ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
	if (ent) {
		memcpy(&ent->dst, dst, sizeof(ent->dst));
		spin_lock_init(&ent->lock);

		spin_lock(&ent->lock);
		hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
		ht->count++;
	}
	spin_unlock(&ht->lock);
	return ent;
}
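/*
 * Caller contract sketch (editorial illustration): both dsthash_find()
 * and dsthash_alloc_init() return with ent->lock held on success, so
 * the expected calling pattern in the match path is:
 *
 *	dh = dsthash_find(hinfo, &dst);
 *	if (dh == NULL)
 *		dh = dsthash_alloc_init(hinfo, &dst, &race);
 *	if (dh != NULL) {
 *		... update dh->rateinfo under dh->lock ...
 *		spin_unlock(&dh->lock);
 *	}
 */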
static void dsthash_free_rcu(struct rcu_head *head)
{
	struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);

	kmem_cache_free(hashlimit_cachep, ent);
}

static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
	hlist_del_rcu(&ent->node);
	call_rcu(&ent->rcu, dsthash_free_rcu);
	ht->count--;
}
static void htable_gc(struct work_struct *work);

static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
			 const char *name, u_int8_t family,
			 struct xt_hashlimit_htable **out_hinfo,
			 int revision)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;
	const struct seq_operations *ops;
	unsigned int size, i;
	unsigned long nr_pages = totalram_pages();
	int ret;

	if (cfg->size) {
		size = cfg->size;
	} else {
		size = (nr_pages << PAGE_SHIFT) / 16384 /
		       sizeof(struct hlist_head);
		if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
			size = 8192;
		if (size < 16)
			size = 16;
	}
	/* FIXME: don't use vmalloc() here or anywhere else -HW */
	hinfo = vmalloc(struct_size(hinfo, hash, size));
	if (hinfo == NULL)
		return -ENOMEM;
	*out_hinfo = hinfo;

	/* copy match config into hashtable config */
	ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
	if (ret) {
		vfree(hinfo);
		return ret;
	}

	hinfo->cfg.size = size;
	if (hinfo->cfg.max == 0)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	refcount_set(&hinfo->use, 1);
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = false;
	hinfo->name = kstrdup(name, GFP_KERNEL);
	if (!hinfo->name) {
		vfree(hinfo);
		return -ENOMEM;
	}
	spin_lock_init(&hinfo->lock);

	switch (revision) {
	case 1:
		ops = &dl_seq_ops_v1;
		break;
	case 2:
		ops = &dl_seq_ops_v2;
		break;
	default:
		ops = &dl_seq_ops;
	}

	hinfo->pde = proc_create_seq_data(name, 0,
		(family == NFPROTO_IPV4) ?
		hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
		ops, hinfo);
	if (hinfo->pde == NULL) {
		kfree(hinfo->name);
		vfree(hinfo);
		return -ENOMEM;
	}
	hinfo->net = net;

	INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
	queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
			   msecs_to_jiffies(hinfo->cfg.gc_interval));

	hlist_add_head(&hinfo->node, &hashlimit_net->htables);

	return 0;
}
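/*
 * Sizing sketch (editorial illustration of the default-size arithmetic
 * in htable_create() above, assuming a 64-bit kernel with 4 KiB pages
 * and 4 GiB of RAM): nr_pages = 1048576, so
 * (nr_pages << PAGE_SHIFT) / 16384 / sizeof(struct hlist_head)
 * = 2^32 / 2^14 / 8 = 32768 buckets; since nr_pages exceeds one GiB
 * worth of pages (262144), the size is then clamped to 8192 buckets,
 * and cfg.max defaults to 8 * 8192 = 65536 entries.
 */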
static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
{
	unsigned int i;

	for (i = 0; i < ht->cfg.size; i++) {
		struct hlist_head *head = &ht->hash[i];
		struct dsthash_ent *dh;
		struct hlist_node *n;

		if (hlist_empty(head))
			continue;

		spin_lock_bh(&ht->lock);
		hlist_for_each_entry_safe(dh, n, head, node) {
			if (time_after_eq(jiffies, dh->expires) || select_all)
				dsthash_free(ht, dh);
		}
		spin_unlock_bh(&ht->lock);
		cond_resched();
	}
}

static void htable_gc(struct work_struct *work)
{
	struct xt_hashlimit_htable *ht;

	ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);

	htable_selective_cleanup(ht, false);

	queue_delayed_work(system_power_efficient_wq,
			   &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
}

static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
	struct proc_dir_entry *parent;

	if (hinfo->family == NFPROTO_IPV4)
		parent = hashlimit_net->ipt_hashlimit;
	else
		parent = hashlimit_net->ip6t_hashlimit;

	if (parent != NULL)
		remove_proc_entry(hinfo->name, parent);
}

static struct xt_hashlimit_htable *htable_find_get(struct net *net,
						   const char *name,
						   u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;

	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
		if (!strcmp(name, hinfo->name) &&
		    hinfo->family == family) {
			refcount_inc(&hinfo->use);
			return hinfo;
		}
	}
	return NULL;
}

static void htable_put(struct xt_hashlimit_htable *hinfo)
{
	if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
		hlist_del(&hinfo->node);
		htable_remove_proc_entry(hinfo);
		mutex_unlock(&hashlimit_mutex);

		cancel_delayed_work_sync(&hinfo->gc_work);
		htable_selective_cleanup(hinfo, true);
		kfree(hinfo->name);
		vfree(hinfo);
	}
}

/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If your credit balance exceeds the cap, the extra credit is
   discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32, i.e.:
*/
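/*
 * Worked example (editorial illustration, assuming HZ=1000 and
 * revision 1): MAX_CPJ_v1 = 0xFFFFFFFF / 86400000 = 49, so
 * CREDITS_PER_JIFFY_v1 is the power of two below that, 32 - meaning
 * 32000 credits accrue per second.  For `--hashlimit-upto 5/sec'
 * userspace passes avg = XT_HASHLIMIT_SCALE / 5 = 2000, which
 * user2credits() below turns into a per-packet cost of
 * 2000 * (32000 / 10000) = 6000 credits, i.e. roughly five passing
 * packets per second once the initial burst credit is spent.
 */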
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define _POW2_BELOW64(x) (_POW2_BELOW32(x)|_POW2_BELOW32((x)>>32))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define POW2_BELOW64(x) ((_POW2_BELOW64(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW64(MAX_CPJ)
#define CREDITS_PER_JIFFY_v1 POW2_BELOW32(MAX_CPJ_v1)

/* in byte mode, the lowest possible rate is one packet/second.
 * credit_cap is used as a counter that tells us how many times we can
 * refill the "credits available" counter when it becomes empty.
 */
#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)

static u32 xt_hashlimit_len_to_chunks(u32 len)
{
	return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
}

/* Precision saver. */
static u64 user2credits(u64 user, int revision)
{
	u64 scale = (revision == 1) ?
		XT_HASHLIMIT_SCALE : XT_HASHLIMIT_SCALE_v2;
	u64 cpj = (revision == 1) ?
		CREDITS_PER_JIFFY_v1 : CREDITS_PER_JIFFY;

	/* Avoid overflow: divide the constant operands first */
	if (scale >= HZ * cpj)
		return div64_u64(user, div64_u64(scale, HZ * cpj));

	return user * div64_u64(HZ * cpj, scale);
}

static u32 user2credits_byte(u32 user)
{
	u64 us = user;
	us *= HZ * CREDITS_PER_JIFFY_BYTES;
	return (u32) (us >> 32);
}

static u64 user2rate(u64 user)
{
	if (user != 0) {
		return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
	} else {
		pr_info_ratelimited("invalid rate from userspace: %llu\n",
				    user);
		return 0;
	}
}

static u64 user2rate_bytes(u32 user)
{
	u64 r;

	r = user ? U32_MAX / user : U32_MAX;
	return (r - 1) << XT_HASHLIMIT_BYTE_SHIFT;
}

static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now,
			    u32 mode, int revision)
{
	unsigned long delta = now - dh->rateinfo.prev;
	u64 cap, cpj;

	if (delta == 0)
		return;

	if (revision >= 3 && mode & XT_HASHLIMIT_RATE_MATCH) {
		u64 interval = dh->rateinfo.interval * HZ;

		if (delta < interval)
			return;

		dh->rateinfo.prev = now;
		dh->rateinfo.prev_window =
			((dh->rateinfo.current_rate * interval) >
			 (delta * dh->rateinfo.rate));
		dh->rateinfo.current_rate = 0;

		return;
	}

	dh->rateinfo.prev = now;

	if (mode & XT_HASHLIMIT_BYTES) {
		u64 tmp = dh->rateinfo.credit;
		dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
		cap = CREDITS_PER_JIFFY_BYTES * HZ;
		if (tmp >= dh->rateinfo.credit) {/* overflow */
			dh->rateinfo.credit = cap;
			return;
		}
	} else {
		cpj = (revision == 1) ?
			CREDITS_PER_JIFFY_v1 : CREDITS_PER_JIFFY;
		dh->rateinfo.credit += delta * cpj;
		cap = dh->rateinfo.credit_cap;
	}
	if (dh->rateinfo.credit > cap)
		dh->rateinfo.credit = cap;
}
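/*
 * Rate-match sketch (editorial illustration): in
 * XT_HASHLIMIT_RATE_MATCH mode, rateinfo_recalc() above only closes a
 * window once delta >= interval.  With rate = 100 and interval = 1
 * (so interval = HZ jiffies), 250 packets counted over a delta of
 * 2*HZ jiffies give current_rate * interval = 250*HZ, which exceeds
 * delta * rate = 200*HZ, so prev_window is set (the previous window
 * was over the limit) and current_rate restarts from zero.
 */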
static void rateinfo_init(struct dsthash_ent *dh,
			  struct xt_hashlimit_htable *hinfo, int revision)
{
	dh->rateinfo.prev = jiffies;
	if (revision >= 3 && hinfo->cfg.mode & XT_HASHLIMIT_RATE_MATCH) {
		dh->rateinfo.prev_window = 0;
		dh->rateinfo.current_rate = 0;
		if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
			dh->rateinfo.rate =
				user2rate_bytes((u32)hinfo->cfg.avg);
			if (hinfo->cfg.burst)
				dh->rateinfo.burst =
					hinfo->cfg.burst * dh->rateinfo.rate;
			else
				dh->rateinfo.burst = dh->rateinfo.rate;
		} else {
			dh->rateinfo.rate = user2rate(hinfo->cfg.avg);
			dh->rateinfo.burst =
				hinfo->cfg.burst + dh->rateinfo.rate;
		}
		dh->rateinfo.interval = hinfo->cfg.interval;
	} else if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
		dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
		dh->rateinfo.credit_cap = hinfo->cfg.burst;
	} else {
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
						   hinfo->cfg.burst, revision);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg, revision);
		dh->rateinfo.credit_cap = dh->rateinfo.credit;
	}
}

static inline __be32 maskl(__be32 a, unsigned int l)
{
	return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
	switch (p) {
	case 0 ... 31:
		i[0] = maskl(i[0], p);
		i[1] = i[2] = i[3] = 0;
		break;
	case 32 ... 63:
		i[1] = maskl(i[1], p - 32);
		i[2] = i[3] = 0;
		break;
	case 64 ... 95:
		i[2] = maskl(i[2], p - 64);
		i[3] = 0;
		break;
	case 96 ... 127:
		i[3] = maskl(i[3], p - 96);
		break;
	case 128:
		break;
	}
}
#endif
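/*
 * Masking sketch (editorial illustration): hashlimit_ipv6_mask(i, 64)
 * takes the "case 64 ... 95" branch with p - 64 = 0, so
 * maskl(i[2], 0) returns 0 and i[3] is cleared: the two high 32-bit
 * words (the /64 network prefix) are kept and the host part is zeroed.
 */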
static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
		   struct dsthash_dst *dst,
		   const struct sk_buff *skb, unsigned int protoff)
{
	__be16 _ports[2], *ports;
	u8 nexthdr;
	int poff;

	memset(dst, 0, sizeof(*dst));

	switch (hinfo->family) {
	case NFPROTO_IPV4:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			dst->ip.dst = maskl(ip_hdr(skb)->daddr,
					    hinfo->cfg.dstmask);
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			dst->ip.src = maskl(ip_hdr(skb)->saddr,
					    hinfo->cfg.srcmask);

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ip_hdr(skb)->protocol;
		break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
	{
		__be16 frag_off;

		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(dst->ip6.dst));
			hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
		}
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
			memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
			       sizeof(dst->ip6.src));
			hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
		}

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ipv6_hdr(skb)->nexthdr;
		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
		if ((int)protoff < 0)
			return -1;
		break;
	}
#endif
	default:
		BUG();
		return 0;
	}

	poff = proto_ports_offset(nexthdr);
	if (poff >= 0) {
		ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
					   &_ports);
	} else {
		_ports[0] = _ports[1] = 0;
		ports = _ports;
	}
	if (!ports)
		return -1;
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
		dst->src_port = ports[0];
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
		dst->dst_port = ports[1];
	return 0;
}

static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
{
	u64 tmp = xt_hashlimit_len_to_chunks(len);
	tmp = tmp * dh->rateinfo.cost;

	if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
		tmp = CREDITS_PER_JIFFY_BYTES * HZ;

	if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
		dh->rateinfo.credit_cap--;
		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
	}
	return (u32) tmp;
}
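/*
 * Byte-mode sketch (editorial illustration, assuming the uapi value
 * XT_HASHLIMIT_BYTE_SHIFT == 4): packets are accounted in 16-byte
 * chunks, so a 1500-byte packet is (1500 >> 4) + 1 = 94 chunks and
 * costs 94 * dh->rateinfo.cost credits in hashlimit_byte_cost() above,
 * capped at one second's worth of credits.
 */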
static bool
hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
		    struct xt_hashlimit_htable *hinfo,
		    const struct hashlimit_cfg3 *cfg, int revision)
{
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;
	bool race = false;
	u64 cost;

	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
		goto hotdrop;

	local_bh_disable();
	dh = dsthash_find(hinfo, &dst);
	if (dh == NULL) {
		dh = dsthash_alloc_init(hinfo, &dst, &race);
		if (dh == NULL) {
			local_bh_enable();
			goto hotdrop;
		} else if (race) {
			/* Already got an entry, update expiration timeout */
			dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
			rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
		} else {
			dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
			rateinfo_init(dh, hinfo, revision);
		}
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
	}

	if (cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
		cost = (cfg->mode & XT_HASHLIMIT_BYTES) ? skb->len : 1;
		dh->rateinfo.current_rate += cost;

		if (!dh->rateinfo.prev_window &&
		    (dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
			spin_unlock(&dh->lock);
			local_bh_enable();
			return !(cfg->mode & XT_HASHLIMIT_INVERT);
		} else {
			goto overlimit;
		}
	}

	if (cfg->mode & XT_HASHLIMIT_BYTES)
		cost = hashlimit_byte_cost(skb->len, dh);
	else
		cost = dh->rateinfo.cost;

	if (dh->rateinfo.credit >= cost) {
		/* below the limit */
		dh->rateinfo.credit -= cost;
		spin_unlock(&dh->lock);
		local_bh_enable();
		return !(cfg->mode & XT_HASHLIMIT_INVERT);
	}

overlimit:
	spin_unlock(&dh->lock);
	local_bh_enable();
	/* the default match is under-limit, so over the limit we need to invert */
	return cfg->mode & XT_HASHLIMIT_INVERT;

hotdrop:
	par->hotdrop = true;
	return false;
}

static bool
hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;
	struct hashlimit_cfg3 cfg = {};
	int ret;

	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
	if (ret)
		return ret;

	return hashlimit_mt_common(skb, par, hinfo, &cfg, 1);
}

static bool
hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;
	struct hashlimit_cfg3 cfg = {};
	int ret;

	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
	if (ret)
		return ret;

	return hashlimit_mt_common(skb, par, hinfo, &cfg, 2);
}

static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;

	return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
}
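/*
 * Usage sketch (editorial illustration; the table name "http" is an
 * arbitrary example): the match wrappers above back iptables rules
 * such as
 *
 *	iptables -A INPUT -p tcp --dport 80 -m hashlimit \
 *		--hashlimit-name http --hashlimit-mode srcip \
 *		--hashlimit-upto 50/sec --hashlimit-burst 20 -j ACCEPT
 *
 * where each distinct (masked) source address gets its own token
 * bucket in the shared "http" table.
 */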
#define HASHLIMIT_MAX_SIZE 1048576

static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
				     struct xt_hashlimit_htable **hinfo,
				     struct hashlimit_cfg3 *cfg,
				     const char *name, int revision)
{
	struct net *net = par->net;
	int ret;

	if (cfg->gc_interval == 0 || cfg->expire == 0)
		return -EINVAL;
	if (cfg->size > HASHLIMIT_MAX_SIZE) {
		cfg->size = HASHLIMIT_MAX_SIZE;
		pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
	}
	if (cfg->max > HASHLIMIT_MAX_SIZE) {
		cfg->max = HASHLIMIT_MAX_SIZE;
		pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
	}
	if (par->family == NFPROTO_IPV4) {
		if (cfg->srcmask > 32 || cfg->dstmask > 32)
			return -EINVAL;
	} else {
		if (cfg->srcmask > 128 || cfg->dstmask > 128)
			return -EINVAL;
	}

	if (cfg->mode & ~XT_HASHLIMIT_ALL) {
		pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n",
				    cfg->mode);
		return -EINVAL;
	}

	/* Check for overflow. */
	if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
		if (cfg->avg == 0 || cfg->avg > U32_MAX) {
			pr_info_ratelimited("invalid rate\n");
			return -ERANGE;
		}

		if (cfg->interval == 0) {
			pr_info_ratelimited("invalid interval\n");
			return -EINVAL;
		}
	} else if (cfg->mode & XT_HASHLIMIT_BYTES) {
		if (user2credits_byte(cfg->avg) == 0) {
			pr_info_ratelimited("overflow, rate too high: %llu\n",
					    cfg->avg);
			return -EINVAL;
		}
	} else if (cfg->burst == 0 ||
		   user2credits(cfg->avg * cfg->burst, revision) <
		   user2credits(cfg->avg, revision)) {
		pr_info_ratelimited("overflow, try lower: %llu/%llu\n",
				    cfg->avg, cfg->burst);
		return -ERANGE;
	}

	mutex_lock(&hashlimit_mutex);
	*hinfo = htable_find_get(net, name, par->family);
	if (*hinfo == NULL) {
		ret = htable_create(net, cfg, name, par->family,
				    hinfo, revision);
		if (ret < 0) {
			mutex_unlock(&hashlimit_mutex);
			return ret;
		}
	}
	mutex_unlock(&hashlimit_mutex);

	return 0;
}

static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
{
	struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	struct hashlimit_cfg3 cfg = {};
	int ret;

	ret = xt_check_proc_name(info->name, sizeof(info->name));
	if (ret)
		return ret;

	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
	if (ret)
		return ret;

	return hashlimit_mt_check_common(par, &info->hinfo,
					 &cfg, info->name, 1);
}

static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
{
	struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
	struct hashlimit_cfg3 cfg = {};
	int ret;

	ret = xt_check_proc_name(info->name, sizeof(info->name));
	if (ret)
		return ret;

	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
	if (ret)
		return ret;

	return hashlimit_mt_check_common(par, &info->hinfo,
					 &cfg, info->name, 2);
}

static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
	int ret;

	ret = xt_check_proc_name(info->name, sizeof(info->name));
	if (ret)
		return ret;

	return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
					 info->name, 3);
}

static void hashlimit_mt_destroy_v2(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;

	htable_put(info->hinfo);
}

static void hashlimit_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

	htable_put(info->hinfo);
}

static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo3 *info = par->matchinfo;

	htable_put(info->hinfo);
}
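/*
 * Revision overview (editorial sketch; see the uapi xt_hashlimit.h
 * header for the exact layouts): the three userspace config layouts
 * differ mainly in width and features. Revision 1 (hashlimit_cfg1)
 * carries 32-bit avg/burst, revision 2 widens them to 64 bits for
 * byte-based limits, and revision 3 (hashlimit_cfg3) adds the
 * interval field used by XT_HASHLIMIT_RATE_MATCH.  cfg_copy()
 * normalizes all of them to struct hashlimit_cfg3.
 */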
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
	{
		.name = "hashlimit",
		.revision = 1,
		.family = NFPROTO_IPV4,
		.match = hashlimit_mt_v1,
		.matchsize = sizeof(struct xt_hashlimit_mtinfo1),
		.usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
		.checkentry = hashlimit_mt_check_v1,
		.destroy = hashlimit_mt_destroy_v1,
		.me = THIS_MODULE,
	},
	{
		.name = "hashlimit",
		.revision = 2,
		.family = NFPROTO_IPV4,
		.match = hashlimit_mt_v2,
		.matchsize = sizeof(struct xt_hashlimit_mtinfo2),
		.usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
		.checkentry = hashlimit_mt_check_v2,
		.destroy = hashlimit_mt_destroy_v2,
		.me = THIS_MODULE,
	},
	{
		.name = "hashlimit",
		.revision = 3,
		.family = NFPROTO_IPV4,
		.match = hashlimit_mt,
		.matchsize = sizeof(struct xt_hashlimit_mtinfo3),
		.usersize = offsetof(struct xt_hashlimit_mtinfo3, hinfo),
		.checkentry = hashlimit_mt_check,
		.destroy = hashlimit_mt_destroy,
		.me = THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.name = "hashlimit",
		.revision = 1,
		.family = NFPROTO_IPV6,
		.match = hashlimit_mt_v1,
		.matchsize = sizeof(struct xt_hashlimit_mtinfo1),
		.usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
		.checkentry = hashlimit_mt_check_v1,
		.destroy = hashlimit_mt_destroy_v1,
		.me = THIS_MODULE,
	},
	{
		.name = "hashlimit",
		.revision = 2,
		.family = NFPROTO_IPV6,
		.match = hashlimit_mt_v2,
		.matchsize = sizeof(struct xt_hashlimit_mtinfo2),
		.usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
		.checkentry = hashlimit_mt_check_v2,
		.destroy = hashlimit_mt_destroy_v2,
		.me = THIS_MODULE,
	},
	{
		.name = "hashlimit",
		.revision = 3,
		.family = NFPROTO_IPV6,
		.match = hashlimit_mt,
		.matchsize = sizeof(struct xt_hashlimit_mtinfo3),
		.usersize = offsetof(struct xt_hashlimit_mtinfo3, hinfo),
		.checkentry = hashlimit_mt_check,
		.destroy = hashlimit_mt_destroy,
		.me = THIS_MODULE,
	},
#endif
};

/* PROC stuff */
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
	__acquires(htable->lock)
{
	struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
	unsigned int *bucket;

	spin_lock_bh(&htable->lock);
	if (*pos >= htable->cfg.size)
		return NULL;

	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);

	*bucket = *pos;
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
	unsigned int *bucket = v;

	*pos = ++(*bucket);
	if (*pos >= htable->cfg.size) {
		kfree(v);
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
	__releases(htable->lock)
{
	struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
	unsigned int *bucket = v;

	if (!IS_ERR(bucket))
		kfree(bucket);
	spin_unlock_bh(&htable->lock);
}

static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
			 struct seq_file *s)
{
	switch (family) {
	case NFPROTO_IPV4:
		seq_printf(s, "%ld %pI4:%u->%pI4:%u %llu %llu %llu\n",
			   (long)(ent->expires - jiffies)/HZ,
			   &ent->dst.ip.src,
			   ntohs(ent->dst.src_port),
			   &ent->dst.ip.dst,
			   ntohs(ent->dst.dst_port),
			   ent->rateinfo.credit, ent->rateinfo.credit_cap,
			   ent->rateinfo.cost);
		break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
		seq_printf(s, "%ld %pI6:%u->%pI6:%u %llu %llu %llu\n",
			   (long)(ent->expires - jiffies)/HZ,
			   &ent->dst.ip6.src,
			   ntohs(ent->dst.src_port),
			   &ent->dst.ip6.dst,
			   ntohs(ent->dst.dst_port),
			   ent->rateinfo.credit, ent->rateinfo.credit_cap,
			   ent->rateinfo.cost);
		break;
#endif
	default:
		BUG();
	}
}
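/*
 * Sample /proc line (editorial illustration with made-up values) as
 * produced by dl_seq_print() for an IPv4 entry:
 *
 *	59 192.0.2.10:0->0.0.0.0:0 30000 30000 6000
 *
 * i.e. seconds until expiry, source:port->destination:port, then
 * credit, credit_cap and cost.
 */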
static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
			       struct seq_file *s)
{
	struct xt_hashlimit_htable *ht = pde_data(file_inode(s->file));

	spin_lock(&ent->lock);
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies, ht->cfg.mode, 2);

	dl_seq_print(ent, family, s);

	spin_unlock(&ent->lock);
	return seq_has_overflowed(s);
}

static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
			       struct seq_file *s)
{
	struct xt_hashlimit_htable *ht = pde_data(file_inode(s->file));

	spin_lock(&ent->lock);
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies, ht->cfg.mode, 1);

	dl_seq_print(ent, family, s);

	spin_unlock(&ent->lock);
	return seq_has_overflowed(s);
}

static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
			    struct seq_file *s)
{
	struct xt_hashlimit_htable *ht = pde_data(file_inode(s->file));

	spin_lock(&ent->lock);
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies, ht->cfg.mode, 3);

	dl_seq_print(ent, family, s);

	spin_unlock(&ent->lock);
	return seq_has_overflowed(s);
}

static int dl_seq_show_v2(struct seq_file *s, void *v)
{
	struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
	unsigned int *bucket = (unsigned int *)v;
	struct dsthash_ent *ent;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
			if (dl_seq_real_show_v2(ent, htable->family, s))
				return -1;
	}
	return 0;
}

static int dl_seq_show_v1(struct seq_file *s, void *v)
{
	struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
	unsigned int *bucket = v;
	struct dsthash_ent *ent;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
			if (dl_seq_real_show_v1(ent, htable->family, s))
				return -1;
	}
	return 0;
}

static int dl_seq_show(struct seq_file *s, void *v)
{
	struct xt_hashlimit_htable *htable = pde_data(file_inode(s->file));
	unsigned int *bucket = v;
	struct dsthash_ent *ent;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
			if (dl_seq_real_show(ent, htable->family, s))
				return -1;
	}
	return 0;
}

static const struct seq_operations dl_seq_ops_v1 = {
	.start = dl_seq_start,
	.next = dl_seq_next,
	.stop = dl_seq_stop,
	.show = dl_seq_show_v1
};

static const struct seq_operations dl_seq_ops_v2 = {
	.start = dl_seq_start,
	.next = dl_seq_next,
	.stop = dl_seq_stop,
	.show = dl_seq_show_v2
};

static const struct seq_operations dl_seq_ops = {
	.start = dl_seq_start,
	.next = dl_seq_next,
	.stop = dl_seq_stop,
	.show = dl_seq_show
};
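/*
 * Each table created by a rule shows up as
 * /proc/net/ipt_hashlimit/<name> (IPv4) or
 * /proc/net/ip6t_hashlimit/<name> (IPv6); reading the file walks
 * every bucket under htable->lock via the seq_operations above, one
 * entry per line in the format shown next to dl_seq_print().
 */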
static int __net_init hashlimit_proc_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
	if (!hashlimit_net->ipt_hashlimit)
		return -ENOMEM;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
	if (!hashlimit_net->ip6t_hashlimit) {
		remove_proc_entry("ipt_hashlimit", net->proc_net);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
	struct xt_hashlimit_htable *hinfo;
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	/* hashlimit_net_exit() is called before hashlimit_mt_destroy().
	 * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
	 * entries are empty before trying to remove them.
	 */
	mutex_lock(&hashlimit_mutex);
	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
		htable_remove_proc_entry(hinfo);
	hashlimit_net->ipt_hashlimit = NULL;
	hashlimit_net->ip6t_hashlimit = NULL;
	mutex_unlock(&hashlimit_mutex);

	remove_proc_entry("ipt_hashlimit", net->proc_net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	remove_proc_entry("ip6t_hashlimit", net->proc_net);
#endif
}

static int __net_init hashlimit_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	INIT_HLIST_HEAD(&hashlimit_net->htables);
	return hashlimit_proc_net_init(net);
}

static void __net_exit hashlimit_net_exit(struct net *net)
{
	hashlimit_proc_net_exit(net);
}

static struct pernet_operations hashlimit_net_ops = {
	.init = hashlimit_net_init,
	.exit = hashlimit_net_exit,
	.id = &hashlimit_net_id,
	.size = sizeof(struct hashlimit_net),
};

static int __init hashlimit_mt_init(void)
{
	int err;

	err = register_pernet_subsys(&hashlimit_net_ops);
	if (err < 0)
		return err;
	err = xt_register_matches(hashlimit_mt_reg,
				  ARRAY_SIZE(hashlimit_mt_reg));
	if (err < 0)
		goto err1;

	err = -ENOMEM;
	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
					     sizeof(struct dsthash_ent), 0, 0,
					     NULL);
	if (!hashlimit_cachep) {
		pr_warn("unable to create slab cache\n");
		goto err2;
	}
	return 0;

err2:
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
	unregister_pernet_subsys(&hashlimit_net_ops);
	return err;
}

static void __exit hashlimit_mt_exit(void)
{
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
	unregister_pernet_subsys(&hashlimit_net_ops);

	rcu_barrier();
	kmem_cache_destroy(hashlimit_cachep);
}

module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);