1 /* Connection state tracking for netfilter. This is separated from,
2 but required by, the NAT layer; it can also be used by an iptables
3 extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14 #include <linux/types.h>
15 #include <linux/netfilter.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/skbuff.h>
19 #include <linux/proc_fs.h>
20 #include <linux/vmalloc.h>
21 #include <linux/stddef.h>
22 #include <linux/slab.h>
23 #include <linux/random.h>
24 #include <linux/jhash.h>
25 #include <linux/err.h>
26 #include <linux/percpu.h>
27 #include <linux/moduleparam.h>
28 #include <linux/notifier.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/socket.h>
32 #include <linux/mm.h>
33 #include <linux/nsproxy.h>
34 #include <linux/rculist_nulls.h>
35
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_l3proto.h>
38 #include <net/netfilter/nf_conntrack_l4proto.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_core.h>
42 #include <net/netfilter/nf_conntrack_extend.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_ecache.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_nat.h>
48 #include <net/netfilter/nf_nat_core.h>
49
50 #define NF_CONNTRACK_VERSION "0.5.0"
51
52 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
53 enum nf_nat_manip_type manip,
54 const struct nlattr *attr) __read_mostly;
55 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
56
57 DEFINE_SPINLOCK(nf_conntrack_lock);
58 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
59
60 unsigned int nf_conntrack_htable_size __read_mostly;
61 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
62
63 unsigned int nf_conntrack_max __read_mostly;
64 EXPORT_SYMBOL_GPL(nf_conntrack_max);
65
66 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
67 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
68
69 unsigned int nf_conntrack_hash_rnd __read_mostly;
70 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
71
72 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
73 {
74 unsigned int n;
75
76 /* The direction must be ignored, so we hash everything up to the
77 * destination ports (which is a multiple of 4) and treat the last
78 * three bytes manually.
79 */
80 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
81 return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
82 (((__force __u16)tuple->dst.u.all << 16) |
83 tuple->dst.protonum));
84 }
85
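/* Map a 32-bit hash onto [0, size) without a modulo: multiplying by
 * size and keeping the upper 32 bits scales the hash by size / 2^32.
 */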
86 static u32 __hash_bucket(u32 hash, unsigned int size)
87 {
88 return ((u64)hash * size) >> 32;
89 }
90
91 static u32 hash_bucket(u32 hash, const struct net *net)
92 {
93 return __hash_bucket(hash, net->ct.htable_size);
94 }
95
96 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
97 u16 zone, unsigned int size)
98 {
99 return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
100 }
101
102 static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
103 const struct nf_conntrack_tuple *tuple)
104 {
105 return __hash_conntrack(tuple, zone, net->ct.htable_size);
106 }
107
108 bool
109 nf_ct_get_tuple(const struct sk_buff *skb,
110 unsigned int nhoff,
111 unsigned int dataoff,
112 u_int16_t l3num,
113 u_int8_t protonum,
114 struct nf_conntrack_tuple *tuple,
115 const struct nf_conntrack_l3proto *l3proto,
116 const struct nf_conntrack_l4proto *l4proto)
117 {
118 memset(tuple, 0, sizeof(*tuple));
119
120 tuple->src.l3num = l3num;
121 if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
122 return false;
123
124 tuple->dst.protonum = protonum;
125 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
126
127 return l4proto->pkt_to_tuple(skb, dataoff, tuple);
128 }
129 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
130
131 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
132 u_int16_t l3num, struct nf_conntrack_tuple *tuple)
133 {
134 struct nf_conntrack_l3proto *l3proto;
135 struct nf_conntrack_l4proto *l4proto;
136 unsigned int protoff;
137 u_int8_t protonum;
138 int ret;
139
140 rcu_read_lock();
141
142 l3proto = __nf_ct_l3proto_find(l3num);
143 ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
144 if (ret != NF_ACCEPT) {
145 rcu_read_unlock();
146 return false;
147 }
148
149 l4proto = __nf_ct_l4proto_find(l3num, protonum);
150
151 ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
152 l3proto, l4proto);
153
154 rcu_read_unlock();
155 return ret;
156 }
157 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
158
159 bool
160 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
161 const struct nf_conntrack_tuple *orig,
162 const struct nf_conntrack_l3proto *l3proto,
163 const struct nf_conntrack_l4proto *l4proto)
164 {
165 memset(inverse, 0, sizeof(*inverse));
166
167 inverse->src.l3num = orig->src.l3num;
168 if (l3proto->invert_tuple(inverse, orig) == 0)
169 return false;
170
171 inverse->dst.dir = !orig->dst.dir;
172
173 inverse->dst.protonum = orig->dst.protonum;
174 return l4proto->invert_tuple(inverse, orig);
175 }
176 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
177
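/* Unlink both directions of a conntrack from the hash table and drop any
 * expectations still registered for it; the caller holds nf_conntrack_lock.
 */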
178 static void
179 clean_from_lists(struct nf_conn *ct)
180 {
181 pr_debug("clean_from_lists(%p)\n", ct);
182 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
183 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
184
185 /* Destroy all pending expectations */
186 nf_ct_remove_expectations(ct);
187 }
188
189 static void
190 destroy_conntrack(struct nf_conntrack *nfct)
191 {
192 struct nf_conn *ct = (struct nf_conn *)nfct;
193 struct net *net = nf_ct_net(ct);
194 struct nf_conntrack_l4proto *l4proto;
195
196 pr_debug("destroy_conntrack(%p)\n", ct);
197 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
198 NF_CT_ASSERT(!timer_pending(&ct->timeout));
199
200 /* To make sure we don't get any weird locking issues here:
201 * destroy_conntrack() MUST NOT be called with a write lock
202 * to nf_conntrack_lock!!! -HW */
203 rcu_read_lock();
204 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
205 if (l4proto && l4proto->destroy)
206 l4proto->destroy(ct);
207
208 rcu_read_unlock();
209
210 spin_lock_bh(&nf_conntrack_lock);
211 /* Expectations will have been removed in clean_from_lists,
212 	 * except that TFTP can create an expectation on the first packet,
213 	 * before the connection is in the list, so we need to clean here,
214 * too. */
215 nf_ct_remove_expectations(ct);
216
217 /* We overload first tuple to link into unconfirmed list. */
218 if (!nf_ct_is_confirmed(ct)) {
219 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
220 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
221 }
222
223 NF_CT_STAT_INC(net, delete);
224 spin_unlock_bh(&nf_conntrack_lock);
225
226 if (ct->master)
227 nf_ct_put(ct->master);
228
229 pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
230 nf_conntrack_free(ct);
231 }
232
233 void nf_ct_delete_from_lists(struct nf_conn *ct)
234 {
235 struct net *net = nf_ct_net(ct);
236
237 nf_ct_helper_destroy(ct);
238 spin_lock_bh(&nf_conntrack_lock);
239 /* Inside lock so preempt is disabled on module removal path.
240 * Otherwise we can get spurious warnings. */
241 NF_CT_STAT_INC(net, delete_list);
242 clean_from_lists(ct);
243 spin_unlock_bh(&nf_conntrack_lock);
244 }
245 EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
246
247 static void death_by_event(unsigned long ul_conntrack)
248 {
249 struct nf_conn *ct = (void *)ul_conntrack;
250 struct net *net = nf_ct_net(ct);
251
252 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
253 /* bad luck, let's retry again */
254 ct->timeout.expires = jiffies +
255 (random32() % net->ct.sysctl_events_retry_timeout);
256 add_timer(&ct->timeout);
257 return;
258 }
259 /* we've got the event delivered, now it's dying */
260 set_bit(IPS_DYING_BIT, &ct->status);
261 spin_lock(&nf_conntrack_lock);
262 hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
263 spin_unlock(&nf_conntrack_lock);
264 nf_ct_put(ct);
265 }
266
267 void nf_ct_insert_dying_list(struct nf_conn *ct)
268 {
269 struct net *net = nf_ct_net(ct);
270
271 /* add this conntrack to the dying list */
272 spin_lock_bh(&nf_conntrack_lock);
273 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
274 &net->ct.dying);
275 spin_unlock_bh(&nf_conntrack_lock);
276 /* set a new timer to retry event delivery */
277 setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
278 ct->timeout.expires = jiffies +
279 (random32() % net->ct.sysctl_events_retry_timeout);
280 add_timer(&ct->timeout);
281 }
282 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
283
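/* Timer callback: record the stop timestamp (if timestamping is enabled),
 * try to deliver the destroy event, and either park the entry on the dying
 * list for a retry or unlink it and drop its reference.
 */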
284 static void death_by_timeout(unsigned long ul_conntrack)
285 {
286 struct nf_conn *ct = (void *)ul_conntrack;
287 struct nf_conn_tstamp *tstamp;
288
289 tstamp = nf_conn_tstamp_find(ct);
290 if (tstamp && tstamp->stop == 0)
291 tstamp->stop = ktime_to_ns(ktime_get_real());
292
293 if (!test_bit(IPS_DYING_BIT, &ct->status) &&
294 unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
295 /* destroy event was not delivered */
296 nf_ct_delete_from_lists(ct);
297 nf_ct_insert_dying_list(ct);
298 return;
299 }
300 set_bit(IPS_DYING_BIT, &ct->status);
301 nf_ct_delete_from_lists(ct);
302 nf_ct_put(ct);
303 }
304
305 /*
306 * Warning :
307 * - Caller must take a reference on returned object
308 * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
309 * OR
310 * - Caller must lock nf_conntrack_lock before calling this function
311 */
312 static struct nf_conntrack_tuple_hash *
313 ____nf_conntrack_find(struct net *net, u16 zone,
314 const struct nf_conntrack_tuple *tuple, u32 hash)
315 {
316 struct nf_conntrack_tuple_hash *h;
317 struct hlist_nulls_node *n;
318 unsigned int bucket = hash_bucket(hash, net);
319
320 /* Disable BHs the entire time since we normally need to disable them
321 * at least once for the stats anyway.
322 */
323 local_bh_disable();
324 begin:
325 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
326 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
327 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
328 NF_CT_STAT_INC(net, found);
329 local_bh_enable();
330 return h;
331 }
332 NF_CT_STAT_INC(net, searched);
333 }
334 /*
335 * if the nulls value we got at the end of this lookup is
336 * not the expected one, we must restart lookup.
337 * We probably met an item that was moved to another chain.
338 */
339 if (get_nulls_value(n) != bucket) {
340 NF_CT_STAT_INC(net, search_restart);
341 goto begin;
342 }
343 local_bh_enable();
344
345 return NULL;
346 }
347
348 struct nf_conntrack_tuple_hash *
349 __nf_conntrack_find(struct net *net, u16 zone,
350 const struct nf_conntrack_tuple *tuple)
351 {
352 return ____nf_conntrack_find(net, zone, tuple,
353 hash_conntrack_raw(tuple, zone));
354 }
355 EXPORT_SYMBOL_GPL(__nf_conntrack_find);
356
357 /* Find a connection corresponding to a tuple. */
358 static struct nf_conntrack_tuple_hash *
359 __nf_conntrack_find_get(struct net *net, u16 zone,
360 const struct nf_conntrack_tuple *tuple, u32 hash)
361 {
362 struct nf_conntrack_tuple_hash *h;
363 struct nf_conn *ct;
364
365 rcu_read_lock();
366 begin:
367 h = ____nf_conntrack_find(net, zone, tuple, hash);
368 if (h) {
369 ct = nf_ct_tuplehash_to_ctrack(h);
370 if (unlikely(nf_ct_is_dying(ct) ||
371 !atomic_inc_not_zero(&ct->ct_general.use)))
372 h = NULL;
373 else {
374 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
375 nf_ct_zone(ct) != zone)) {
376 nf_ct_put(ct);
377 goto begin;
378 }
379 }
380 }
381 rcu_read_unlock();
382
383 return h;
384 }
385
386 struct nf_conntrack_tuple_hash *
387 nf_conntrack_find_get(struct net *net, u16 zone,
388 const struct nf_conntrack_tuple *tuple)
389 {
390 return __nf_conntrack_find_get(net, zone, tuple,
391 hash_conntrack_raw(tuple, zone));
392 }
393 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
394
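/* Link both tuple directions of a conntrack into their hash buckets;
 * callers hold nf_conntrack_lock.
 */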
395 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
396 unsigned int hash,
397 unsigned int repl_hash)
398 {
399 struct net *net = nf_ct_net(ct);
400
401 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
402 &net->ct.hash[hash]);
403 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
404 &net->ct.hash[repl_hash]);
405 }
406
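/* Insert a conntrack into the hash table unless an entry with the same
 * original or reply tuple already exists in the same zone; returns -EEXIST
 * in that case.
 */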
407 int
408 nf_conntrack_hash_check_insert(struct nf_conn *ct)
409 {
410 struct net *net = nf_ct_net(ct);
411 unsigned int hash, repl_hash;
412 struct nf_conntrack_tuple_hash *h;
413 struct hlist_nulls_node *n;
414 u16 zone;
415
416 zone = nf_ct_zone(ct);
417 hash = hash_conntrack(net, zone,
418 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
419 repl_hash = hash_conntrack(net, zone,
420 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
421
422 spin_lock_bh(&nf_conntrack_lock);
423
424 /* See if there's one in the list already, including reverse */
425 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
426 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
427 &h->tuple) &&
428 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
429 goto out;
430 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
431 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
432 &h->tuple) &&
433 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
434 goto out;
435
436 add_timer(&ct->timeout);
437 nf_conntrack_get(&ct->ct_general);
438 __nf_conntrack_hash_insert(ct, hash, repl_hash);
439 NF_CT_STAT_INC(net, insert);
440 spin_unlock_bh(&nf_conntrack_lock);
441
442 return 0;
443
444 out:
445 NF_CT_STAT_INC(net, insert_failed);
446 spin_unlock_bh(&nf_conntrack_lock);
447 return -EEXIST;
448 }
449 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
450
451 /* Confirm a connection given skb; places it in hash table */
452 int
453 __nf_conntrack_confirm(struct sk_buff *skb)
454 {
455 unsigned int hash, repl_hash;
456 struct nf_conntrack_tuple_hash *h;
457 struct nf_conn *ct;
458 struct nf_conn_help *help;
459 struct nf_conn_tstamp *tstamp;
460 struct hlist_nulls_node *n;
461 enum ip_conntrack_info ctinfo;
462 struct net *net;
463 u16 zone;
464
465 ct = nf_ct_get(skb, &ctinfo);
466 net = nf_ct_net(ct);
467
468 /* ipt_REJECT uses nf_conntrack_attach to attach related
469 	   ICMP/TCP RST packets in the other direction. The actual packet
470 	   which created the connection will be IP_CT_NEW or, for an
471 expected connection, IP_CT_RELATED. */
472 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
473 return NF_ACCEPT;
474
475 zone = nf_ct_zone(ct);
476 /* reuse the hash saved before */
477 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
478 hash = hash_bucket(hash, net);
479 repl_hash = hash_conntrack(net, zone,
480 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
481
482 /* We're not in hash table, and we refuse to set up related
483 connections for unconfirmed conns. But packet copies and
484 REJECT will give spurious warnings here. */
485 /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
486
487 /* No external references means no one else could have
488 confirmed us. */
489 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
490 pr_debug("Confirming conntrack %p\n", ct);
491
492 spin_lock_bh(&nf_conntrack_lock);
493
494 /* We have to check the DYING flag inside the lock to prevent
495 a race against nf_ct_get_next_corpse() possibly called from
496 user context, else we insert an already 'dead' hash, blocking
497 further use of that particular connection -JM */
498
499 if (unlikely(nf_ct_is_dying(ct))) {
500 spin_unlock_bh(&nf_conntrack_lock);
501 return NF_ACCEPT;
502 }
503
504 /* See if there's one in the list already, including reverse:
505 NAT could have grabbed it without realizing, since we're
506 	   not in the hash. If there is, we lost the race. */
507 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
508 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
509 &h->tuple) &&
510 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
511 goto out;
512 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
513 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
514 &h->tuple) &&
515 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
516 goto out;
517
518 /* Remove from unconfirmed list */
519 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
520
521 /* Timer relative to confirmation time, not original
522 setting time, otherwise we'd get timer wrap in
523 weird delay cases. */
524 ct->timeout.expires += jiffies;
525 add_timer(&ct->timeout);
526 atomic_inc(&ct->ct_general.use);
527 ct->status |= IPS_CONFIRMED;
528
529 /* set conntrack timestamp, if enabled. */
530 tstamp = nf_conn_tstamp_find(ct);
531 if (tstamp) {
532 if (skb->tstamp.tv64 == 0)
533 __net_timestamp((struct sk_buff *)skb);
534
535 tstamp->start = ktime_to_ns(skb->tstamp);
536 }
537 /* Since the lookup is lockless, hash insertion must be done after
538 * starting the timer and setting the CONFIRMED bit. The RCU barriers
539 * guarantee that no other CPU can find the conntrack before the above
540 * stores are visible.
541 */
542 __nf_conntrack_hash_insert(ct, hash, repl_hash);
543 NF_CT_STAT_INC(net, insert);
544 spin_unlock_bh(&nf_conntrack_lock);
545
546 help = nfct_help(ct);
547 if (help && help->helper)
548 nf_conntrack_event_cache(IPCT_HELPER, ct);
549
550 nf_conntrack_event_cache(master_ct(ct) ?
551 IPCT_RELATED : IPCT_NEW, ct);
552 return NF_ACCEPT;
553
554 out:
555 NF_CT_STAT_INC(net, insert_failed);
556 spin_unlock_bh(&nf_conntrack_lock);
557 return NF_DROP;
558 }
559 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
560
561 /* Returns true if a connection corresponds to the tuple (required
562 for NAT). */
563 int
564 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
565 const struct nf_conn *ignored_conntrack)
566 {
567 struct net *net = nf_ct_net(ignored_conntrack);
568 struct nf_conntrack_tuple_hash *h;
569 struct hlist_nulls_node *n;
570 struct nf_conn *ct;
571 u16 zone = nf_ct_zone(ignored_conntrack);
572 unsigned int hash = hash_conntrack(net, zone, tuple);
573
574 /* Disable BHs the entire time since we need to disable them at
575 * least once for the stats anyway.
576 */
577 rcu_read_lock_bh();
578 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
579 ct = nf_ct_tuplehash_to_ctrack(h);
580 if (ct != ignored_conntrack &&
581 nf_ct_tuple_equal(tuple, &h->tuple) &&
582 nf_ct_zone(ct) == zone) {
583 NF_CT_STAT_INC(net, found);
584 rcu_read_unlock_bh();
585 return 1;
586 }
587 NF_CT_STAT_INC(net, searched);
588 }
589 rcu_read_unlock_bh();
590
591 return 0;
592 }
593 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
594
595 #define NF_CT_EVICTION_RANGE 8
596
597 /* There's a small race here where we may free a just-assured
598 connection. Too bad: we're in trouble anyway. */
599 static noinline int early_drop(struct net *net, unsigned int hash)
600 {
601 /* Use oldest entry, which is roughly LRU */
602 struct nf_conntrack_tuple_hash *h;
603 struct nf_conn *ct = NULL, *tmp;
604 struct hlist_nulls_node *n;
605 unsigned int i, cnt = 0;
606 int dropped = 0;
607
608 rcu_read_lock();
609 for (i = 0; i < net->ct.htable_size; i++) {
610 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
611 hnnode) {
612 tmp = nf_ct_tuplehash_to_ctrack(h);
613 if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
614 ct = tmp;
615 cnt++;
616 }
617
618 if (ct != NULL) {
619 if (likely(!nf_ct_is_dying(ct) &&
620 atomic_inc_not_zero(&ct->ct_general.use)))
621 break;
622 else
623 ct = NULL;
624 }
625
626 if (cnt >= NF_CT_EVICTION_RANGE)
627 break;
628
629 hash = (hash + 1) % net->ct.htable_size;
630 }
631 rcu_read_unlock();
632
633 if (!ct)
634 return dropped;
635
636 if (del_timer(&ct->timeout)) {
637 death_by_timeout((unsigned long)ct);
638 /* Check if we indeed killed this entry. Reliable event
639 delivery may have inserted it into the dying list. */
640 if (test_bit(IPS_DYING_BIT, &ct->status)) {
641 dropped = 1;
642 NF_CT_STAT_INC_ATOMIC(net, early_drop);
643 }
644 }
645 nf_ct_put(ct);
646 return dropped;
647 }
648
649 void init_nf_conntrack_hash_rnd(void)
650 {
651 unsigned int rand;
652
653 /*
654 	 * Why not initialize nf_conntrack_hash_rnd in an init() function?
655 	 * Because there isn't enough entropy while the system is initializing,
656 	 * so we initialize it as late as possible.
657 */
658 do {
659 get_random_bytes(&rand, sizeof(rand));
660 } while (!rand);
661 cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
662 }
663
664 static struct nf_conn *
665 __nf_conntrack_alloc(struct net *net, u16 zone,
666 const struct nf_conntrack_tuple *orig,
667 const struct nf_conntrack_tuple *repl,
668 gfp_t gfp, u32 hash)
669 {
670 struct nf_conn *ct;
671
672 if (unlikely(!nf_conntrack_hash_rnd)) {
673 init_nf_conntrack_hash_rnd();
674 /* recompute the hash as nf_conntrack_hash_rnd is initialized */
675 hash = hash_conntrack_raw(orig, zone);
676 }
677
678 /* We don't want any race condition at early drop stage */
679 atomic_inc(&net->ct.count);
680
681 if (nf_conntrack_max &&
682 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
683 if (!early_drop(net, hash_bucket(hash, net))) {
684 atomic_dec(&net->ct.count);
685 if (net_ratelimit())
686 printk(KERN_WARNING
687 "nf_conntrack: table full, dropping"
688 " packet.\n");
689 return ERR_PTR(-ENOMEM);
690 }
691 }
692
693 /*
694 * Do not use kmem_cache_zalloc(), as this cache uses
695 * SLAB_DESTROY_BY_RCU.
696 */
697 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
698 if (ct == NULL) {
699 atomic_dec(&net->ct.count);
700 return ERR_PTR(-ENOMEM);
701 }
702 /*
703 	 * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
704 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
705 */
706 memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
707 offsetof(struct nf_conn, proto) -
708 offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
709 spin_lock_init(&ct->lock);
710 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
711 ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
712 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
713 /* save hash for reusing when confirming */
714 *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
715 /* Don't set timer yet: wait for confirmation */
716 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
717 write_pnet(&ct->ct_net, net);
718 #ifdef CONFIG_NF_CONNTRACK_ZONES
719 if (zone) {
720 struct nf_conntrack_zone *nf_ct_zone;
721
722 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
723 if (!nf_ct_zone)
724 goto out_free;
725 nf_ct_zone->id = zone;
726 }
727 #endif
728 /*
729 * changes to lookup keys must be done before setting refcnt to 1
730 */
731 smp_wmb();
732 atomic_set(&ct->ct_general.use, 1);
733 return ct;
734
735 #ifdef CONFIG_NF_CONNTRACK_ZONES
736 out_free:
737 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
738 return ERR_PTR(-ENOMEM);
739 #endif
740 }
741
742 struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
743 const struct nf_conntrack_tuple *orig,
744 const struct nf_conntrack_tuple *repl,
745 gfp_t gfp)
746 {
747 return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
748 }
749 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
750
751 void nf_conntrack_free(struct nf_conn *ct)
752 {
753 struct net *net = nf_ct_net(ct);
754
755 nf_ct_ext_destroy(ct);
756 atomic_dec(&net->ct.count);
757 nf_ct_ext_free(ct);
758 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
759 }
760 EXPORT_SYMBOL_GPL(nf_conntrack_free);
761
762 /* Allocate a new conntrack: we return -ENOMEM if classification
763 failed due to stress. Otherwise it really is unclassifiable. */
764 static struct nf_conntrack_tuple_hash *
765 init_conntrack(struct net *net, struct nf_conn *tmpl,
766 const struct nf_conntrack_tuple *tuple,
767 struct nf_conntrack_l3proto *l3proto,
768 struct nf_conntrack_l4proto *l4proto,
769 struct sk_buff *skb,
770 unsigned int dataoff, u32 hash)
771 {
772 struct nf_conn *ct;
773 struct nf_conn_help *help;
774 struct nf_conntrack_tuple repl_tuple;
775 struct nf_conntrack_ecache *ecache;
776 struct nf_conntrack_expect *exp;
777 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
778
779 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
780 pr_debug("Can't invert tuple.\n");
781 return NULL;
782 }
783
784 ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
785 hash);
786 if (IS_ERR(ct))
787 return (struct nf_conntrack_tuple_hash *)ct;
788
789 if (!l4proto->new(ct, skb, dataoff)) {
790 nf_conntrack_free(ct);
791 pr_debug("init conntrack: can't track with proto module\n");
792 return NULL;
793 }
794
795 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
796 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
797
798 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
799 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
800 ecache ? ecache->expmask : 0,
801 GFP_ATOMIC);
802
803 spin_lock_bh(&nf_conntrack_lock);
804 exp = nf_ct_find_expectation(net, zone, tuple);
805 if (exp) {
806 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
807 ct, exp);
808 /* Welcome, Mr. Bond. We've been expecting you... */
809 __set_bit(IPS_EXPECTED_BIT, &ct->status);
810 ct->master = exp->master;
811 if (exp->helper) {
812 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
813 if (help)
814 rcu_assign_pointer(help->helper, exp->helper);
815 }
816
817 #ifdef CONFIG_NF_CONNTRACK_MARK
818 ct->mark = exp->master->mark;
819 #endif
820 #ifdef CONFIG_NF_CONNTRACK_SECMARK
821 ct->secmark = exp->master->secmark;
822 #endif
823 nf_conntrack_get(&ct->master->ct_general);
824 NF_CT_STAT_INC(net, expect_new);
825 } else {
826 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
827 NF_CT_STAT_INC(net, new);
828 }
829
830 /* Overload tuple linked list to put us in unconfirmed list. */
831 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
832 &net->ct.unconfirmed);
833
834 spin_unlock_bh(&nf_conntrack_lock);
835
836 if (exp) {
837 if (exp->expectfn)
838 exp->expectfn(ct, exp);
839 nf_ct_expect_put(exp);
840 }
841
842 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
843 }
844
845 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
846 static inline struct nf_conn *
847 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
848 struct sk_buff *skb,
849 unsigned int dataoff,
850 u_int16_t l3num,
851 u_int8_t protonum,
852 struct nf_conntrack_l3proto *l3proto,
853 struct nf_conntrack_l4proto *l4proto,
854 int *set_reply,
855 enum ip_conntrack_info *ctinfo)
856 {
857 struct nf_conntrack_tuple tuple;
858 struct nf_conntrack_tuple_hash *h;
859 struct nf_conn *ct;
860 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
861 u32 hash;
862
863 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
864 dataoff, l3num, protonum, &tuple, l3proto,
865 l4proto)) {
866 pr_debug("resolve_normal_ct: Can't get tuple\n");
867 return NULL;
868 }
869
870 /* look for tuple match */
871 hash = hash_conntrack_raw(&tuple, zone);
872 h = __nf_conntrack_find_get(net, zone, &tuple, hash);
873 if (!h) {
874 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
875 skb, dataoff, hash);
876 if (!h)
877 return NULL;
878 if (IS_ERR(h))
879 return (void *)h;
880 }
881 ct = nf_ct_tuplehash_to_ctrack(h);
882
883 /* It exists; we have (non-exclusive) reference. */
884 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
885 *ctinfo = IP_CT_ESTABLISHED_REPLY;
886 		/* Please set the reply bit if this packet is OK */
887 *set_reply = 1;
888 } else {
889 /* Once we've had two way comms, always ESTABLISHED. */
890 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
891 pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
892 *ctinfo = IP_CT_ESTABLISHED;
893 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
894 pr_debug("nf_conntrack_in: related packet for %p\n",
895 ct);
896 *ctinfo = IP_CT_RELATED;
897 } else {
898 pr_debug("nf_conntrack_in: new packet for %p\n", ct);
899 *ctinfo = IP_CT_NEW;
900 }
901 *set_reply = 0;
902 }
903 skb->nfct = &ct->ct_general;
904 skb->nfctinfo = *ctinfo;
905 return ct;
906 }
907
908 unsigned int
909 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
910 struct sk_buff *skb)
911 {
912 struct nf_conn *ct, *tmpl = NULL;
913 enum ip_conntrack_info ctinfo;
914 struct nf_conntrack_l3proto *l3proto;
915 struct nf_conntrack_l4proto *l4proto;
916 unsigned int dataoff;
917 u_int8_t protonum;
918 int set_reply = 0;
919 int ret;
920
921 if (skb->nfct) {
922 /* Previously seen (loopback or untracked)? Ignore. */
923 tmpl = (struct nf_conn *)skb->nfct;
924 if (!nf_ct_is_template(tmpl)) {
925 NF_CT_STAT_INC_ATOMIC(net, ignore);
926 return NF_ACCEPT;
927 }
928 skb->nfct = NULL;
929 }
930
931 /* rcu_read_lock()ed by nf_hook_slow */
932 l3proto = __nf_ct_l3proto_find(pf);
933 ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
934 &dataoff, &protonum);
935 if (ret <= 0) {
936 pr_debug("not prepared to track yet or error occurred\n");
937 NF_CT_STAT_INC_ATOMIC(net, error);
938 NF_CT_STAT_INC_ATOMIC(net, invalid);
939 ret = -ret;
940 goto out;
941 }
942
943 l4proto = __nf_ct_l4proto_find(pf, protonum);
944
945 	/* It may be a special packet, error, unclean...
946 	 * the inverse of the return code tells the netfilter
947 	 * core what to do with the packet. */
948 if (l4proto->error != NULL) {
949 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
950 pf, hooknum);
951 if (ret <= 0) {
952 NF_CT_STAT_INC_ATOMIC(net, error);
953 NF_CT_STAT_INC_ATOMIC(net, invalid);
954 ret = -ret;
955 goto out;
956 }
957 /* ICMP[v6] protocol trackers may assign one conntrack. */
958 if (skb->nfct)
959 goto out;
960 }
961
962 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
963 l3proto, l4proto, &set_reply, &ctinfo);
964 if (!ct) {
965 /* Not valid part of a connection */
966 NF_CT_STAT_INC_ATOMIC(net, invalid);
967 ret = NF_ACCEPT;
968 goto out;
969 }
970
971 if (IS_ERR(ct)) {
972 /* Too stressed to deal. */
973 NF_CT_STAT_INC_ATOMIC(net, drop);
974 ret = NF_DROP;
975 goto out;
976 }
977
978 NF_CT_ASSERT(skb->nfct);
979
980 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
981 if (ret <= 0) {
982 /* Invalid: inverse of the return code tells
983 * the netfilter core what to do */
984 pr_debug("nf_conntrack_in: Can't track with proto module\n");
985 nf_conntrack_put(skb->nfct);
986 skb->nfct = NULL;
987 NF_CT_STAT_INC_ATOMIC(net, invalid);
988 if (ret == -NF_DROP)
989 NF_CT_STAT_INC_ATOMIC(net, drop);
990 ret = -ret;
991 goto out;
992 }
993
994 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
995 nf_conntrack_event_cache(IPCT_REPLY, ct);
996 out:
997 if (tmpl) {
998 /* Special case: we have to repeat this hook, assign the
999 * template again to this packet. We assume that this packet
1000 * has no conntrack assigned. This is used by nf_ct_tcp. */
1001 if (ret == NF_REPEAT)
1002 skb->nfct = (struct nf_conntrack *)tmpl;
1003 else
1004 nf_ct_put(tmpl);
1005 }
1006
1007 return ret;
1008 }
1009 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1010
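/* Protocol-resolving wrapper around nf_ct_invert_tuple(): looks up the
 * l3/l4 protocol handlers from the original tuple under rcu_read_lock().
 */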
1011 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1012 const struct nf_conntrack_tuple *orig)
1013 {
1014 bool ret;
1015
1016 rcu_read_lock();
1017 ret = nf_ct_invert_tuple(inverse, orig,
1018 __nf_ct_l3proto_find(orig->src.l3num),
1019 __nf_ct_l4proto_find(orig->src.l3num,
1020 orig->dst.protonum));
1021 rcu_read_unlock();
1022 return ret;
1023 }
1024 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1025
1026 /* Alter reply tuple (maybe alter helper). This is for NAT, and is
1027 implicitly racy: see __nf_conntrack_confirm */
1028 void nf_conntrack_alter_reply(struct nf_conn *ct,
1029 const struct nf_conntrack_tuple *newreply)
1030 {
1031 struct nf_conn_help *help = nfct_help(ct);
1032
1033 /* Should be unconfirmed, so not in hash table yet */
1034 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1035
1036 pr_debug("Altering reply tuple of %p to ", ct);
1037 nf_ct_dump_tuple(newreply);
1038
1039 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1040 if (ct->master || (help && !hlist_empty(&help->expectations)))
1041 return;
1042
1043 rcu_read_lock();
1044 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1045 rcu_read_unlock();
1046 }
1047 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1048
1049 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1050 void __nf_ct_refresh_acct(struct nf_conn *ct,
1051 enum ip_conntrack_info ctinfo,
1052 const struct sk_buff *skb,
1053 unsigned long extra_jiffies,
1054 int do_acct)
1055 {
1056 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1057 NF_CT_ASSERT(skb);
1058
1059 /* Only update if this is not a fixed timeout */
1060 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1061 goto acct;
1062
1063 /* If not in hash table, timer will not be active yet */
1064 if (!nf_ct_is_confirmed(ct)) {
1065 ct->timeout.expires = extra_jiffies;
1066 } else {
1067 unsigned long newtime = jiffies + extra_jiffies;
1068
1069 /* Only update the timeout if the new timeout is at least
1070 HZ jiffies from the old timeout. Need del_timer for race
1071 avoidance (may already be dying). */
1072 if (newtime - ct->timeout.expires >= HZ)
1073 mod_timer_pending(&ct->timeout, newtime);
1074 }
1075
1076 acct:
1077 if (do_acct) {
1078 struct nf_conn_counter *acct;
1079
1080 acct = nf_conn_acct_find(ct);
1081 if (acct) {
1082 atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
1083 atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
1084 }
1085 }
1086 }
1087 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1088
1089 bool __nf_ct_kill_acct(struct nf_conn *ct,
1090 enum ip_conntrack_info ctinfo,
1091 const struct sk_buff *skb,
1092 int do_acct)
1093 {
1094 if (do_acct) {
1095 struct nf_conn_counter *acct;
1096
1097 acct = nf_conn_acct_find(ct);
1098 if (acct) {
1099 atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
1100 atomic64_add(skb->len - skb_network_offset(skb),
1101 &acct[CTINFO2DIR(ctinfo)].bytes);
1102 }
1103 }
1104
1105 if (del_timer(&ct->timeout)) {
1106 ct->timeout.function((unsigned long)ct);
1107 return true;
1108 }
1109 return false;
1110 }
1111 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1112
1113 #ifdef CONFIG_NF_CONNTRACK_ZONES
1114 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1115 .len = sizeof(struct nf_conntrack_zone),
1116 .align = __alignof__(struct nf_conntrack_zone),
1117 .id = NF_CT_EXT_ZONE,
1118 };
1119 #endif
1120
1121 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1122
1123 #include <linux/netfilter/nfnetlink.h>
1124 #include <linux/netfilter/nfnetlink_conntrack.h>
1125 #include <linux/mutex.h>
1126
1127 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1128 * in ip_conntrack_core, since we don't want the protocols to autoload
1129 * or depend on ctnetlink */
1130 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1131 const struct nf_conntrack_tuple *tuple)
1132 {
1133 NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
1134 NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
1135 return 0;
1136
1137 nla_put_failure:
1138 return -1;
1139 }
1140 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1141
1142 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1143 [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
1144 [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
1145 };
1146 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1147
1148 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1149 struct nf_conntrack_tuple *t)
1150 {
1151 if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1152 return -EINVAL;
1153
1154 t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1155 t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1156
1157 return 0;
1158 }
1159 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1160
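/* Attribute space needed for the port part of a tuple, computed from the
 * netlink policy above.
 */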
1161 int nf_ct_port_nlattr_tuple_size(void)
1162 {
1163 return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1164 }
1165 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1166 #endif
1167
1168 /* Used by ipt_REJECT and ip6t_REJECT. */
1169 static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1170 {
1171 struct nf_conn *ct;
1172 enum ip_conntrack_info ctinfo;
1173
1174 /* This ICMP is in reverse direction to the packet which caused it */
1175 ct = nf_ct_get(skb, &ctinfo);
1176 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1177 ctinfo = IP_CT_RELATED_REPLY;
1178 else
1179 ctinfo = IP_CT_RELATED;
1180
1181 /* Attach to new skbuff, and increment count */
1182 nskb->nfct = &ct->ct_general;
1183 nskb->nfctinfo = ctinfo;
1184 nf_conntrack_get(nskb->nfct);
1185 }
1186
1187 /* Bring out ya dead! */
1188 static struct nf_conn *
1189 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1190 void *data, unsigned int *bucket)
1191 {
1192 struct nf_conntrack_tuple_hash *h;
1193 struct nf_conn *ct;
1194 struct hlist_nulls_node *n;
1195
1196 spin_lock_bh(&nf_conntrack_lock);
1197 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1198 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1199 ct = nf_ct_tuplehash_to_ctrack(h);
1200 if (iter(ct, data))
1201 goto found;
1202 }
1203 }
1204 hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
1205 ct = nf_ct_tuplehash_to_ctrack(h);
1206 if (iter(ct, data))
1207 set_bit(IPS_DYING_BIT, &ct->status);
1208 }
1209 spin_unlock_bh(&nf_conntrack_lock);
1210 return NULL;
1211 found:
1212 atomic_inc(&ct->ct_general.use);
1213 spin_unlock_bh(&nf_conntrack_lock);
1214 return ct;
1215 }
1216
1217 void nf_ct_iterate_cleanup(struct net *net,
1218 int (*iter)(struct nf_conn *i, void *data),
1219 void *data)
1220 {
1221 struct nf_conn *ct;
1222 unsigned int bucket = 0;
1223
1224 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1225 		/* Time to push up daisies... */
1226 if (del_timer(&ct->timeout))
1227 death_by_timeout((unsigned long)ct);
1228 /* ... else the timer will get him soon. */
1229
1230 nf_ct_put(ct);
1231 }
1232 }
1233 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1234
1235 struct __nf_ct_flush_report {
1236 u32 pid;
1237 int report;
1238 };
1239
1240 static int kill_report(struct nf_conn *i, void *data)
1241 {
1242 struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
1243 struct nf_conn_tstamp *tstamp;
1244
1245 tstamp = nf_conn_tstamp_find(i);
1246 if (tstamp && tstamp->stop == 0)
1247 tstamp->stop = ktime_to_ns(ktime_get_real());
1248
1249 /* If we fail to deliver the event, death_by_timeout() will retry */
1250 if (nf_conntrack_event_report(IPCT_DESTROY, i,
1251 fr->pid, fr->report) < 0)
1252 return 1;
1253
1254 /* Avoid the delivery of the destroy event in death_by_timeout(). */
1255 set_bit(IPS_DYING_BIT, &i->status);
1256 return 1;
1257 }
1258
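/* Iterator callback that matches every conntrack; used to flush the whole
 * table on cleanup.
 */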
1259 static int kill_all(struct nf_conn *i, void *data)
1260 {
1261 return 1;
1262 }
1263
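/* Free a hash table allocated by nf_ct_alloc_hashtable(), whether it was
 * vmalloc'ed or page-allocated.
 */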
1264 void nf_ct_free_hashtable(void *hash, unsigned int size)
1265 {
1266 if (is_vmalloc_addr(hash))
1267 vfree(hash);
1268 else
1269 free_pages((unsigned long)hash,
1270 get_order(sizeof(struct hlist_head) * size));
1271 }
1272 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1273
1274 void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
1275 {
1276 struct __nf_ct_flush_report fr = {
1277 .pid = pid,
1278 .report = report,
1279 };
1280 nf_ct_iterate_cleanup(net, kill_report, &fr);
1281 }
1282 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
1283
1284 static void nf_ct_release_dying_list(struct net *net)
1285 {
1286 struct nf_conntrack_tuple_hash *h;
1287 struct nf_conn *ct;
1288 struct hlist_nulls_node *n;
1289
1290 spin_lock_bh(&nf_conntrack_lock);
1291 hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
1292 ct = nf_ct_tuplehash_to_ctrack(h);
1293 /* never fails to remove them, no listeners at this point */
1294 nf_ct_kill(ct);
1295 }
1296 spin_unlock_bh(&nf_conntrack_lock);
1297 }
1298
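/* Count the references still held on the per-cpu untracked conntracks
 * beyond the initial one each of them starts with.
 */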
1299 static int untrack_refs(void)
1300 {
1301 int cnt = 0, cpu;
1302
1303 for_each_possible_cpu(cpu) {
1304 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1305
1306 cnt += atomic_read(&ct->ct_general.use) - 1;
1307 }
1308 return cnt;
1309 }
1310
1311 static void nf_conntrack_cleanup_init_net(void)
1312 {
1313 while (untrack_refs() > 0)
1314 schedule();
1315
1316 nf_conntrack_helper_fini();
1317 nf_conntrack_proto_fini();
1318 #ifdef CONFIG_NF_CONNTRACK_ZONES
1319 nf_ct_extend_unregister(&nf_ct_zone_extend);
1320 #endif
1321 }
1322
1323 static void nf_conntrack_cleanup_net(struct net *net)
1324 {
1325 i_see_dead_people:
1326 nf_ct_iterate_cleanup(net, kill_all, NULL);
1327 nf_ct_release_dying_list(net);
1328 if (atomic_read(&net->ct.count) != 0) {
1329 schedule();
1330 goto i_see_dead_people;
1331 }
1332
1333 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1334 nf_conntrack_ecache_fini(net);
1335 nf_conntrack_tstamp_fini(net);
1336 nf_conntrack_acct_fini(net);
1337 nf_conntrack_expect_fini(net);
1338 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1339 kfree(net->ct.slabname);
1340 free_percpu(net->ct.stat);
1341 }
1342
1343 /* Mishearing the voices in his head, our hero wonders how he's
1344 supposed to kill the mall. */
1345 void nf_conntrack_cleanup(struct net *net)
1346 {
1347 if (net_eq(net, &init_net))
1348 RCU_INIT_POINTER(ip_ct_attach, NULL);
1349
1350 /* This makes sure all current packets have passed through
1351 netfilter framework. Roll on, two-stage module
1352 delete... */
1353 synchronize_net();
1354
1355 nf_conntrack_cleanup_net(net);
1356
1357 if (net_eq(net, &init_net)) {
1358 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1359 nf_conntrack_cleanup_init_net();
1360 }
1361 }
1362
1363 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1364 {
1365 struct hlist_nulls_head *hash;
1366 unsigned int nr_slots, i;
1367 size_t sz;
1368
1369 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1370 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1371 sz = nr_slots * sizeof(struct hlist_nulls_head);
1372 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1373 get_order(sz));
1374 if (!hash) {
1375 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1376 hash = vzalloc(sz);
1377 }
1378
1379 if (hash && nulls)
1380 for (i = 0; i < nr_slots; i++)
1381 INIT_HLIST_NULLS_HEAD(&hash[i], i);
1382
1383 return hash;
1384 }
1385 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1386
1387 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1388 {
1389 int i, bucket;
1390 unsigned int hashsize, old_size;
1391 struct hlist_nulls_head *hash, *old_hash;
1392 struct nf_conntrack_tuple_hash *h;
1393 struct nf_conn *ct;
1394
1395 if (current->nsproxy->net_ns != &init_net)
1396 return -EOPNOTSUPP;
1397
1398 /* On boot, we can set this without any fancy locking. */
1399 if (!nf_conntrack_htable_size)
1400 return param_set_uint(val, kp);
1401
1402 hashsize = simple_strtoul(val, NULL, 0);
1403 if (!hashsize)
1404 return -EINVAL;
1405
1406 hash = nf_ct_alloc_hashtable(&hashsize, 1);
1407 if (!hash)
1408 return -ENOMEM;
1409
1410 /* Lookups in the old hash might happen in parallel, which means we
1411 * might get false negatives during connection lookup. New connections
1412 * created because of a false negative won't make it into the hash
1413 * though since that required taking the lock.
1414 */
1415 spin_lock_bh(&nf_conntrack_lock);
1416 for (i = 0; i < init_net.ct.htable_size; i++) {
1417 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1418 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1419 struct nf_conntrack_tuple_hash, hnnode);
1420 ct = nf_ct_tuplehash_to_ctrack(h);
1421 hlist_nulls_del_rcu(&h->hnnode);
1422 bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1423 hashsize);
1424 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1425 }
1426 }
1427 old_size = init_net.ct.htable_size;
1428 old_hash = init_net.ct.hash;
1429
1430 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1431 init_net.ct.hash = hash;
1432 spin_unlock_bh(&nf_conntrack_lock);
1433
1434 nf_ct_free_hashtable(old_hash, old_size);
1435 return 0;
1436 }
1437 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1438
1439 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1440 &nf_conntrack_htable_size, 0600);
1441
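/* OR the given status bits into every per-cpu untracked conntrack. */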
1442 void nf_ct_untracked_status_or(unsigned long bits)
1443 {
1444 int cpu;
1445
1446 for_each_possible_cpu(cpu)
1447 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1448 }
1449 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1450
1451 static int nf_conntrack_init_init_net(void)
1452 {
1453 int max_factor = 8;
1454 int ret, cpu;
1455
1456 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
1457 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
1458 if (!nf_conntrack_htable_size) {
1459 nf_conntrack_htable_size
1460 = (((totalram_pages << PAGE_SHIFT) / 16384)
1461 / sizeof(struct hlist_head));
1462 if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1463 nf_conntrack_htable_size = 16384;
1464 if (nf_conntrack_htable_size < 32)
1465 nf_conntrack_htable_size = 32;
1466
1467 /* Use a max. factor of four by default to get the same max as
1468 * with the old struct list_heads. When a table size is given
1469 * we use the old value of 8 to avoid reducing the max.
1470 * entries. */
1471 max_factor = 4;
1472 }
1473 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1474
1475 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1476 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1477 nf_conntrack_max);
1478
1479 ret = nf_conntrack_proto_init();
1480 if (ret < 0)
1481 goto err_proto;
1482
1483 ret = nf_conntrack_helper_init();
1484 if (ret < 0)
1485 goto err_helper;
1486
1487 #ifdef CONFIG_NF_CONNTRACK_ZONES
1488 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1489 if (ret < 0)
1490 goto err_extend;
1491 #endif
1492 /* Set up fake conntrack: to never be deleted, not in any hashes */
1493 for_each_possible_cpu(cpu) {
1494 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1495 write_pnet(&ct->ct_net, &init_net);
1496 atomic_set(&ct->ct_general.use, 1);
1497 }
1498 	/* - and make it look like a confirmed connection */
1499 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1500 return 0;
1501
1502 #ifdef CONFIG_NF_CONNTRACK_ZONES
1503 err_extend:
1504 nf_conntrack_helper_fini();
1505 #endif
1506 err_helper:
1507 nf_conntrack_proto_fini();
1508 err_proto:
1509 return ret;
1510 }
1511
1512 /*
1513 * We need to use special "null" values, not used in hash table
1514 */
1515 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
1516 #define DYING_NULLS_VAL ((1<<30)+1)
1517
1518 static int nf_conntrack_init_net(struct net *net)
1519 {
1520 int ret;
1521
1522 atomic_set(&net->ct.count, 0);
1523 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
1524 INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
1525 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1526 if (!net->ct.stat) {
1527 ret = -ENOMEM;
1528 goto err_stat;
1529 }
1530
1531 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1532 if (!net->ct.slabname) {
1533 ret = -ENOMEM;
1534 goto err_slabname;
1535 }
1536
1537 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1538 sizeof(struct nf_conn), 0,
1539 SLAB_DESTROY_BY_RCU, NULL);
1540 if (!net->ct.nf_conntrack_cachep) {
1541 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1542 ret = -ENOMEM;
1543 goto err_cache;
1544 }
1545
1546 net->ct.htable_size = nf_conntrack_htable_size;
1547 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1548 if (!net->ct.hash) {
1549 ret = -ENOMEM;
1550 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1551 goto err_hash;
1552 }
1553 ret = nf_conntrack_expect_init(net);
1554 if (ret < 0)
1555 goto err_expect;
1556 ret = nf_conntrack_acct_init(net);
1557 if (ret < 0)
1558 goto err_acct;
1559 ret = nf_conntrack_tstamp_init(net);
1560 if (ret < 0)
1561 goto err_tstamp;
1562 ret = nf_conntrack_ecache_init(net);
1563 if (ret < 0)
1564 goto err_ecache;
1565
1566 return 0;
1567
1568 err_ecache:
1569 nf_conntrack_tstamp_fini(net);
1570 err_tstamp:
1571 nf_conntrack_acct_fini(net);
1572 err_acct:
1573 nf_conntrack_expect_fini(net);
1574 err_expect:
1575 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1576 err_hash:
1577 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1578 err_cache:
1579 kfree(net->ct.slabname);
1580 err_slabname:
1581 free_percpu(net->ct.stat);
1582 err_stat:
1583 return ret;
1584 }
1585
1586 s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1587 enum ip_conntrack_dir dir,
1588 u32 seq);
1589 EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
1590
1591 int nf_conntrack_init(struct net *net)
1592 {
1593 int ret;
1594
1595 if (net_eq(net, &init_net)) {
1596 ret = nf_conntrack_init_init_net();
1597 if (ret < 0)
1598 goto out_init_net;
1599 }
1600 ret = nf_conntrack_init_net(net);
1601 if (ret < 0)
1602 goto out_net;
1603
1604 if (net_eq(net, &init_net)) {
1605 /* For use by REJECT target */
1606 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1607 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1608
1609 /* Howto get NAT offsets */
1610 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1611 }
1612 return 0;
1613
1614 out_net:
1615 if (net_eq(net, &init_net))
1616 nf_conntrack_cleanup_init_net();
1617 out_init_net:
1618 return ret;
1619 }
1620