/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/net.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/netfilter/nf_queue.h>
#include <net/ip.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped;
static unsigned int queue_user_dropped;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);

static inline void
__ipq_enqueue_entry(struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue_list);
	queue_total++;
}

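/* Update the packet copy mode and range; called with queue_lock held. */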
static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case IPQ_COPY_NONE:
	case IPQ_COPY_META:
		copy_mode = mode;
		copy_range = 0;
		break;

	case IPQ_COPY_PACKET:
		if (range > 0xFFFF)
			range = 0xFFFF;
		copy_range = range;
		copy_mode = mode;
		break;

	default:
		status = -EINVAL;

	}
	return status;
}

static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);

static inline void
__ipq_reset(void)
{
	peer_pid = 0;
	net_disable_timestamp();
	__ipq_set_mode(IPQ_COPY_NONE, 0);
	__ipq_flush(NULL, 0);
}

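/*
 * Find the queued entry whose address matches the packet id handed to
 * userspace, unlink it from the queue and return it (or NULL).
 */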
static struct nf_queue_entry *
ipq_find_dequeue_entry(unsigned long id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue_lock);

	list_for_each_entry(i, &queue_list, list) {
		if ((unsigned long)i == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue_total--;
	}

	spin_unlock_bh(&queue_lock);
	return entry;
}

static void
__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
}

static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
	spin_lock_bh(&queue_lock);
	__ipq_flush(cmpfn, data);
	spin_unlock_bh(&queue_lock);
}

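/*
 * Build an IPQM_PACKET netlink message describing the queued packet,
 * copying up to copy_range bytes of payload in IPQ_COPY_PACKET mode.
 * Returns the new skb, or NULL with *errp set on failure.
 */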
static struct sk_buff *
ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size = 0;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct ipq_packet_msg *pmsg;
	struct nlmsghdr *nlh;
	struct timeval tv;

	switch (ACCESS_ONCE(copy_mode)) {
	case IPQ_COPY_META:
	case IPQ_COPY_NONE:
		size = NLMSG_SPACE(sizeof(*pmsg));
		break;

	case IPQ_COPY_PACKET:
		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
		    (*errp = skb_checksum_help(entry->skb)))
			return NULL;

		data_len = ACCESS_ONCE(copy_range);
		if (data_len == 0 || data_len > entry->skb->len)
			data_len = entry->skb->len;

		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		break;

	default:
		*errp = -EINVAL;
		return NULL;
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
	pmsg = NLMSG_DATA(nlh);
	memset(pmsg, 0, sizeof(*pmsg));

	pmsg->packet_id       = (unsigned long)entry;
	pmsg->data_len        = data_len;
	tv = ktime_to_timeval(entry->skb->tstamp);
	pmsg->timestamp_sec   = tv.tv_sec;
	pmsg->timestamp_usec  = tv.tv_usec;
	pmsg->mark            = entry->skb->mark;
	pmsg->hook            = entry->hook;
	pmsg->hw_protocol     = entry->skb->protocol;

	if (entry->indev)
		strcpy(pmsg->indev_name, entry->indev->name);
	else
		pmsg->indev_name[0] = '\0';

	if (entry->outdev)
		strcpy(pmsg->outdev_name, entry->outdev->name);
	else
		pmsg->outdev_name[0] = '\0';

	if (entry->indev && entry->skb->dev &&
	    entry->skb->mac_header != entry->skb->network_header) {
		pmsg->hw_type = entry->skb->dev->type;
		pmsg->hw_addrlen = dev_parse_header(entry->skb,
						    pmsg->hw_addr);
	}

	if (data_len)
		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
			BUG();

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
	kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "ip_queue: error creating packet message\n");
	return NULL;
}

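/*
 * Queue handler callback: send the packet to the userspace peer over
 * netlink and, on success, keep the entry on queue_list until a verdict
 * arrives.  Returns a negative errno if the packet cannot be queued.
 */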
static int
ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	int status = -EINVAL;
	struct sk_buff *nskb;

	if (copy_mode == IPQ_COPY_NONE)
		return -EAGAIN;

	nskb = ipq_build_packet_message(entry, &status);
	if (nskb == NULL)
		return status;

	spin_lock_bh(&queue_lock);

	if (!peer_pid)
		goto err_out_free_nskb;

	if (queue_total >= queue_maxlen) {
		queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "ip_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n", queue_total,
			       queue_dropped);
		goto err_out_free_nskb;
	}

	/* netlink_unicast will either free the nskb or attach it to a socket */
	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue_user_dropped++;
		goto err_out_unlock;
	}

	__ipq_enqueue_entry(entry);

	spin_unlock_bh(&queue_lock);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue_lock);
	return status;
}

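/*
 * Replace the queued packet's contents with the payload supplied in the
 * verdict message, growing or trimming the skb as required.
 */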
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
	int diff;
	struct iphdr *user_iph = (struct iphdr *)v->payload;
	struct sk_buff *nskb;

	if (v->data_len < sizeof(*user_iph))
		return 0;
	diff = v->data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, v->data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (v->data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "ip_queue: error "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, v->data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
	e->skb->ip_summed = CHECKSUM_NONE;

	return 0;
}

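/*
 * Apply a verdict received from userspace: look up the queued entry,
 * optionally mangle the packet, then reinject it into the stack.
 */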
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
	struct nf_queue_entry *entry;

	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
		return -EINVAL;

	entry = ipq_find_dequeue_entry(vmsg->id);
	if (entry == NULL)
		return -ENOENT;
	else {
		int verdict = vmsg->value;

		if (vmsg->data_len && vmsg->data_len == len)
			if (ipq_mangle_ipv4(vmsg, entry) < 0)
				verdict = NF_DROP;

		nf_reinject(entry, verdict);
		return 0;
	}
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue_lock);
	status = __ipq_set_mode(mode, range);
	spin_unlock_bh(&queue_lock);
	return status;
}

static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
		 unsigned char type, unsigned int len)
{
	int status = 0;

	if (len < sizeof(*pmsg))
		return -EINVAL;

	switch (type) {
	case IPQM_MODE:
		status = ipq_set_mode(pmsg->msg.mode.value,
				      pmsg->msg.mode.range);
		break;

	case IPQM_VERDICT:
		status = ipq_set_verdict(&pmsg->msg.verdict,
					 len - sizeof(*pmsg));
		break;
	default:
		status = -EINVAL;
	}
	return status;
}

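/*
 * Match entries whose input or output device (including bridge physical
 * devices) has the given ifindex.
 */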
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

static void
ipq_dev_drop(int ifindex)
{
	ipq_flush(dev_cmp, ifindex);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

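/*
 * Validate and dispatch a single request from the userspace peer:
 * check the netlink header and CAP_NET_ADMIN, record (or verify) the
 * peer PID, then hand IPQM_* messages to ipq_receive_peer().
 */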
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
	int status, type, pid, flags;
	unsigned int nlmsglen, skblen;
	struct nlmsghdr *nlh;
	bool enable_timestamp = false;

	skblen = skb->len;
	if (skblen < sizeof(*nlh))
		return;

	nlh = nlmsg_hdr(skb);
	nlmsglen = nlh->nlmsg_len;
	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
		return;

	pid = nlh->nlmsg_pid;
	flags = nlh->nlmsg_flags;

	if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
		RCV_SKB_FAIL(-EINVAL);

	if (flags & MSG_TRUNC)
		RCV_SKB_FAIL(-ECOMM);

	type = nlh->nlmsg_type;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		RCV_SKB_FAIL(-EINVAL);

	if (type <= IPQM_BASE)
		return;

	if (!capable(CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	spin_lock_bh(&queue_lock);

	if (peer_pid) {
		if (peer_pid != pid) {
			spin_unlock_bh(&queue_lock);
			RCV_SKB_FAIL(-EBUSY);
		}
	} else {
		enable_timestamp = true;
		peer_pid = pid;
	}

	spin_unlock_bh(&queue_lock);
	if (enable_timestamp)
		net_enable_timestamp();
	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
				  nlmsglen - NLMSG_LENGTH(0));
	if (status < 0)
		RCV_SKB_FAIL(status);

	if (flags & NLM_F_ACK)
		netlink_ack(skb, nlh, 0);
}

static void
ipq_rcv_skb(struct sk_buff *skb)
{
	mutex_lock(&ipqnl_mutex);
	__ipq_rcv_skb(skb);
	mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
		  unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		ipq_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
	.notifier_call	= ipq_rcv_dev_event,
};

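/*
 * When the peer closes its NETLINK_FIREWALL socket, reset the queue
 * state and drop all pending packets.
 */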
static int
ipq_rcv_nl_event(struct notifier_block *this,
		 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
		spin_lock_bh(&queue_lock);
		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
			__ipq_reset();
		spin_unlock_bh(&queue_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
	.notifier_call	= ipq_rcv_nl_event,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
	{
		.procname	= NET_IPQ_QMAX_NAME,
		.data		= &queue_maxlen,
		.maxlen		= sizeof(queue_maxlen),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ }
};
#endif

#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
{
	spin_lock_bh(&queue_lock);

	seq_printf(m,
		   "Peer PID          : %d\n"
		   "Copy mode         : %hu\n"
		   "Copy range        : %u\n"
		   "Queue length      : %u\n"
		   "Queue max. length : %u\n"
		   "Queue dropped     : %u\n"
		   "Netlink dropped   : %u\n",
		   peer_pid,
		   copy_mode,
		   copy_range,
		   queue_total,
		   queue_maxlen,
		   queue_dropped,
		   queue_user_dropped);

	spin_unlock_bh(&queue_lock);
	return 0;
}

static int ip_queue_open(struct inode *inode, struct file *file)
{
	return single_open(file, ip_queue_show, NULL);
}

static const struct file_operations ip_queue_proc_fops = {
	.open		= ip_queue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
#endif

static const struct nf_queue_handler nfqh = {
	.name	= "ip_queue",
	.outfn	= &ipq_enqueue_packet,
};

static int __init ip_queue_init(void)
{
	int status = -ENOMEM;
	struct proc_dir_entry *proc __maybe_unused;

	netlink_register_notifier(&ipq_nl_notifier);
	ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
				      ipq_rcv_skb, NULL, THIS_MODULE);
	if (ipqnl == NULL) {
		printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
			   &ip_queue_proc_fops);
	if (!proc) {
		printk(KERN_ERR "ip_queue: failed to create proc entry\n");
		goto cleanup_ipqnl;
	}
#endif
	register_netdevice_notifier(&ipq_dev_notifier);
#ifdef CONFIG_SYSCTL
	ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
#endif
	status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
	if (status < 0) {
		printk(KERN_ERR "ip_queue: failed to register queue handler\n");
		goto cleanup_sysctl;
	}
	return status;

cleanup_sysctl:
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(ipq_sysctl_header);
#endif
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
cleanup_ipqnl: __maybe_unused
	netlink_kernel_release(ipqnl);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
	netlink_unregister_notifier(&ipq_nl_notifier);
	return status;
}

static void __exit ip_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);

	ipq_flush(NULL, 0);

#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(ipq_sysctl_header);
#endif
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

	netlink_kernel_release(ipqnl);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

	netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);

module_init(ip_queue_init);
module_exit(ip_queue_fini);