1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NF_QUEUE_H
3 #define _NF_QUEUE_H
4
5 #include <linux/ip.h>
6 #include <linux/ipv6.h>
7 #include <linux/jhash.h>
8 #include <linux/netfilter.h>
9 #include <linux/rhashtable-types.h>
10 #include <linux/skbuff.h>
11
/* Each queued (to userspace) skbuff has one of these. */
struct nf_queue_entry {
	struct list_head	list;		/* per-queue pending list */
	struct rhash_head	hash_node;	/* lookup by packet id */
	struct sk_buff		*skb;		/* the queued packet */
	unsigned int		id;		/* packet id, matched on verdict */
	unsigned int		hook_index;	/* index in hook_entries->hook[] */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct net_device	*physin;	/* bridge physical in device */
	struct net_device	*physout;	/* bridge physical out device */
#endif
	struct nf_hook_state	state;		/* hook state at queue time */
	bool			nf_ct_is_unconfirmed;
	u16			size;		/* sizeof(entry) + saved route keys */

	/* extra space to store route keys */
};
29
30 #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
31
/* Packet queuing */
struct nf_queue_handler {
	/* Hand one entry to queue @queuenum; non-zero return means the
	 * packet could not be queued.
	 */
	int			(*outfn)(struct nf_queue_entry *entry,
					 unsigned int queuenum);
	/* Flush entries associated with @net (called on hook removal). */
	void			(*nf_hook_drop)(struct net *net);
};
38
/* Install/remove the (single, global) queue handler backend,
 * e.g. nfnetlink_queue.
 */
void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);

/* nf_queue_entry_get_refs() attempts to take the references an entry
 * pins (returns false on failure — presumably when a referenced object
 * is going away; confirm against the implementation).
 * nf_queue_entry_free() drops those references and frees the entry.
 */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
44
/* Lazily seed a jhash initval.  Zero is reserved as "not yet seeded",
 * and get_random_u32() may legitimately return 0, hence the loop.
 */
static inline void init_hashrandom(u32 *jhash_initval)
{
	while (!*jhash_initval)
		*jhash_initval = get_random_u32();
}
50
/* Direction-independent hash of an IPv4 header: the address pair is
 * ordered (smaller first) so both directions of a flow land in the
 * same queue.
 */
static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
{
	u32 lo = (__force u32)iph->saddr;
	u32 hi = (__force u32)iph->daddr;

	if (lo >= hi) {
		u32 tmp = lo;

		lo = hi;
		hi = tmp;
	}

	return jhash_3words(lo, hi, iph->protocol, initval);
}
61
/* Direction-independent hash of an IPv6 header.  Only address words
 * [3] and [1] are mixed in; each pair is ordered (min before max /
 * min only for word [1]) so both flow directions hash identically.
 */
static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
{
	u32 s3 = (__force u32)ip6h->saddr.s6_addr32[3];
	u32 d3 = (__force u32)ip6h->daddr.s6_addr32[3];
	u32 s1 = (__force u32)ip6h->saddr.s6_addr32[1];
	u32 d1 = (__force u32)ip6h->daddr.s6_addr32[1];
	u32 a = s3 < d3 ? s3 : d3;
	u32 b = s3 < d3 ? d3 : s3;
	u32 c = s1 < d1 ? s1 : d1;

	return jhash_3words(a, b, c, initval);
}
83
/* Hash a bridged frame by peeking at its encapsulated L3 header.
 * Uses skb_header_pointer() because the network header may not be in
 * the linear area.  Unknown or unreadable payloads hash to 0.
 */
static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
{
	__be16 proto = eth_hdr(skb)->h_proto;

	if (proto == htons(ETH_P_IP)) {
		struct iphdr _iph;
		struct iphdr *iph;

		iph = skb_header_pointer(skb, skb_network_offset(skb),
					 sizeof(*iph), &_iph);
		if (iph)
			return hash_v4(iph, initval);
	} else if (proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr _ip6h;
		struct ipv6hdr *ip6h;

		ip6h = skb_header_pointer(skb, skb_network_offset(skb),
					  sizeof(*ip6h), &_ip6h);
		if (ip6h)
			return hash_v6(ip6h, initval);
	}

	return 0;
}
106
107 static inline u32
nfqueue_hash(const struct sk_buff * skb,u16 queue,u16 queues_total,u8 family,u32 initval)108 nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
109 u32 initval)
110 {
111 switch (family) {
112 case NFPROTO_IPV4:
113 queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
114 queues_total);
115 break;
116 case NFPROTO_IPV6:
117 queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
118 queues_total);
119 break;
120 case NFPROTO_BRIDGE:
121 queue += reciprocal_scale(hash_bridge(skb, initval),
122 queues_total);
123 break;
124 }
125
126 return queue;
127 }
128
/* Queue @skb to userspace through the registered handler.  @index is the
 * hook position to resume from on reinject; @verdict carries the queue
 * number in its upper bits.  Returns 0 on success, negative errno
 * otherwise (per kernel convention — confirm against nf_queue.c).
 */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict);
131
132 #endif /* _NF_QUEUE_H */
133