// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	TCPv6 GSO/GRO support
 */
8 #include <linux/indirect_call_wrapper.h>
9 #include <linux/skbuff.h>
10 #include <net/inet6_hashtables.h>
11 #include <net/gro.h>
12 #include <net/protocol.h>
13 #include <net/tcp.h>
14 #include <net/ip6_checksum.h>
15 #include "ip6_offload.h"
16
tcp6_check_fraglist_gro(struct list_head * head,struct sk_buff * skb,struct tcphdr * th)17 static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
18 struct tcphdr *th)
19 {
20 #if IS_ENABLED(CONFIG_IPV6)
21 const struct ipv6hdr *hdr;
22 struct sk_buff *p;
23 struct sock *sk;
24 struct net *net;
25 int iif, sdif;
26
27 p = tcp_gro_lookup(head, th);
28 if (p) {
29 NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
30 return;
31 }
32
33 inet6_get_iif_sdif(skb, &iif, &sdif);
34 hdr = skb_gro_network_header(skb);
35 net = dev_net_rcu(skb->dev);
36 sk = __inet6_lookup_established(net, &hdr->saddr, th->source,
37 &hdr->daddr, ntohs(th->dest),
38 iif, sdif);
39 NAPI_GRO_CB(skb)->is_flist = !sk;
40 if (sk)
41 sock_gen_put(sk);
42 #endif /* IS_ENABLED(CONFIG_IPV6) */
43 }
44
tcp6_gro_receive(struct list_head * head,struct sk_buff * skb)45 static __always_inline struct sk_buff *tcp6_gro_receive(struct list_head *head,
46 struct sk_buff *skb)
47 {
48 struct tcphdr *th;
49
50 /* Don't bother verifying checksum if we're going to flush anyway. */
51 if (!NAPI_GRO_CB(skb)->flush &&
52 skb_gro_checksum_validate(skb, IPPROTO_TCP,
53 ip6_gro_compute_pseudo))
54 goto flush;
55
56 th = tcp_gro_pull_header(skb);
57 if (!th)
58 goto flush;
59
60 if (unlikely(skb->dev->features & NETIF_F_GRO_FRAGLIST))
61 tcp6_check_fraglist_gro(head, skb, th);
62
63 return tcp_gro_receive(head, skb, th);
64
65 flush:
66 NAPI_GRO_CB(skb)->flush = 1;
67 return NULL;
68 }
69
tcp6_gro_complete(struct sk_buff * skb,int thoff)70 static __always_inline int tcp6_gro_complete(struct sk_buff *skb, int thoff)
71 {
72 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
73 const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
74 struct tcphdr *th = tcp_hdr(skb);
75
76 if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
77 skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
78 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
79
80 __skb_incr_checksum_unnecessary(skb);
81
82 return 0;
83 }
84
85 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
86 &iph->daddr, 0);
87 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
88
89 tcp_gro_complete(skb);
90 return 0;
91 }
92
/* Rewrite one address/port pair on a fraglist segment, incrementally
 * patching the TCP checksum for each field that actually changes.  The
 * old values in the segment header are overwritten with the new ones.
 */
static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
				     struct in6_addr *oldip,
				     const struct in6_addr *newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *tcph = tcp_hdr(seg);
	bool ip_differs = !ipv6_addr_equal(oldip, newip);

	if (ip_differs) {
		/* Address is part of the pseudo header: pseudohdr == true. */
		inet_proto_csum_replace16(&tcph->check, seg,
					  oldip->s6_addr32, newip->s6_addr32,
					  true);
		*oldip = *newip;
	}

	if (*oldport != newport) {
		/* Port lives only in the TCP header proper. */
		inet_proto_csum_replace2(&tcph->check, seg,
					 *oldport, newport, false);
		*oldport = newport;
	}
}
114
__tcpv6_gso_segment_list_csum(struct sk_buff * segs)115 static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
116 {
117 const struct tcphdr *th;
118 const struct ipv6hdr *iph;
119 struct sk_buff *seg;
120 struct tcphdr *th2;
121 struct ipv6hdr *iph2;
122
123 seg = segs;
124 th = tcp_hdr(seg);
125 iph = ipv6_hdr(seg);
126 th2 = tcp_hdr(seg->next);
127 iph2 = ipv6_hdr(seg->next);
128
129 if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
130 ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
131 ipv6_addr_equal(&iph->daddr, &iph2->daddr))
132 return segs;
133
134 while ((seg = seg->next)) {
135 th2 = tcp_hdr(seg);
136 iph2 = ipv6_hdr(seg);
137
138 __tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
139 &th2->source, th->source);
140 __tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
141 &th2->dest, th->dest);
142 }
143
144 return segs;
145 }
146
/* Fraglist segmentation: split the skb back into its original segments via
 * skb_segment_list(), then fix up each segment's 4-tuple/checksum to match
 * the (possibly rewritten) head.  Returns the segment list or an ERR_PTR.
 */
static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_segment_list(skb, features, skb_mac_header_len(skb));

	return IS_ERR(segs) ? segs : __tcpv6_gso_segment_list_csum(segs);
}
156
/* GSO segmentation entry point for TCP over IPv6.
 *
 * Rejects skbs not marked SKB_GSO_TCPV6 or too short for a TCP header.
 * Fraglist skbs whose geometry still matches gso_size (and are not DODGY)
 * take the cheap skb_segment_list() path; otherwise they fall through to
 * normal segmentation with checksums recomputed.  If the stack did not set
 * up CHECKSUM_PARTIAL, seed the pseudo-header checksum here.  Returns the
 * segment list from tcp_gso_segment() or an ERR_PTR.
 *
 * Fix versus the previous version: the outer 'th' was declared and then
 * shadowed by two inner 'th' declarations (-Wshadow); it is now assigned
 * once — after pskb_may_pull(), which may reallocate the header — and
 * reused, with identical behavior.
 */
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct tcphdr *th;

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*th)))
		return ERR_PTR(-EINVAL);

	/* Only valid after pskb_may_pull() has made the header linear. */
	th = tcp_hdr(skb);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			return __tcp6_gso_segment_list(skb, features);

		/* Geometry changed: force full (re)checksumming below. */
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		/* Set up pseudo header, usually expect stack to have done
		 * this.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	}

	return tcp_gso_segment(skb, features);
}
193
tcpv6_offload_init(void)194 int __init tcpv6_offload_init(void)
195 {
196 net_hotdata.tcpv6_offload = (struct net_offload) {
197 .callbacks = {
198 .gso_segment = tcp6_gso_segment,
199 .gro_receive = tcp6_gro_receive,
200 .gro_complete = tcp6_gro_complete,
201 },
202 };
203 return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
204 }
205