// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
			   unsigned int seq, unsigned int mss)
{
	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
	u32 ts_seq = skb_shinfo(gso_skb)->tskey;

	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= flags;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp4_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

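/* Note on tcp_gso_segment() below: rather than recomputing each segment's
 * checksum from scratch, it patches the original th->check incrementally
 * (the RFC 1624 technique).  At this point th->check holds only the
 * pseudo-header checksum (CHECKSUM_PARTIAL), and the sole term in it that
 * changes per segment is the TCP length.  With oldlen = ~skb->len taken
 * before __skb_pull(), a rough sketch of the arithmetic is:
 *
 *	delta  = htonl(oldlen + thlen + mss);	// ~old_len + new_len
 *	check' = ~csum_fold(csum_add(csum_unfold(check), delta));
 *
 * csum_add() performs one's-complement addition, so folding in the
 * complement of the old length plus the new length swaps the length term
 * inside the existing sum without re-reading any payload.
 */
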
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	bool ecn_cwr_mask;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only the first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
		tcp_gso_tstamp(segs, gso_skb, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	ecn_cwr_mask = !!(skb_shinfo(gso_skb)->gso_type & SKB_GSO_TCP_ACCECN);

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);

		th->cwr &= ecn_cwr_mask;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

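/* tcp_gro_lookup() below matches flows on the ports alone: @source and
 * @dest are adjacent 16-bit fields in struct tcphdr, so both compare in a
 * single 32-bit load and XOR.  Lower layers (ethernet/IP) have already
 * vetted addresses via the same_flow flag, which is also cleared here on a
 * port mismatch so the entry is not treated as a match again for this
 * packet.
 */
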
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;
	}

	skb_gro_pull(skb, thlen);

	return th;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	th2 = tcp_hdr(p);
	flush = (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
			       ~(TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	flush |= gro_receive_network_flush(th, th2, p);

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

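/* tcp_gro_complete() dresses the merged skb up as a GSO packet: converting
 * it to CHECKSUM_PARTIAL (csum_start/csum_offset point at th->check) and
 * taking gso_segs from the merge count lets the packet be resegmented
 * losslessly if it is later forwarded.  A set CWR bit marks the packet
 * SKB_GSO_TCP_ACCECN so that a subsequent tcp_gso_segment() keeps CWR on
 * every segment instead of clearing it after the first one.
 */
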
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ACCECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net_rcu(skb->dev);
	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_gen_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
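
/* For reference, a hypothetical sketch of how another inet protocol would
 * register the same kind of offload (the "foo" names and IPPROTO_FOO are
 * placeholders, not existing kernel symbols):
 *
 *	static const struct net_offload foo_offload = {
 *		.callbacks = {
 *			.gso_segment	= foo_gso_segment,
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	err = inet_add_offload(&foo_offload, IPPROTO_FOO);
 *
 * inet_add_offload() publishes the entry in the inet_offloads[] array
 * indexed by IP protocol number; inet_gro_receive() and inet_gso_segment()
 * then dispatch on iph->protocol to reach these callbacks.
 */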