Lines matching full:skb in the TCP GSO/GRO offload code (net/ipv4/tcp_offload.c); each match shows its source line number and, at the end, its enclosing function.
16 static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb, in tcp_gso_tstamp() argument
22 while (skb) { in tcp_gso_tstamp()
24 skb_shinfo(skb)->tx_flags |= flags; in tcp_gso_tstamp()
25 skb_shinfo(skb)->tskey = ts_seq; in tcp_gso_tstamp()
29 skb = skb->next; in tcp_gso_tstamp()
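As the matched lines suggest, tcp_gso_tstamp() walks the chain of segments produced by segmentation and copies the tx-timestamp flags and key from the original GSO skb onto the one segment whose sequence range covers the timestamp key. A minimal userspace sketch of that walk, assuming a before()-style wraparound-safe sequence comparison; all names here are illustrative, not the kernel's:

    #include <stdint.h>

    struct seg {
        struct seg *next;
        uint32_t tx_flags;
        uint32_t tskey;
    };

    /* Wraparound-safe "a < b" for 32-bit sequence numbers, in the
     * spirit of the kernel's before(). */
    static int seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    /* Stamp the first segment whose [seq, seq + mss) range contains
     * ts_seq with the timestamp request carried by the original skb. */
    static void gso_tstamp(struct seg *s, uint32_t flags, uint32_t ts_seq,
                           uint32_t seq, uint32_t mss)
    {
        for (; s; s = s->next, seq += mss) {
            if (seq_before(ts_seq, seq + mss)) {
                s->tx_flags |= flags;
                s->tskey = ts_seq;
                return;
            }
        }
    }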
88 static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb, in __tcp4_gso_segment_list() argument
91 skb = skb_segment_list(skb, features, skb_mac_header_len(skb)); in __tcp4_gso_segment_list()
92 if (IS_ERR(skb)) in __tcp4_gso_segment_list()
93 return skb; in __tcp4_gso_segment_list()
95 return __tcpv4_gso_segment_list_csum(skb); in __tcp4_gso_segment_list()
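The IS_ERR()/return pattern above relies on the kernel's error-pointer convention: skb_segment_list() hands back either a valid skb pointer or a small negative errno encoded in the top bytes of the address space, so a single return value carries both outcomes. A hedged userspace imitation of that encoding:

    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* Errno values -1..-4095 map to the top 4095 addresses, which no
     * valid allocation can occupy, so pointers and errors never collide. */
    static inline void *err_ptr(long error)    { return (void *)error; }
    static inline long  ptr_err(const void *p) { return (long)p; }
    static inline int   is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

A caller then does exactly what __tcp4_gso_segment_list() does: call, test is_err(), and propagate the pointer unchanged either way.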
98 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, in tcp4_gso_segment() argument
101 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)) in tcp4_gso_segment()
104 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) in tcp4_gso_segment()
107 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { in tcp4_gso_segment()
108 struct tcphdr *th = tcp_hdr(skb); in tcp4_gso_segment()
110 if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) in tcp4_gso_segment()
111 return __tcp4_gso_segment_list(skb, features); in tcp4_gso_segment()
113 skb->ip_summed = CHECKSUM_NONE; in tcp4_gso_segment()
116 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { in tcp4_gso_segment()
117 const struct iphdr *iph = ip_hdr(skb); in tcp4_gso_segment()
118 struct tcphdr *th = tcp_hdr(skb); in tcp4_gso_segment()
125 skb->ip_summed = CHECKSUM_PARTIAL; in tcp4_gso_segment()
126 __tcp_v4_send_check(skb, iph->saddr, iph->daddr); in tcp4_gso_segment()
129 return tcp_gso_segment(skb, features); in tcp4_gso_segment()
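The fixup at source lines 116-126 handles packets that reach GSO without a prepared checksum (e.g. forwarded traffic): __tcp_v4_send_check() reseeds th->check with the IPv4 pseudo-header checksum so each segment's checksum can later be completed over just its own header and payload. A standalone sketch of that seed; as I read the kernel helpers, the stored value is the folded but uninverted pseudo-header sum:

    #include <stdint.h>

    /* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
    static uint16_t csum_fold16(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* IPv4 pseudo-header seed for TCP: both addresses, protocol 6 and
     * the TCP length. Adding the 32-bit length directly is fine since
     * folding is arithmetic mod 65535. */
    static uint16_t tcp4_pseudo_seed(uint32_t saddr, uint32_t daddr,
                                     uint32_t tcp_len)
    {
        uint32_t sum = 0;

        sum += saddr >> 16;  sum += saddr & 0xffff;
        sum += daddr >> 16;  sum += daddr & 0xffff;
        sum += 6;            /* IPPROTO_TCP */
        sum += tcp_len;
        return csum_fold16(sum);
    }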
132 struct sk_buff *tcp_gso_segment(struct sk_buff *skb, in tcp_gso_segment() argument
142 struct sk_buff *gso_skb = skb; in tcp_gso_segment()
148 th = tcp_hdr(skb); in tcp_gso_segment()
153 if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb))) in tcp_gso_segment()
156 if (!pskb_may_pull(skb, thlen)) in tcp_gso_segment()
159 oldlen = ~skb->len; in tcp_gso_segment()
160 __skb_pull(skb, thlen); in tcp_gso_segment()
162 mss = skb_shinfo(skb)->gso_size; in tcp_gso_segment()
163 if (unlikely(skb->len <= mss)) in tcp_gso_segment()
166 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { in tcp_gso_segment()
169 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in tcp_gso_segment()
178 skb->ooo_okay = 0; in tcp_gso_segment()
180 segs = skb_segment(skb, features); in tcp_gso_segment()
189 * cases return a GSO skb. So update the mss now. in tcp_gso_segment()
196 skb = segs; in tcp_gso_segment()
197 th = tcp_hdr(skb); in tcp_gso_segment()
207 while (skb->next) { in tcp_gso_segment()
211 if (skb->ip_summed == CHECKSUM_PARTIAL) in tcp_gso_segment()
212 gso_reset_checksum(skb, ~th->check); in tcp_gso_segment()
214 th->check = gso_make_checksum(skb, ~th->check); in tcp_gso_segment()
218 skb->destructor = gso_skb->destructor; in tcp_gso_segment()
219 skb->sk = gso_skb->sk; in tcp_gso_segment()
220 sum_truesize += skb->truesize; in tcp_gso_segment()
222 skb = skb->next; in tcp_gso_segment()
223 th = tcp_hdr(skb); in tcp_gso_segment()
238 swap(gso_skb->sk, skb->sk); in tcp_gso_segment()
239 swap(gso_skb->destructor, skb->destructor); in tcp_gso_segment()
240 sum_truesize += skb->truesize; in tcp_gso_segment()
246 refcount_add(delta, &skb->sk->sk_wmem_alloc); in tcp_gso_segment()
248 WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc)); in tcp_gso_segment()
252 (skb_tail_pointer(skb) - in tcp_gso_segment()
253 skb_transport_header(skb)) + in tcp_gso_segment()
254 skb->data_len); in tcp_gso_segment()
256 if (skb->ip_summed == CHECKSUM_PARTIAL) in tcp_gso_segment()
257 gso_reset_checksum(skb, ~th->check); in tcp_gso_segment()
259 th->check = gso_make_checksum(skb, ~th->check); in tcp_gso_segment()
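Two checksum tricks run through tcp_gso_segment(): oldlen = ~skb->len at source line 159 captures the old length in ones'-complement form so the per-segment delta is just ~old + new, and gso_make_checksum() finishes each segment from the seeded value. The underlying incremental-update technique (RFC 1624), self-contained and reduced to a single 16-bit field:

    #include <stdint.h>

    static uint16_t fold(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* Replace one checksummed 16-bit field without rescanning the
     * packet: check is the stored (complemented) checksum; adding
     * ~old_v subtracts the old value in ones'-complement arithmetic. */
    static uint16_t csum_replace16(uint16_t check, uint16_t old_v,
                                   uint16_t new_v)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old_v;
        sum += new_v;
        return (uint16_t)~fold(sum);
    }

The lines around 218-248 do a separate job: they hand the segments back to the owning socket by copying (and, for the last segment, swapping) sk and destructor from the original skb, then settle the accumulated truesize difference against sk_wmem_alloc.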
285 struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb) in tcp_gro_pull_header() argument
290 off = skb_gro_offset(skb); in tcp_gro_pull_header()
292 th = skb_gro_header(skb, hlen, off); in tcp_gro_pull_header()
301 if (!skb_gro_may_pull(skb, hlen)) { in tcp_gro_pull_header()
302 th = skb_gro_header_slow(skb, hlen, off); in tcp_gro_pull_header()
307 skb_gro_pull(skb, thlen); in tcp_gro_pull_header()
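tcp_gro_pull_header() tries the fast path first (header already reachable via skb_gro_header()) and only falls back to skb_gro_header_slow() when it is not; in both cases the advertised data offset is validated before thlen bytes are pulled. The bounds logic, roughly, against a plain buffer rather than an skb; field offsets follow the standard TCP header layout:

    #include <stdint.h>
    #include <stddef.h>

    struct tcp_hdr_view {
        const uint8_t *th;
        size_t thlen;
    };

    /* doff (high nibble of byte 12) counts 32-bit words and must cover
     * at least the 20-byte base header while fitting in the buffer. */
    static int pull_tcp_header(const uint8_t *pkt, size_t len,
                               struct tcp_hdr_view *out)
    {
        size_t thlen;

        if (len < 20)
            return -1;
        thlen = (size_t)(pkt[12] >> 4) * 4;
        if (thlen < 20 || thlen > len)
            return -1;
        out->th = pkt;
        out->thlen = thlen;
        return 0;
    }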
312 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb, in tcp_gro_receive() argument
325 len = skb_gro_len(skb); in tcp_gro_receive()
345 /* If skb is a GRO packet, make sure its gso_size matches prior packet mss. in tcp_gro_receive()
349 if (unlikely(skb_is_gso(skb))) in tcp_gro_receive()
350 flush |= (mss != skb_shinfo(skb)->gso_size); in tcp_gro_receive()
355 flush |= skb_cmp_decrypted(p, skb); in tcp_gro_receive()
359 flush |= skb->ip_summed != p->ip_summed; in tcp_gro_receive()
360 flush |= skb->csum_level != p->csum_level; in tcp_gro_receive()
363 if (flush || skb_gro_receive_list(p, skb)) in tcp_gro_receive()
369 if (flush || skb_gro_receive(p, skb)) { in tcp_gro_receive()
378 if (unlikely(skb_is_gso(skb))) in tcp_gro_receive()
379 flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size; in tcp_gro_receive()
387 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) in tcp_gro_receive()
390 NAPI_GRO_CB(skb)->flush |= (flush != 0); in tcp_gro_receive()
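Rather than branching on every mismatch, tcp_gro_receive() ORs each disqualifying condition into one flush word (checksum state, csum_level, decryption state, gso_size versus the held flow's mss) and tests it once before merging. A simplified predicate in the same accumulate-then-test style; the checks here are a subset chosen for illustration:

    #include <stdint.h>

    struct held_flow {
        uint32_t next_seq;   /* sequence right after held payload */
        uint16_t mss;
        uint8_t  flags;      /* TCP flags that must match */
    };

    static int can_coalesce(const struct held_flow *p, uint32_t seq,
                            uint16_t gso_size, uint8_t flags,
                            uint16_t payload_len)
    {
        unsigned int flush = 0;

        flush |= p->next_seq != seq;               /* in order          */
        flush |= p->flags != flags;                /* same flag set     */
        flush |= gso_size && gso_size != p->mss;   /* consistent mss    */
        flush |= payload_len > p->mss;             /* no oversized tail */

        return flush == 0;
    }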
395 void tcp_gro_complete(struct sk_buff *skb) in tcp_gro_complete() argument
397 struct tcphdr *th = tcp_hdr(skb); in tcp_gro_complete()
400 if (skb->encapsulation) in tcp_gro_complete()
401 skb->inner_transport_header = skb->transport_header; in tcp_gro_complete()
403 skb->csum_start = (unsigned char *)th - skb->head; in tcp_gro_complete()
404 skb->csum_offset = offsetof(struct tcphdr, check); in tcp_gro_complete()
405 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_gro_complete()
407 shinfo = skb_shinfo(skb); in tcp_gro_complete()
408 shinfo->gso_segs = NAPI_GRO_CB(skb)->count; in tcp_gro_complete()
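tcp_gro_complete() converts the merged packet back into transmit-ready GSO state: csum_start and csum_offset describe where checksumming begins and where the 16-bit result must be stored (the CHECKSUM_PARTIAL contract that a NIC or the software fallback completes), and gso_segs preserves the on-the-wire segment count. The two-offset contract expressed as a plain struct; tcphdr_min mirrors the standard header layout:

    #include <stddef.h>
    #include <stdint.h>

    struct tcphdr_min {
        uint16_t source, dest;
        uint32_t seq, ack_seq;
        uint16_t flags_doff;
        uint16_t window;
        uint16_t check;      /* at offset 16, like the real tcphdr */
        uint16_t urg_ptr;
    };

    struct csum_partial_hint {
        size_t csum_start;   /* where checksumming starts in the buffer  */
        size_t csum_offset;  /* where to store the result, from csum_start */
    };

    static void mark_checksum_partial(struct csum_partial_hint *h,
                                      size_t transport_off)
    {
        h->csum_start = transport_off;
        h->csum_offset = offsetof(struct tcphdr_min, check);
    }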
415 static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, in tcp4_check_fraglist_gro() argument
424 if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST))) in tcp4_check_fraglist_gro()
429 NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist; in tcp4_check_fraglist_gro()
433 inet_get_iif_sdif(skb, &iif, &sdif); in tcp4_check_fraglist_gro()
434 iph = skb_gro_network_header(skb); in tcp4_check_fraglist_gro()
435 net = dev_net_rcu(skb->dev); in tcp4_check_fraglist_gro()
440 NAPI_GRO_CB(skb)->is_flist = !sk; in tcp4_check_fraglist_gro()
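tcp4_check_fraglist_gro() picks the GRO flavor per flow: if a local socket is found for the headers, the host will consume the data and ordinary coalescing is fine; if not, the packet is probably being forwarded, so is_flist keeps the original segment boundaries on a fraglist and the bundle can later be resegmented exactly. Reduced to its decision, with local_sk standing in (hypothetically) for the kernel's socket-lookup result:

    #include <stdbool.h>
    #include <stddef.h>

    /* Keep segment boundaries only when the device supports fraglist
     * GRO and no local socket owns the flow. */
    static bool use_fraglist_gro(bool dev_has_fraglist_gro,
                                 const void *local_sk)
    {
        return dev_has_fraglist_gro && local_sk == NULL;
    }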
446 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) in tcp4_gro_receive() argument
451 if (!NAPI_GRO_CB(skb)->flush && in tcp4_gro_receive()
452 skb_gro_checksum_validate(skb, IPPROTO_TCP, in tcp4_gro_receive()
456 th = tcp_gro_pull_header(skb); in tcp4_gro_receive()
460 tcp4_check_fraglist_gro(head, skb, th); in tcp4_gro_receive()
462 return tcp_gro_receive(head, skb, th); in tcp4_gro_receive()
465 NAPI_GRO_CB(skb)->flush = 1; in tcp4_gro_receive()
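Before any aggregation, tcp4_gro_receive() validates the TCP checksum (source line 452) and marks the packet for flushing when that fails. The invariant being tested is the classic one: the ones'-complement sum of the pseudo-header and every 16-bit word of the segment, stored th->check included, must fold to all-ones. A standalone verifier over a byte buffer, assuming host-order addresses and big-endian byte pairing:

    #include <stdint.h>
    #include <stddef.h>

    static int tcp4_csum_ok(uint32_t saddr, uint32_t daddr,
                            const uint8_t *seg, size_t len)
    {
        uint32_t sum = 0;
        size_t i;

        /* IPv4 pseudo-header: addresses, protocol 6, TCP length. */
        sum += saddr >> 16;  sum += saddr & 0xffff;
        sum += daddr >> 16;  sum += daddr & 0xffff;
        sum += 6;
        sum += (uint32_t)len;

        /* Whole segment, th->check included; odd trailing byte is
         * padded with zero on the right. */
        for (i = 0; i + 1 < len; i += 2)
            sum += (uint32_t)((seg[i] << 8) | seg[i + 1]);
        if (len & 1)
            sum += (uint32_t)seg[len - 1] << 8;

        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return sum == 0xffff;
    }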
469 INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff) in tcp4_gro_complete() argument
471 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; in tcp4_gro_complete()
472 const struct iphdr *iph = (struct iphdr *)(skb->data + offset); in tcp4_gro_complete()
473 struct tcphdr *th = tcp_hdr(skb); in tcp4_gro_complete()
475 if (unlikely(NAPI_GRO_CB(skb)->is_flist)) { in tcp4_gro_complete()
476 skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4; in tcp4_gro_complete()
477 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in tcp4_gro_complete()
479 __skb_incr_checksum_unnecessary(skb); in tcp4_gro_complete()
484 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, in tcp4_gro_complete()
487 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 | in tcp4_gro_complete()
488 (NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID); in tcp4_gro_complete()
490 tcp_gro_complete(skb); in tcp4_gro_complete()
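On the non-fraglist completion path the aggregate is longer than any original segment, so source line 484 recomputes the pseudo-header seed over the new skb->len before tcp_gro_complete() marks the packet CHECKSUM_PARTIAL, and line 488 merges SKB_GSO_TCP_FIXEDID in branchlessly by multiplying a 0/1 predicate by the flag. That idiom in isolation; the flag values here are illustrative, not the kernel's:

    #include <stdint.h>

    #define GSO_TCPV4       0x1u
    #define GSO_TCP_FIXEDID 0x2u

    /* predicate * flag ORs the flag in only when predicate == 1,
     * with no conditional branch. */
    static uint32_t gso_type_for(int ip_fixedid)
    {
        return GSO_TCPV4 | ((uint32_t)ip_fixedid * GSO_TCP_FIXEDID);
    }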