Lines matching full:skb in net/core/gro.c

91 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)  in skb_gro_receive()  argument
93 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
94 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
95 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
96 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
108 if (p->pp_recycle != skb->pp_recycle) in skb_gro_receive()
112 NAPI_GRO_CB(skb)->flush)) in skb_gro_receive()
116 if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP || in skb_gro_receive()
123 segs = NAPI_GRO_CB(skb)->count; in skb_gro_receive()
150 new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
151 delta_truesize = skb->truesize - new_truesize; in skb_gro_receive()
153 skb->truesize = new_truesize; in skb_gro_receive()
154 skb->len -= skb->data_len; in skb_gro_receive()
155 skb->data_len = 0; in skb_gro_receive()
157 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
159 } else if (skb->head_frag) { in skb_gro_receive()
162 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
169 first_offset = skb->data - in skb_gro_receive()
181 delta_truesize = skb->truesize - new_truesize; in skb_gro_receive()
182 skb->truesize = new_truesize; in skb_gro_receive()
183 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
189 skb->destructor = NULL; in skb_gro_receive()
190 skb->sk = NULL; in skb_gro_receive()
191 delta_truesize = skb->truesize; in skb_gro_receive()
197 skb->data_len -= eat; in skb_gro_receive()
198 skb->len -= eat; in skb_gro_receive()
202 __skb_pull(skb, offset); in skb_gro_receive()
205 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
207 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
208 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
209 __skb_header_release(skb); in skb_gro_receive()
222 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
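
The hits above cover skb_gro_receive(), which folds a newly arrived skb into the held packet p by one of three routes: adopting the skb's page fragments (with the truesize bookkeeping around lines 150-157), stealing the skb's head page as an extra fragment (lines 159-183), or chaining the whole skb onto p's frag_list through the cached last pointer (lines 202-209); in every case same_flow is set to 1 so the caller knows the skb was absorbed. Below is a minimal userspace sketch of the adopt-or-chain decision under a simplified packet model; pkt, pkt_new, pkt_merge and MAX_FRAGS are invented stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_FRAGS 4                     /* stand-in for MAX_SKB_FRAGS */

struct pkt {
    size_t frags[MAX_FRAGS];            /* fragment lengths only */
    int nr_frags;
    size_t len;                         /* total payload length */
    struct pkt *frag_list;              /* overflow chain, like skb's frag_list */
    struct pkt *last;                   /* cached tail, like NAPI_GRO_CB(p)->last */
    struct pkt *next;
};

static struct pkt *pkt_new(size_t frag_len)
{
    struct pkt *p = calloc(1, sizeof(*p));

    p->frags[0] = frag_len;
    p->nr_frags = 1;
    p->len = frag_len;
    p->last = p;
    return p;
}

/* Fold donor into p: adopt its fragments if they fit, else chain it whole. */
static void pkt_merge(struct pkt *p, struct pkt *donor)
{
    p->len += donor->len;
    if (p->nr_frags + donor->nr_frags <= MAX_FRAGS) {
        memcpy(&p->frags[p->nr_frags], donor->frags,
               donor->nr_frags * sizeof(donor->frags[0]));
        p->nr_frags += donor->nr_frags;
        free(donor);                    /* metadata gone, payload adopted */
    } else {
        if (!p->frag_list)              /* mirrors lines 205-208 above */
            p->frag_list = donor;
        else
            p->last->next = donor;
        p->last = donor;                /* chained donors stay allocated */
    }
}

int main(void)
{
    struct pkt *p = pkt_new(1448);

    for (int i = 0; i < 6; i++)
        pkt_merge(p, pkt_new(1448));
    printf("len=%zu nr_frags=%d chained=%s\n",
           p->len, p->nr_frags, p->frag_list ? "yes" : "no");
    return 0;
}
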
226 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive_list() argument
228 if (unlikely(p->len + skb->len >= 65536)) in skb_gro_receive_list()
232 skb_shinfo(p)->frag_list = skb; in skb_gro_receive_list()
234 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive_list()
236 skb_pull(skb, skb_gro_offset(skb)); in skb_gro_receive_list()
238 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive_list()
240 p->data_len += skb->len; in skb_gro_receive_list()
243 skb->destructor = NULL; in skb_gro_receive_list()
244 skb->sk = NULL; in skb_gro_receive_list()
245 p->truesize += skb->truesize; in skb_gro_receive_list()
246 p->len += skb->len; in skb_gro_receive_list()
248 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive_list()
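
skb_gro_receive_list() is the simpler list-only merge: it refuses once p->len + skb->len would reach 65536 (past the 16-bit IP total-length field), then chains the skb and folds its length into p's len, data_len and truesize, clearing destructor and sk so no socket keeps a reference to the absorbed buffer. A small sketch of just the guard-and-account step, with hypothetical names (coalesce_ok, account_merge, IP_MAX):

#include <stdbool.h>
#include <stdio.h>

#define IP_MAX 65536    /* one past the 16-bit IP total-length limit */

struct counters { unsigned int len, data_len, truesize; };

/* Can the merged packet still be described by a single IP header? */
static bool coalesce_ok(const struct counters *p, unsigned int extra)
{
    return p->len + extra < IP_MAX;     /* mirrors line 228 above */
}

static void account_merge(struct counters *p, const struct counters *d)
{
    p->data_len += d->len;              /* adopted bytes are all non-linear */
    p->truesize += d->truesize;         /* memory-pressure accounting follows */
    p->len      += d->len;
}

int main(void)
{
    struct counters p = { .len = 64000, .truesize = 66000 };
    struct counters d = { .len = 1448, .truesize = 2048 };

    if (coalesce_ok(&p, d.len)) {
        account_merge(&p, &d);
        printf("merged: len=%u data_len=%u truesize=%u\n",
               p.len, p.data_len, p.truesize);
    } else {
        printf("flush first: %u + %u bytes will not fit\n", p.len, d.len);
    }
    return 0;
}
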
253 static void gro_complete(struct gro_node *gro, struct sk_buff *skb) in gro_complete() argument
257 __be16 type = skb->protocol; in gro_complete()
260 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); in gro_complete()
262 if (NAPI_GRO_CB(skb)->count == 1) { in gro_complete()
263 skb_shinfo(skb)->gso_size = 0; in gro_complete()
274 skb, 0); in gro_complete()
281 kfree_skb(skb); in gro_complete()
286 gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count); in gro_complete()
292 struct sk_buff *skb, *p; in __gro_flush_chain() local
294 list_for_each_entry_safe_reverse(skb, p, head, list) { in __gro_flush_chain()
295 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) in __gro_flush_chain()
297 skb_list_del_init(skb); in __gro_flush_chain()
298 gro_complete(gro, skb); in __gro_flush_chain()
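
__gro_flush_chain() walks a bucket's held packets with the _safe reverse iterator, which visits the oldest entries first (new packets are added at the head), and completes each one; with flush_old set, a packet whose age stamp equals the current jiffies ends the walk, since everything after it was touched in the current tick and may still grow. A sketch of that age cutoff over a plain singly linked list, with invented names (held, flush_chain):

#include <stdio.h>

struct held { unsigned long age; int id; struct held *next; };

/* Complete packets oldest-first; with flush_old set, stop at the first
 * packet stamped with the current tick: everything after it is newer. */
static void flush_chain(struct held *oldest, unsigned long now, int flush_old)
{
    while (oldest) {
        if (flush_old && oldest->age == now)    /* mirrors line 295 */
            return;
        printf("completed packet %d (age %lu)\n", oldest->id, oldest->age);
        oldest = oldest->next;
    }
}

int main(void)
{
    struct held c = { .age = 100, .id = 3 };
    struct held b = { .age = 99,  .id = 2, .next = &c };
    struct held a = { .age = 99,  .id = 1, .next = &b };

    flush_chain(&a, 100, 1);    /* packets 1 and 2 complete, 3 survives */
    return 0;
}
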
324 static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb, in gro_list_prepare_tc_ext() argument
332 skb_ext = skb_ext_find(skb, TC_SKB_EXT); in gro_list_prepare_tc_ext()
343 const struct sk_buff *skb) in gro_list_prepare() argument
345 unsigned int maclen = skb->dev->hard_header_len; in gro_list_prepare()
346 u32 hash = skb_get_hash_raw(skb); in gro_list_prepare()
357 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; in gro_list_prepare()
358 diffs |= p->vlan_all ^ skb->vlan_all; in gro_list_prepare()
359 diffs |= skb_metadata_differs(p, skb); in gro_list_prepare()
362 skb_mac_header(skb)); in gro_list_prepare()
365 skb_mac_header(skb), in gro_list_prepare()
373 if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { in gro_list_prepare()
374 diffs |= p->sk != skb->sk; in gro_list_prepare()
375 diffs |= skb_metadata_dst_cmp(p, skb); in gro_list_prepare()
376 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); in gro_list_prepare()
378 diffs |= gro_list_prepare_tc_ext(skb, p, diffs); in gro_list_prepare()
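
gro_list_prepare() decides whether a held packet p can absorb the new skb by OR-ing every field difference into a single diffs word instead of branching per field: device and VLAN XORs, a metadata comparison, then maclen bytes of MAC header, with the rarely populated slow_gro state (socket, dst, conntrack, tc extension) examined only when either packet carries it; the flow matches only if diffs ends up zero. A sketch of the accumulate-then-test pattern, assuming a toy flow_key type (flow_diffs is an invented name):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow_key {
    const void *dev;            /* receiving device identity */
    uint16_t vlan_all;          /* VLAN proto+tag, 0 when untagged */
    unsigned char mac[14];      /* link-layer header bytes */
};

/* OR every field difference into one word; zero means same flow. */
static unsigned long flow_diffs(const struct flow_key *p,
                                const struct flow_key *q, size_t maclen)
{
    unsigned long diffs;

    diffs  = (uintptr_t)p->dev ^ (uintptr_t)q->dev;
    diffs |= p->vlan_all ^ q->vlan_all;
    diffs |= !!memcmp(p->mac, q->mac, maclen);
    return diffs;
}

int main(void)
{
    struct flow_key a = { .dev = &a, .vlan_all = 0,
                          .mac = "\xaa\xbb\xcc\xdd\xee\xff" };
    struct flow_key b = a;

    printf("same flow: %s\n", flow_diffs(&a, &b, 14) ? "no" : "yes");
    b.vlan_all = 5;
    printf("same flow: %s\n", flow_diffs(&a, &b, 14) ? "no" : "yes");
    return 0;
}
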
385 static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) in skb_gro_reset_offset() argument
391 NAPI_GRO_CB(skb)->network_offset = 0; in skb_gro_reset_offset()
392 NAPI_GRO_CB(skb)->data_offset = 0; in skb_gro_reset_offset()
393 headlen = skb_headlen(skb); in skb_gro_reset_offset()
394 NAPI_GRO_CB(skb)->frag0 = skb->data; in skb_gro_reset_offset()
395 NAPI_GRO_CB(skb)->frag0_len = headlen; in skb_gro_reset_offset()
399 pinfo = skb_shinfo(skb); in skb_gro_reset_offset()
405 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); in skb_gro_reset_offset()
406 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, in skb_gro_reset_offset()
408 skb->end - skb->tail); in skb_gro_reset_offset()
412 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) in gro_pull_from_frag0() argument
414 struct skb_shared_info *pinfo = skb_shinfo(skb); in gro_pull_from_frag0()
416 BUG_ON(skb->end - skb->tail < grow); in gro_pull_from_frag0()
418 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); in gro_pull_from_frag0()
420 skb->data_len -= grow; in gro_pull_from_frag0()
421 skb->tail += grow; in gro_pull_from_frag0()
427 skb_frag_unref(skb, 0); in gro_pull_from_frag0()
433 static void gro_try_pull_from_frag0(struct sk_buff *skb) in gro_try_pull_from_frag0() argument
435 int grow = skb_gro_offset(skb) - skb_headlen(skb); in gro_try_pull_from_frag0()
438 gro_pull_from_frag0(skb, grow); in gro_try_pull_from_frag0()
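
gro_pull_from_frag0() handles the case where header parsing consumed more bytes than the linear area holds (skb_gro_offset past skb_headlen, line 435): the shortfall is copied from frag0 into the tail room (the BUG_ON on line 416 guards the available space), the linear and non-linear byte counts are rebalanced, and the source fragment is advanced, with its page reference dropped once fully consumed (line 427). A userspace sketch under a flat-buffer model, with invented names (buf, pull_from_frag0):

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct buf {
    char linear[64];            /* linear area (head) */
    size_t tail;                /* end of valid linear data */
    const char *frag0;          /* window into the first fragment */
    size_t frag0_len;
    size_t data_len;            /* bytes living outside the linear area */
};

/* Pull 'grow' header bytes from frag0 into the linear area. */
static void pull_from_frag0(struct buf *b, size_t grow)
{
    assert(sizeof(b->linear) - b->tail >= grow);    /* line 416's BUG_ON */
    memcpy(b->linear + b->tail, b->frag0, grow);
    b->tail += grow;
    b->data_len -= grow;
    b->frag0 += grow;
    b->frag0_len -= grow;
    /* the kernel also drops the page reference once frag0_len hits 0 */
}

int main(void)
{
    struct buf b = { .tail = 0, .frag0 = "IPv4-header-bytes...",
                     .frag0_len = 20, .data_len = 20 };

    pull_from_frag0(&b, 20);
    printf("linear=%zu bytes, non-linear=%zu bytes left\n",
           b.tail, b.data_len);
    return 0;
}
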
454 * SKB to the chain. in gro_flush_oldest()
461 struct sk_buff *skb) in dev_gro_receive() argument
463 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); in dev_gro_receive()
467 __be16 type = skb->protocol; in dev_gro_receive()
472 if (netif_elide_gro(skb->dev)) in dev_gro_receive()
475 gro_list_prepare(&gro_list->list, skb); in dev_gro_receive()
486 skb_set_network_header(skb, skb_gro_offset(skb)); in dev_gro_receive()
487 skb_reset_mac_len(skb); in dev_gro_receive()
491 *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0; in dev_gro_receive()
492 NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb); in dev_gro_receive()
493 NAPI_GRO_CB(skb)->count = 1; in dev_gro_receive()
494 if (unlikely(skb_is_gso(skb))) { in dev_gro_receive()
495 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs; in dev_gro_receive()
497 if (!skb_is_gso_tcp(skb) || in dev_gro_receive()
498 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) in dev_gro_receive()
499 NAPI_GRO_CB(skb)->flush = 1; in dev_gro_receive()
503 switch (skb->ip_summed) { in dev_gro_receive()
505 NAPI_GRO_CB(skb)->csum = skb->csum; in dev_gro_receive()
506 NAPI_GRO_CB(skb)->csum_valid = 1; in dev_gro_receive()
509 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; in dev_gro_receive()
515 &gro_list->list, skb); in dev_gro_receive()
524 same_flow = NAPI_GRO_CB(skb)->same_flow; in dev_gro_receive()
525 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; in dev_gro_receive()
536 if (NAPI_GRO_CB(skb)->flush) in dev_gro_receive()
544 /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */ in dev_gro_receive()
545 gro_try_pull_from_frag0(skb); in dev_gro_receive()
546 NAPI_GRO_CB(skb)->age = jiffies; in dev_gro_receive()
547 NAPI_GRO_CB(skb)->last = skb; in dev_gro_receive()
548 if (!skb_is_gso(skb)) in dev_gro_receive()
549 skb_shinfo(skb)->gso_size = skb_gro_len(skb); in dev_gro_receive()
550 list_add(&skb->list, &gro_list->list); in dev_gro_receive()
564 gro_try_pull_from_frag0(skb); in dev_gro_receive()
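
dev_gro_receive() is the dispatcher tying the pieces together: it selects a hold list by masking the flow hash with GRO_HASH_BUCKETS - 1 (line 463), initializes the GRO control block (count, flush, checksum state), hands the skb to the matching protocol's receive callback, and either frees or merges the skb, or stamps it with age and last and parks it on the bucket list. The mask-based bucket selection works only because the bucket count (8 in current kernels) is a power of two; a sketch, with gro_bucket as an invented name:

#include <stdio.h>

#define GRO_HASH_BUCKETS 8      /* must stay a power of two */

/* Map a flow hash to a bucket with a mask instead of a modulo. */
static unsigned int gro_bucket(unsigned int hash)
{
    _Static_assert((GRO_HASH_BUCKETS & (GRO_HASH_BUCKETS - 1)) == 0,
                   "bucket count must be a power of two");
    return hash & (GRO_HASH_BUCKETS - 1);
}

int main(void)
{
    unsigned int hashes[] = { 0x9e3779b9, 0x12345678, 0xdeadbeef };

    for (unsigned int i = 0; i < 3; i++)
        printf("hash %#010x -> bucket %u\n",
               hashes[i], gro_bucket(hashes[i]));
    return 0;
}
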
596 static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb, in gro_skb_finish() argument
601 gro_normal_one(gro, skb, 1); in gro_skb_finish()
605 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) in gro_skb_finish()
606 napi_skb_free_stolen_head(skb); in gro_skb_finish()
607 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) in gro_skb_finish()
608 __kfree_skb(skb); in gro_skb_finish()
610 __napi_kfree_skb(skb, SKB_CONSUMED); in gro_skb_finish()
622 gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb) in gro_receive_skb() argument
626 __skb_mark_napi_id(skb, gro); in gro_receive_skb()
627 trace_napi_gro_receive_entry(skb); in gro_receive_skb()
629 skb_gro_reset_offset(skb, 0); in gro_receive_skb()
631 ret = gro_skb_finish(gro, skb, dev_gro_receive(gro, skb)); in gro_receive_skb()
638 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) in napi_reuse_skb() argument
640 if (unlikely(skb->pfmemalloc)) { in napi_reuse_skb()
641 consume_skb(skb); in napi_reuse_skb()
644 __skb_pull(skb, skb_headlen(skb)); in napi_reuse_skb()
646 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); in napi_reuse_skb()
647 __vlan_hwaccel_clear_tag(skb); in napi_reuse_skb()
648 skb->dev = napi->dev; in napi_reuse_skb()
649 skb->skb_iif = 0; in napi_reuse_skb()
652 skb->pkt_type = PACKET_HOST; in napi_reuse_skb()
654 skb->encapsulation = 0; in napi_reuse_skb()
655 skb->ip_summed = CHECKSUM_NONE; in napi_reuse_skb()
656 skb_shinfo(skb)->gso_type = 0; in napi_reuse_skb()
657 skb_shinfo(skb)->gso_size = 0; in napi_reuse_skb()
658 if (unlikely(skb->slow_gro)) { in napi_reuse_skb()
659 skb_orphan(skb); in napi_reuse_skb()
660 skb_ext_reset(skb); in napi_reuse_skb()
661 nf_reset_ct(skb); in napi_reuse_skb()
662 skb->slow_gro = 0; in napi_reuse_skb()
665 napi->skb = skb; in napi_reuse_skb()
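
napi_reuse_skb() recycles a merged-away skb instead of freeing it: the consumed headers are pulled and headroom restored, the VLAN tag, checksum state and GSO metadata are scrubbed, the rarely-set slow_gro state (socket, extensions, conntrack) is reset only when present, and the clean skb is parked in napi->skb for the next napi_get_frags() call; pfmemalloc buffers are consumed rather than recycled (lines 640-641). A sketch of the scrub-and-cache pattern with an invented rxbuf type:

#include <stdio.h>
#include <stdlib.h>

struct rxbuf {
    char data[2048];
    size_t len;
    int vlan_tag;               /* per-packet state that must not leak */
    int csum_state;
    int slow_path;              /* rarely-set state, reset lazily */
};

static struct rxbuf *cache;     /* one-slot cache, like napi->skb */

/* Scrub a consumed buffer back to a pristine state and park it. */
static void rxbuf_reuse(struct rxbuf *b)
{
    b->len = 0;
    b->vlan_tag = 0;
    b->csum_state = 0;
    if (b->slow_path)           /* only touch the rare state if set */
        b->slow_path = 0;
    cache = b;
}

static struct rxbuf *rxbuf_get(void)
{
    struct rxbuf *b = cache;

    if (b) {                    /* hit: hand back the scrubbed buffer */
        cache = NULL;
        return b;
    }
    return calloc(1, sizeof(*b));
}

int main(void)
{
    struct rxbuf *a = rxbuf_get();

    a->vlan_tag = 42;
    rxbuf_reuse(a);
    struct rxbuf *b = rxbuf_get();
    printf("recycled: %s, scrubbed: %s\n",
           a == b ? "yes" : "no", b->vlan_tag == 0 ? "yes" : "no");
    free(b);
    return 0;
}
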
670 struct sk_buff *skb = napi->skb; in napi_get_frags() local
672 if (!skb) { in napi_get_frags()
673 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); in napi_get_frags()
674 if (skb) { in napi_get_frags()
675 napi->skb = skb; in napi_get_frags()
676 skb_mark_napi_id(skb, napi); in napi_get_frags()
679 return skb; in napi_get_frags()
684 struct sk_buff *skb, in napi_frags_finish() argument
690 __skb_push(skb, ETH_HLEN); in napi_frags_finish()
691 skb->protocol = eth_type_trans(skb, skb->dev); in napi_frags_finish()
693 gro_normal_one(&napi->gro, skb, 1); in napi_frags_finish()
697 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) in napi_frags_finish()
698 napi_skb_free_stolen_head(skb); in napi_frags_finish()
700 napi_reuse_skb(napi, skb); in napi_frags_finish()
713 * We copy ethernet header into skb->data to have a common layout.
717 struct sk_buff *skb = napi->skb; in napi_frags_skb() local
721 napi->skb = NULL; in napi_frags_skb()
723 skb_reset_mac_header(skb); in napi_frags_skb()
724 skb_gro_reset_offset(skb, hlen); in napi_frags_skb()
726 if (unlikely(!skb_gro_may_pull(skb, hlen))) { in napi_frags_skb()
727 eth = skb_gro_header_slow(skb, hlen, 0); in napi_frags_skb()
729 net_warn_ratelimited("%s: dropping impossible skb from %s\n", in napi_frags_skb()
731 napi_reuse_skb(napi, skb); in napi_frags_skb()
735 eth = (const struct ethhdr *)skb->data; in napi_frags_skb()
737 if (NAPI_GRO_CB(skb)->frag0 != skb->data) in napi_frags_skb()
738 gro_pull_from_frag0(skb, hlen); in napi_frags_skb()
740 NAPI_GRO_CB(skb)->frag0 += hlen; in napi_frags_skb()
741 NAPI_GRO_CB(skb)->frag0_len -= hlen; in napi_frags_skb()
743 __skb_pull(skb, hlen); in napi_frags_skb()
750 skb->protocol = eth->h_proto; in napi_frags_skb()
752 return skb; in napi_frags_skb()
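
napi_frags_skb() builds the Ethernet view for the frags path: it resets the MAC header, makes sure ETH_HLEN bytes are reachable (falling back to skb_gro_header_slow() and recycling the skb when even that fails, lines 727-731), advances frag0 or the linear data past the header, and records eth->h_proto as skb->protocol, still in network byte order. A sketch that extracts the EtherType from a raw frame, converting to host order only for display (eth_proto is an invented helper):

#include <arpa/inet.h>          /* ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

/* Read the EtherType from a raw frame, refusing truncated input. */
static int eth_proto(const uint8_t *frame, size_t len, uint16_t *proto)
{
    uint16_t be;

    if (len < ETH_HLEN)
        return -1;                          /* the "impossible skb" case */
    memcpy(&be, frame + 12, sizeof(be));    /* h_proto sits at offset 12 */
    *proto = ntohs(be);                     /* host order, for display only */
    return 0;
}

int main(void)
{
    uint8_t frame[ETH_HLEN + 4] = { 0 };
    uint16_t proto;

    frame[12] = 0x08;                       /* 0x0800: IPv4 */
    if (!eth_proto(frame, sizeof(frame), &proto))
        printf("protocol 0x%04x, payload at offset %d\n", proto, ETH_HLEN);
    return 0;
}
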
758 struct sk_buff *skb = napi_frags_skb(napi); in napi_gro_frags() local
760 trace_napi_gro_frags_entry(skb); in napi_gro_frags()
762 ret = napi_frags_finish(napi, skb, dev_gro_receive(&napi->gro, skb)); in napi_gro_frags()
772 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) in __skb_gro_checksum_complete() argument
777 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); in __skb_gro_checksum_complete()
779 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ in __skb_gro_checksum_complete()
780 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); in __skb_gro_checksum_complete()
783 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_gro_checksum_complete()
784 !skb->csum_complete_sw) in __skb_gro_checksum_complete()
785 netdev_rx_csum_fault(skb->dev, skb); in __skb_gro_checksum_complete()
788 NAPI_GRO_CB(skb)->csum = wsum; in __skb_gro_checksum_complete()
789 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_complete()
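
__skb_gro_checksum_complete() validates a packet by summing the whole GRO span with skb_checksum() and folding it against the pseudo-header checksum cached in NAPI_GRO_CB(skb)->csum; a hardware CHECKSUM_COMPLETE value that disagrees is reported through netdev_rx_csum_fault(), and the freshly computed sum is cached back and marked valid. A self-contained sketch of the underlying RFC 1071 one's-complement arithmetic (csum_partial and csum_fold here are local reimplementations, not the kernel's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One's-complement sum of a byte range (RFC 1071), 32-bit accumulator. */
static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
    while (len > 1) {
        sum += (uint32_t)p[0] << 8 | p[1];
        p += 2;
        len -= 2;
    }
    if (len)                    /* trailing odd byte pads with zero */
        sum += (uint32_t)p[0] << 8;
    return sum;
}

/* Fold the accumulator to 16 bits and complement, like csum_fold(). */
static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* RFC 1071's worked example: the sum folds to 0xddf2. */
    const uint8_t data[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

    printf("checksum = 0x%04x (expect 0x220d)\n",
           csum_fold(csum_partial(data, sizeof(data), 0)));
    return 0;
}
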
811 struct sk_buff *skb, *n; in gro_cleanup() local
814 list_for_each_entry_safe(skb, n, &gro->hash[i].list, list) in gro_cleanup()
815 kfree_skb(skb); in gro_cleanup()
823 list_for_each_entry_safe(skb, n, &gro->rx_list, list) in gro_cleanup()
824 kfree_skb(skb); in gro_cleanup()
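
gro_cleanup() drains both the per-bucket hash lists and the rx_list with list_for_each_entry_safe(), which caches each node's successor before the loop body frees the current one. The same idiom in plain C, with invented names (node, free_all):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

/* Free every node, caching the successor first so freeing the current
 * node never invalidates the iterator, as the _safe iterators do. */
static void free_all(struct node **head)
{
    struct node *cur, *n;

    for (cur = *head; cur; cur = n) {
        n = cur->next;          /* grab the successor before freeing */
        printf("freeing node %d\n", cur->id);
        free(cur);
    }
    *head = NULL;
}

int main(void)
{
    struct node *head = NULL;

    for (int i = 0; i < 3; i++) {
        struct node *nd = malloc(sizeof(*nd));
        nd->id = i;
        nd->next = head;
        head = nd;
    }
    free_all(&head);
    printf("list drained: %s\n", head ? "no" : "yes");
    return 0;
}
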