Lines Matching full:skb

19  *		Ray VanTassle	:	Fixed --skb->lock in free
84 #include <trace/events/skb.h>
192 * @skb: buffer
202 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
206 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
207 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
208 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
212 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
214 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
217 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
219 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
277 struct sk_buff *skb; in napi_skb_cache_get() local
291 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
293 kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); in napi_skb_cache_get()
295 return skb; in napi_skb_cache_get()
299 * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
300 * @skbs: pointer to an at least @n-sized array to fill with skb pointers
360 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, in __finalize_skb_around() argument
367 /* Assumes caller memset cleared SKB */ in __finalize_skb_around()
368 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
369 refcount_set(&skb->users, 1); in __finalize_skb_around()
370 skb->head = data; in __finalize_skb_around()
371 skb->data = data; in __finalize_skb_around()
372 skb_reset_tail_pointer(skb); in __finalize_skb_around()
373 skb_set_end_offset(skb, size); in __finalize_skb_around()
374 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
375 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
376 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
378 shinfo = skb_shinfo(skb); in __finalize_skb_around()
382 skb_set_kcov_handle(skb, kcov_common_handle()); in __finalize_skb_around()
385 static inline void *__slab_build_skb(struct sk_buff *skb, void *data, in __slab_build_skb() argument
410 struct sk_buff *skb; in slab_build_skb() local
413 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in slab_build_skb()
415 if (unlikely(!skb)) in slab_build_skb()
418 memset(skb, 0, offsetof(struct sk_buff, tail)); in slab_build_skb()
419 data = __slab_build_skb(skb, data, &size); in slab_build_skb()
420 __finalize_skb_around(skb, data, size); in slab_build_skb()
422 return skb; in slab_build_skb()
426 /* Caller must provide SKB that is memset cleared */
427 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
436 data = __slab_build_skb(skb, data, &size); in __build_skb_around()
438 __finalize_skb_around(skb, data, size); in __build_skb_around()
451 * The return value is the new skb buffer.
463 struct sk_buff *skb; in __build_skb() local
465 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in __build_skb()
467 if (unlikely(!skb)) in __build_skb()
470 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
471 __build_skb_around(skb, data, frag_size); in __build_skb()
473 return skb; in __build_skb()
477 * takes care of skb->head and skb->pfmemalloc
481 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
483 if (likely(skb && frag_size)) { in build_skb()
484 skb->head_frag = 1; in build_skb()
485 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb()
487 return skb; in build_skb()
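
The build_skb() lines above wrap a caller-provided data buffer instead of allocating one. As a hedged illustration (not code from this file), a driver RX path might use it roughly as sketched below; the helper name, the sizes, and the assumption that the fragment was sized for headroom + frame + skb_shared_info are all hypothetical.

#include <linux/skbuff.h>

/* Illustrative only: wrap a DMA'd page fragment in an skb.  'truesize'
 * is assumed to cover NET_SKB_PAD + NET_IP_ALIGN headroom, the frame,
 * and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 */
static struct sk_buff *example_rx_build(void *data, unsigned int truesize,
					unsigned int frame_len)
{
	struct sk_buff *skb = build_skb(data, truesize);

	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);	/* headroom left by HW */
	skb_put(skb, frame_len);			/* bytes the device wrote */
	return skb;
}
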
492 * build_skb_around - build a network buffer around provided skb
493 * @skb: sk_buff provide by caller, must be memset cleared
497 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
500 if (unlikely(!skb)) in build_skb_around()
503 __build_skb_around(skb, data, frag_size); in build_skb_around()
506 skb->head_frag = 1; in build_skb_around()
507 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb_around()
509 return skb; in build_skb_around()
525 struct sk_buff *skb; in __napi_build_skb() local
527 skb = napi_skb_cache_get(); in __napi_build_skb()
528 if (unlikely(!skb)) in __napi_build_skb()
531 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
532 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
534 return skb; in __napi_build_skb()
542 * Version of __napi_build_skb() that takes care of skb->head_frag
543 * and skb->pfmemalloc when the data is a page or page fragment.
549 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
551 if (likely(skb) && frag_size) { in napi_build_skb()
552 skb->head_frag = 1; in napi_build_skb()
553 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
556 return skb; in napi_build_skb()
627 * instead of head cache and allocate a cloned (child) skb.
643 struct sk_buff *skb; in __alloc_skb() local
656 skb = napi_skb_cache_get(); in __alloc_skb()
658 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
659 if (unlikely(!skb)) in __alloc_skb()
661 prefetchw(skb); in __alloc_skb()
666 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
682 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
683 __build_skb_around(skb, data, size); in __alloc_skb()
684 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
689 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
691 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
695 return skb; in __alloc_skb()
698 kmem_cache_free(cache, skb); in __alloc_skb()
720 struct sk_buff *skb; in __netdev_alloc_skb() local
727 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
732 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
733 if (!skb) in __netdev_alloc_skb()
762 skb = __build_skb(data, len); in __netdev_alloc_skb()
763 if (unlikely(!skb)) { in __netdev_alloc_skb()
769 skb->pfmemalloc = 1; in __netdev_alloc_skb()
770 skb->head_frag = 1; in __netdev_alloc_skb()
773 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
774 skb->dev = dev; in __netdev_alloc_skb()
777 return skb; in __netdev_alloc_skb()
797 struct sk_buff *skb; in napi_alloc_skb() local
805 * we use kmalloc() for skb->head allocation. in napi_alloc_skb()
810 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in napi_alloc_skb()
812 if (!skb) in napi_alloc_skb()
832 skb = __napi_build_skb(data, len); in napi_alloc_skb()
833 if (unlikely(!skb)) { in napi_alloc_skb()
839 skb->pfmemalloc = 1; in napi_alloc_skb()
840 skb->head_frag = 1; in napi_alloc_skb()
843 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in napi_alloc_skb()
844 skb->dev = napi->dev; in napi_alloc_skb()
847 return skb; in napi_alloc_skb()
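
napi_alloc_skb() above reserves NET_SKB_PAD + NET_IP_ALIGN and draws from the per-CPU NAPI cache. A minimal copy-break style sketch follows; the napi, hw_buf and len parameters are assumptions supplied by a hypothetical driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hedged sketch: allocate from the NAPI cache and copy a small frame in. */
static struct sk_buff *example_copybreak(struct napi_struct *napi,
					 const void *hw_buf, unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (unlikely(!skb))
		return NULL;

	skb_put_data(skb, hw_buf, len);	/* extend the tail and copy in one step */
	return skb;
}
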
851 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, in skb_add_rx_frag_netmem() argument
856 skb_fill_netmem_desc(skb, i, netmem, off, size); in skb_add_rx_frag_netmem()
857 skb->len += size; in skb_add_rx_frag_netmem()
858 skb->data_len += size; in skb_add_rx_frag_netmem()
859 skb->truesize += truesize; in skb_add_rx_frag_netmem()
863 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
866 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
871 skb->len += size; in skb_coalesce_rx_frag()
872 skb->data_len += size; in skb_coalesce_rx_frag()
873 skb->truesize += truesize; in skb_coalesce_rx_frag()
883 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
885 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
888 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
892 skb_walk_frags(skb, list) in skb_clone_fraglist()
906 struct sk_buff *skb = *pskb, *nskb; in skb_pp_cow_data() local
911 * the skb. in skb_pp_cow_data()
913 if (skb_has_frag_list(skb)) in skb_pp_cow_data()
917 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) in skb_pp_cow_data()
920 size = min_t(u32, skb->len, max_head_size); in skb_pp_cow_data()
933 skb_copy_header(nskb, skb); in skb_pp_cow_data()
936 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
943 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
947 len = skb->len - off; in skb_pp_cow_data()
948 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in skb_pp_cow_data()
962 err = skb_copy_bits(skb, off, page_address(page) + page_off, in skb_pp_cow_data()
973 consume_skb(skb); in skb_pp_cow_data()
1015 static bool skb_pp_recycle(struct sk_buff *skb, void *data) in skb_pp_recycle() argument
1017 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
1023 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
1024 * @skb: page pool aware skb
1026 * Increase the fragment reference count (pp_ref_count) of a skb. This is
1028 * i.e. when skb->pp_recycle is true, and not for fragments in a
1029 * non-pp-recycling skb. It has a fallback to increase references on normal
1032 static int skb_pp_frag_ref(struct sk_buff *skb) in skb_pp_frag_ref() argument
1038 if (!skb->pp_recycle) in skb_pp_frag_ref()
1041 shinfo = skb_shinfo(skb); in skb_pp_frag_ref()
1061 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
1063 unsigned char *head = skb->head; in skb_free_head()
1065 if (skb->head_frag) { in skb_free_head()
1066 if (skb_pp_recycle(skb, head)) in skb_free_head()
1070 skb_kfree_head(head, skb_end_offset(skb)); in skb_free_head()
1074 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_data() argument
1076 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
1079 if (!skb_data_unref(skb, shinfo)) in skb_release_data()
1082 if (skb_zcopy(skb)) { in skb_release_data()
1085 skb_zcopy_clear(skb, true); in skb_release_data()
1091 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
1097 skb_free_head(skb); in skb_release_data()
1099 /* When we clone an SKB we copy the recycling bit. The pp_recycle in skb_release_data()
1102 * to make one SKB responsible for triggering the recycle path. in skb_release_data()
1103 * So disable the recycling bit if an SKB is cloned and we have in skb_release_data()
1104 * additional references to the fragmented part of the SKB. in skb_release_data()
1105 * Eventually the last SKB will have the recycling bit set and it's in skb_release_data()
1108 skb->pp_recycle = 0; in skb_release_data()
1114 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
1118 switch (skb->fclone) { in kfree_skbmem()
1120 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skbmem()
1124 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
1126 /* We usually free the clone (TX completion) before original skb in kfree_skbmem()
1135 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
1144 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
1146 skb_dst_drop(skb); in skb_release_head_state()
1147 if (skb->destructor) { in skb_release_head_state()
1149 skb->destructor(skb); in skb_release_head_state()
1152 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
1154 skb_ext_put(skb); in skb_release_head_state()
1158 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_all() argument
1160 skb_release_head_state(skb); in skb_release_all()
1161 if (likely(skb->head)) in skb_release_all()
1162 skb_release_data(skb, reason); in skb_release_all()
1167 * @skb: buffer
1174 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
1176 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); in __kfree_skb()
1177 kfree_skbmem(skb); in __kfree_skb()
1182 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, in __sk_skb_reason_drop() argument
1185 if (unlikely(!skb_unref(skb))) in __sk_skb_reason_drop()
1194 trace_consume_skb(skb, __builtin_return_address(0)); in __sk_skb_reason_drop()
1196 trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); in __sk_skb_reason_drop()
1202 * @sk: the socket to receive @skb, or NULL if not applicable
1203 * @skb: buffer to free
1204 * @reason: reason why this skb is dropped
1211 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) in sk_skb_reason_drop() argument
1213 if (__sk_skb_reason_drop(sk, skb, reason)) in sk_skb_reason_drop()
1214 __kfree_skb(skb); in sk_skb_reason_drop()
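
sk_skb_reason_drop()/kfree_skb() above feed the kfree_skb tracepoint with a drop reason. Below is a hedged sketch of how a caller might tag a drop explicitly; the chosen reason and the ok flag are illustrative assumptions, not code from this file.

#include <linux/skbuff.h>

/* Illustrative only: report a meaningful drop reason instead of
 * SKB_DROP_REASON_NOT_SPECIFIED, and use consume_skb() when the packet
 * was processed successfully rather than dropped.
 */
static void example_finish(struct sk_buff *skb, bool ok)
{
	if (!ok) {
		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
		return;
	}
	consume_skb(skb);
}
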
1225 static void kfree_skb_add_bulk(struct sk_buff *skb, in kfree_skb_add_bulk() argument
1229 /* if SKB is a clone, don't handle this case */ in kfree_skb_add_bulk()
1230 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1231 __kfree_skb(skb); in kfree_skb_add_bulk()
1235 skb_release_all(skb, reason); in kfree_skb_add_bulk()
1236 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1268 /* Dump skb information and contents.
1274 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
1276 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
1277 struct net_device *dev = skb->dev; in skb_dump()
1278 struct sock *sk = skb->sk; in skb_dump()
1285 len = skb->len; in skb_dump()
1287 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1289 headroom = skb_headroom(skb); in skb_dump()
1290 tailroom = skb_tailroom(skb); in skb_dump()
1292 has_mac = skb_mac_header_was_set(skb); in skb_dump()
1293 has_trans = skb_transport_header_was_set(skb); in skb_dump()
1302 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
1303 has_mac ? skb->mac_header : -1, in skb_dump()
1304 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1305 skb->mac_len, in skb_dump()
1306 skb->network_header, in skb_dump()
1307 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1308 has_trans ? skb->transport_header : -1, in skb_dump()
1311 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1312 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, in skb_dump()
1313 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1314 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1315 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
1316 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, in skb_dump()
1317 skb->inner_network_header, skb->inner_transport_header); in skb_dump()
1327 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1328 16, 1, skb->head, headroom, false); in skb_dump()
1330 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
1332 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, in skb_dump()
1333 16, 1, skb->data, seg_len, false); in skb_dump()
1337 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1338 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
1340 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1341 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1359 print_hex_dump(level, "skb frag: ", in skb_dump()
1369 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
1370 printk("skb fraglist:\n"); in skb_dump()
1371 skb_walk_frags(skb, list_skb) in skb_dump()
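
skb_dump(), whose signature and fields appear above, prints an skb's geometry and (optionally) its contents. A minimal hedged example of invoking it once from a debug path:

#include <linux/printk.h>
#include <linux/skbuff.h>

/* Sketch: dump an skb's layout at KERN_ERR once; full_pkt=false limits
 * the hex dump to the leading bytes instead of the whole packet.
 */
static void example_debug_skb(const struct sk_buff *skb)
{
	static bool dumped;

	if (!dumped) {
		dumped = true;
		skb_dump(KERN_ERR, skb, false);
	}
}
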
1379 * @skb: buffer that triggered an error
1381 * Report xmit error if a device callback is tracking this skb.
1382 * skb must be freed afterwards.
1384 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
1386 if (skb) { in skb_tx_error()
1387 skb_zcopy_downgrade_managed(skb); in skb_tx_error()
1388 skb_zcopy_clear(skb, true); in skb_tx_error()
1396 * @skb: buffer to free
1402 void consume_skb(struct sk_buff *skb) in consume_skb() argument
1404 if (!skb_unref(skb)) in consume_skb()
1407 trace_consume_skb(skb, __builtin_return_address(0)); in consume_skb()
1408 __kfree_skb(skb); in consume_skb()
1415 * @skb: buffer to free
1418 * skb reference and all the head states have already been dropped
1420 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
1422 trace_consume_skb(skb, __builtin_return_address(0)); in __consume_stateless_skb()
1423 skb_release_data(skb, SKB_CONSUMED); in __consume_stateless_skb()
1424 kfree_skbmem(skb); in __consume_stateless_skb()
1427 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
1432 if (!kasan_mempool_poison_object(skb)) in napi_skb_cache_put()
1436 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1450 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) in __napi_kfree_skb() argument
1452 skb_release_all(skb, reason); in __napi_kfree_skb()
1453 napi_skb_cache_put(skb); in __napi_kfree_skb()
1456 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
1458 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1459 nf_reset_ct(skb); in napi_skb_free_stolen_head()
1460 skb_dst_drop(skb); in napi_skb_free_stolen_head()
1461 skb_ext_put(skb); in napi_skb_free_stolen_head()
1462 skb_orphan(skb); in napi_skb_free_stolen_head()
1463 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1465 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
1468 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
1472 dev_consume_skb_any(skb); in napi_consume_skb()
1478 if (!skb_unref(skb)) in napi_consume_skb()
1481 /* if reaching here SKB is ready to free */ in napi_consume_skb()
1482 trace_consume_skb(skb, __builtin_return_address(0)); in napi_consume_skb()
1484 /* if SKB is a clone, don't handle this case */ in napi_consume_skb()
1485 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1486 __kfree_skb(skb); in napi_consume_skb()
1490 skb_release_all(skb, SKB_CONSUMED); in napi_consume_skb()
1491 napi_skb_cache_put(skb); in napi_consume_skb()
1551 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1553 #define C(x) n->x = skb->x in __skb_clone()
1557 __copy_skb_header(n, skb); in __skb_clone()
1562 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1577 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1578 skb->cloned = 1; in __skb_clone()
1610 * skb_morph - morph one skb into another
1611 * @dst: the skb to receive the contents
1612 * @src: the skb to supply the contents
1614 * This is identical to skb_clone except that the target skb is
1617 * The target skb is returned upon exit.
1672 struct sk_buff *skb; in msg_zerocopy_alloc() local
1676 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1677 if (!skb) in msg_zerocopy_alloc()
1680 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1681 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1685 kfree_skb(skb); in msg_zerocopy_alloc()
1729 /* TCP can create new skb to attach new uarg */ in msg_zerocopy_realloc()
1756 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1758 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1778 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1780 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1800 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1814 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1815 skb = NULL; in __msg_zerocopy_callback()
1822 consume_skb(skb); in __msg_zerocopy_callback()
1826 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_complete() argument
1854 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1858 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1861 err = uarg->ops->link_skb(skb, uarg); in skb_zerocopy_iter_stream()
1865 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1867 /* An skb can only point to one uarg. This edge case happens in skb_zerocopy_iter_stream()
1868 * when TCP appends to an skb, but zerocopy_realloc triggered in skb_zerocopy_iter_stream()
1875 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1876 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1877 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1879 /* Streams do not free skb on error. Reset to prev state. */ in skb_zerocopy_iter_stream()
1880 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1881 skb->sk = sk; in skb_zerocopy_iter_stream()
1882 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1883 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1887 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1888 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1892 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) in __skb_zcopy_downgrade_managed() argument
1896 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1897 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1898 skb_frag_ref(skb, i); in __skb_zcopy_downgrade_managed()
1923 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1924 * @skb: the skb to modify
1927 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1937 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1939 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1944 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1947 if (!skb_frags_readable(skb)) in skb_copy_ubufs()
1957 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) in skb_copy_ubufs()
1961 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
1979 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
2004 /* skb frags release userspace buffers */ in skb_copy_ubufs()
2006 skb_frag_unref(skb, i); in skb_copy_ubufs()
2008 /* skb frags point to kernel buffers */ in skb_copy_ubufs()
2010 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); in skb_copy_ubufs()
2013 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, in skb_copy_ubufs()
2015 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
2018 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
2025 * @skb: buffer to clone
2037 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
2039 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
2044 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
2047 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
2053 if (skb_pfmemalloc(skb)) in skb_clone()
2063 return __skb_clone(n, skb); in skb_clone()
2067 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
2070 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
2071 skb->csum_start += off; in skb_headers_offset_update()
2072 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
2073 skb->transport_header += off; in skb_headers_offset_update()
2074 skb->network_header += off; in skb_headers_offset_update()
2075 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
2076 skb->mac_header += off; in skb_headers_offset_update()
2077 skb->inner_transport_header += off; in skb_headers_offset_update()
2078 skb->inner_network_header += off; in skb_headers_offset_update()
2079 skb->inner_mac_header += off; in skb_headers_offset_update()
2093 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
2095 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
2102 * @skb: buffer to copy
2117 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
2123 if (!skb_frags_readable(skb)) in skb_copy()
2126 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy()
2129 headerlen = skb_headroom(skb); in skb_copy()
2130 size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2132 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
2139 skb_put(n, skb->len); in skb_copy()
2141 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2143 skb_copy_header(n, skb); in skb_copy()
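
The skb_clone()/skb_copy() matches above reflect the usual trade-off: a clone shares the data buffer, a copy is fully private. A hedged sketch of that decision, with the will_touch_payload flag as an assumed caller-provided hint:

#include <linux/skbuff.h>

/* Sketch: clone when the payload stays read-only, copy when it will be
 * modified.  GFP_ATOMIC is an assumption about the calling context.
 */
static struct sk_buff *example_dup(struct sk_buff *skb, bool will_touch_payload)
{
	if (!will_touch_payload)
		return skb_clone(skb, GFP_ATOMIC);	/* shares head and frags */

	return skb_copy(skb, GFP_ATOMIC);		/* private head and frags */
}
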
2150 * @skb: buffer to copy
2151 * @headroom: headroom of new skb
2153 * @fclone: if true allocate the copy of the skb from the fclone
2165 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
2168 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
2169 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
2178 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
2180 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2182 n->truesize += skb->data_len; in __pskb_copy_fclone()
2183 n->data_len = skb->data_len; in __pskb_copy_fclone()
2184 n->len = skb->len; in __pskb_copy_fclone()
2186 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2189 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
2190 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
2195 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2196 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2197 skb_frag_ref(skb, i); in __pskb_copy_fclone()
2202 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
2203 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2207 skb_copy_header(n, skb); in __pskb_copy_fclone()
2215 * @skb: buffer to reallocate
2221 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2225 * All the pointers pointing into skb header may change and must be
2229 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
2232 unsigned int osize = skb_end_offset(skb); in pskb_expand_head()
2240 BUG_ON(skb_shared(skb)); in pskb_expand_head()
2242 skb_zcopy_downgrade_managed(skb); in pskb_expand_head()
2244 if (skb_pfmemalloc(skb)) in pskb_expand_head()
2255 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2258 skb_shinfo(skb), in pskb_expand_head()
2259 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2266 if (skb_cloned(skb)) { in pskb_expand_head()
2267 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
2269 if (skb_zcopy(skb)) in pskb_expand_head()
2270 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2271 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2272 skb_frag_ref(skb, i); in pskb_expand_head()
2274 if (skb_has_frag_list(skb)) in pskb_expand_head()
2275 skb_clone_fraglist(skb); in pskb_expand_head()
2277 skb_release_data(skb, SKB_CONSUMED); in pskb_expand_head()
2279 skb_free_head(skb); in pskb_expand_head()
2281 off = (data + nhead) - skb->head; in pskb_expand_head()
2283 skb->head = data; in pskb_expand_head()
2284 skb->head_frag = 0; in pskb_expand_head()
2285 skb->data += off; in pskb_expand_head()
2287 skb_set_end_offset(skb, size); in pskb_expand_head()
2291 skb->tail += off; in pskb_expand_head()
2292 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
2293 skb->cloned = 0; in pskb_expand_head()
2294 skb->hdr_len = 0; in pskb_expand_head()
2295 skb->nohdr = 0; in pskb_expand_head()
2296 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2298 skb_metadata_clear(skb); in pskb_expand_head()
2300 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2302 * when skb is orphaned (not attached to a socket). in pskb_expand_head()
2304 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2305 skb->truesize += size - osize; in pskb_expand_head()
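
pskb_expand_head() above is the slow path behind helpers such as skb_cow_head() that callers use before prepending a header. A hedged sketch of making room for a hypothetical 8-byte encapsulation header:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#define EXAMPLE_HDR_LEN 8	/* hypothetical encapsulation header size */

/* Sketch: guarantee headroom (reallocating via pskb_expand_head() if
 * needed), then push and zero the new header.
 */
static int example_push_header(struct sk_buff *skb)
{
	void *hdr;

	if (skb_cow_head(skb, EXAMPLE_HDR_LEN))
		return -ENOMEM;

	hdr = skb_push(skb, EXAMPLE_HDR_LEN);
	memset(hdr, 0, EXAMPLE_HDR_LEN);	/* a real caller fills the header */
	return 0;
}
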
2316 /* Make private copy of skb with writable head and some headroom */
2318 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
2321 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2324 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
2326 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
2338 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
2344 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
2345 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2347 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
2351 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2353 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
2356 /* We can not change skb->end if the original or new value in __skb_unclone_keeptruesize()
2360 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { in __skb_unclone_keeptruesize()
2365 saved_end_offset, skb_end_offset(skb)); in __skb_unclone_keeptruesize()
2370 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
2372 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2375 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2379 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
2386 * @skb: buffer to reallocate
2389 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2390 * if possible; copies skb->sk to new skb as needed
2391 * and frees original skb in case of failures.
2396 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
2398 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2399 int osize = skb_end_offset(skb); in skb_expand_head()
2400 struct sock *sk = skb->sk; in skb_expand_head()
2404 return skb; in skb_expand_head()
2407 /* pskb_expand_head() might crash, if skb is shared. */ in skb_expand_head()
2408 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
2409 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
2416 consume_skb(skb); in skb_expand_head()
2417 skb = nskb; in skb_expand_head()
2419 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
2422 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
2423 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2425 skb->truesize += delta; in skb_expand_head()
2427 return skb; in skb_expand_head()
2430 kfree_skb(skb); in skb_expand_head()
2437 * @skb: buffer to copy
2453 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
2464 if (!skb_frags_readable(skb)) in skb_copy_expand()
2467 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy_expand()
2470 oldheadroom = skb_headroom(skb); in skb_copy_expand()
2471 n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2472 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
2480 skb_put(n, skb->len); in skb_copy_expand()
2490 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2491 skb->len + head_copy_len)); in skb_copy_expand()
2493 skb_copy_header(n, skb); in skb_copy_expand()
2502 * __skb_pad - zero pad the tail of an skb
2503 * @skb: buffer to pad
2511 * May return error in out of memory cases. The skb is freed on error
2515 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
2521 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
2522 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2526 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2527 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
2528 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
2533 /* FIXME: The use of this function with non-linear skb's really needs in __skb_pad()
2536 err = skb_linearize(skb); in __skb_pad()
2540 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2545 kfree_skb(skb); in __skb_pad()
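
__skb_pad() above is reached through helpers like skb_put_padto(). A hedged sketch of padding a short frame to the Ethernet minimum before transmit; note that the helper frees the skb itself on failure.

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Sketch: zero-pad to ETH_ZLEN; on error the skb has already been freed. */
static int example_pad_for_xmit(struct sk_buff *skb)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;
	return 0;
}
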
2552 * @skb: start of the buffer to use
2557 * fragmented buffer. @tail must be the last fragment of @skb -- or
2558 * @skb itself. If this would exceed the total buffer size the kernel
2563 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
2565 if (tail != skb) { in pskb_put()
2566 skb->data_len += len; in pskb_put()
2567 skb->len += len; in pskb_put()
2575 * @skb: buffer to use
2582 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
2584 void *tmp = skb_tail_pointer(skb); in skb_put()
2585 SKB_LINEAR_ASSERT(skb); in skb_put()
2586 skb->tail += len; in skb_put()
2587 skb->len += len; in skb_put()
2588 if (unlikely(skb->tail > skb->end)) in skb_put()
2589 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2596 * @skb: buffer to use
2603 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2605 skb->data -= len; in skb_push()
2606 skb->len += len; in skb_push()
2607 if (unlikely(skb->data < skb->head)) in skb_push()
2608 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2609 return skb->data; in skb_push()
2615 * @skb: buffer to use
2623 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2625 return skb_pull_inline(skb, len); in skb_pull()
2632 * @skb: buffer to use
2640 void *skb_pull_data(struct sk_buff *skb, size_t len) in skb_pull_data() argument
2642 void *data = skb->data; in skb_pull_data()
2644 if (skb->len < len) in skb_pull_data()
2647 skb_pull(skb, len); in skb_pull_data()
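
The skb_put()/skb_push()/skb_pull() matches above are the linear-area editing primitives. A hedged construction sketch, with the sizes, names and GFP_KERNEL as assumptions: reserve header space, append the payload, then prepend the header.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: classic reserve -> put -> push construction order. */
static struct sk_buff *example_build_packet(const void *payload,
					    unsigned int plen,
					    unsigned int hdr_len)
{
	struct sk_buff *skb = alloc_skb(hdr_len + plen, GFP_KERNEL);
	void *hdr;

	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_len);		/* leave room for the header */
	skb_put_data(skb, payload, plen);	/* append payload at the tail */
	hdr = skb_push(skb, hdr_len);		/* move the data pointer back */
	memset(hdr, 0, hdr_len);		/* caller would fill this in */
	return skb;
}
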
2655 * @skb: buffer to alter
2660 * The skb must be linear.
2662 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2664 if (skb->len > len) in skb_trim()
2665 __skb_trim(skb, len); in skb_trim()
2669 /* Trims skb to length len. It can change skb pointers.
2672 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2676 int offset = skb_headlen(skb); in ___pskb_trim()
2677 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2681 if (skb_cloned(skb) && in ___pskb_trim()
2682 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2690 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2697 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2700 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2703 skb_frag_unref(skb, i); in ___pskb_trim()
2705 if (skb_has_frag_list(skb)) in ___pskb_trim()
2706 skb_drop_fraglist(skb); in ___pskb_trim()
2710 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2742 if (len > skb_headlen(skb)) { in ___pskb_trim()
2743 skb->data_len -= skb->len - len; in ___pskb_trim()
2744 skb->len = len; in ___pskb_trim()
2746 skb->len = len; in ___pskb_trim()
2747 skb->data_len = 0; in ___pskb_trim()
2748 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2751 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2752 skb_condense(skb); in ___pskb_trim()
2759 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2761 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2762 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2764 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2765 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2767 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2768 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2769 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2774 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2779 * __pskb_pull_tail - advance tail of skb header
2780 * @skb: buffer to reallocate
2790 * or value of new tail of skb in the case of success.
2792 * All the pointers pointing into skb header may change and must be
2796 /* Moves tail of skb head forward, copying data from fragmented part,
2799 * 2. It may change skb pointers.
2803 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2805 /* If skb has not enough free space at tail, get new one in __pskb_pull_tail()
2807 * room at tail, reallocate without expansion only if skb is cloned. in __pskb_pull_tail()
2809 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2811 if (!skb_frags_readable(skb)) in __pskb_pull_tail()
2814 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2815 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2820 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2821 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2826 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2831 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2832 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2840 * Certainly, it is possible to add an offset to skb data, in __pskb_pull_tail()
2843 * further bloating skb head and crucify ourselves here instead. in __pskb_pull_tail()
2847 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2859 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2861 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2884 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2885 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2891 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2894 /* Success! Now we may commit changes to skb data. */ in __pskb_pull_tail()
2899 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2900 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2903 skb_frag_unref(skb, i); in __pskb_pull_tail()
2906 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2908 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2919 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2922 skb->tail += delta; in __pskb_pull_tail()
2923 skb->data_len -= delta; in __pskb_pull_tail()
2925 if (!skb->data_len) in __pskb_pull_tail()
2926 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2928 return skb_tail_pointer(skb); in __pskb_pull_tail()
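
__pskb_pull_tail() above is the slow path behind pskb_may_pull(), the usual way to guarantee that a header lives in the linear area before it is dereferenced. A hedged sketch that assumes skb->data currently points at an IPv4 header:

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Sketch: pull the IPv4 header into the linear head if it is not
 * already there, then read it directly from skb->data.
 */
static int example_check_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;		/* packet too short or pull failed */

	iph = (const struct iphdr *)skb->data;
	return iph->version == 4 ? 0 : -EINVAL;
}
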
2933 * skb_copy_bits - copy bits from skb to kernel buffer
2934 * @skb: source skb
2939 * Copy the specified number of bytes from the source skb to the
2947 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2949 int start = skb_headlen(skb); in skb_copy_bits()
2953 if (offset > (int)skb->len - len) in skb_copy_bits()
2960 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2967 if (!skb_frags_readable(skb)) in skb_copy_bits()
2970 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2972 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
3001 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
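
skb_copy_bits() above works whether the requested bytes sit in the linear head, in page frags or on the frag_list. A hedged sketch copying a UDP header out without linearizing, assuming the transport header offset has already been set by the caller:

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Sketch: extract the UDP header into a caller-supplied struct. */
static int example_peek_udp(const struct sk_buff *skb, struct udphdr *uh)
{
	return skb_copy_bits(skb, skb_transport_offset(skb), uh, sizeof(*uh));
}
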
3132 * Map linear and fragment data from the skb to spd. It reports true if the
3135 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
3143 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3147 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3148 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3149 skb_headlen(skb), in __skb_splice_bits()
3151 skb_head_is_locked(skb), in __skb_splice_bits()
3158 if (!skb_frags_readable(skb)) in __skb_splice_bits()
3161 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3162 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3173 skb_walk_frags(skb, iter) { in __skb_splice_bits()
3190 * Map data from the skb to a pipe. Should handle both the linear part,
3193 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
3208 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
3241 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
3245 struct sk_buff *head = skb; in __skb_send_sock()
3252 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
3256 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3257 kv.iov_base = skb->data + offset; in __skb_send_sock()
3272 /* All the data was skb head? */ in __skb_send_sock()
3277 offset -= skb_headlen(skb); in __skb_send_sock()
3280 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3281 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3289 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3290 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3321 if (skb == head) { in __skb_send_sock()
3322 if (skb_has_frag_list(skb)) { in __skb_send_sock()
3323 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3326 } else if (skb->next) { in __skb_send_sock()
3327 skb = skb->next; in __skb_send_sock()
3339 /* Send skb data on a socket. Socket must be locked. */
3340 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
3343 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); in skb_send_sock_locked()
3347 /* Send skb data on a socket. Socket must be unlocked. */
3348 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
3350 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); in skb_send_sock()
3354 * skb_store_bits - store bits from kernel buffer to skb
3355 * @skb: destination buffer
3361 * destination skb. This function handles all the messy bits of
3365 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
3367 int start = skb_headlen(skb); in skb_store_bits()
3371 if (offset > (int)skb->len - len) in skb_store_bits()
3377 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
3384 if (!skb_frags_readable(skb)) in skb_store_bits()
3387 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3388 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3418 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
3445 /* Checksum skb data. */
3446 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
3449 int start = skb_headlen(skb); in __skb_checksum()
3459 skb->data + offset, copy, csum); in __skb_checksum()
3466 if (WARN_ON_ONCE(!skb_frags_readable(skb))) in __skb_checksum()
3469 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
3471 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
3506 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
3533 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
3541 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
3547 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
3550 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
3560 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3569 if (!skb_frags_readable(skb)) in skb_copy_and_csum_bits()
3572 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3577 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3579 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3608 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
3635 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
3639 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3642 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3643 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3644 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3646 if (!skb_shared(skb)) in __skb_checksum_complete_head()
3647 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3652 /* This function assumes skb->csum already holds pseudo header's checksum,
3654 * __skb_checksum_validate_complete(). And, the original skb->csum must
3658 * zero. The new checksum is stored back into skb->csum unless the skb is
3661 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3666 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3668 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3672 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3673 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3674 * when moving skb->data around. in __skb_checksum_complete()
3677 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3678 !skb->csum_complete_sw) in __skb_checksum_complete()
3679 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3682 if (!skb_shared(skb)) { in __skb_checksum_complete()
3684 skb->csum = csum; in __skb_checksum_complete()
3685 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3686 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3687 skb->csum_valid = !sum; in __skb_checksum_complete()
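
__skb_checksum()/skb_checksum() above walk the linear data, frags and frag_list with the chosen checksum ops. A hedged sketch computing and folding an Internet checksum over part of an skb:

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Sketch: 32-bit partial checksum over [offset, offset+len), folded to
 * the 16-bit ones'-complement form.
 */
static __sum16 example_csum(const struct sk_buff *skb, int offset, int len)
{
	return csum_fold(skb_checksum(skb, offset, len, 0));
}
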
3724 * Calculates the amount of linear headroom needed in the 'to' skb passed
3748 * skb_zerocopy - Zero copy skb to skb
3763 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3769 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3825 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3830 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3831 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3833 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3835 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3837 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3840 if (csstart != skb->len) in skb_copy_and_csum_dev()
3841 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3842 skb->len - csstart); in skb_copy_and_csum_dev()
3844 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3845 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3922 * skb_rbtree_purge - empty a skb rbtree
3937 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3940 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3941 sum += skb->truesize; in skb_rbtree_purge()
3942 kfree_skb(skb); in skb_rbtree_purge()
3949 struct sk_buff *skb, *next; in skb_errqueue_purge() local
3956 skb_queue_walk_safe(list, skb, next) { in skb_errqueue_purge()
3957 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
3958 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
3960 __skb_unlink(skb, list); in skb_errqueue_purge()
3961 __skb_queue_tail(&kill, skb); in skb_errqueue_purge()
4012 * @skb: buffer to remove
4018 * You must know what list the SKB is on.
4020 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
4025 __skb_unlink(skb, list); in skb_unlink()
4050 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
4056 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
4059 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
4060 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
4062 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
4063 skb1->unreadable = skb->unreadable; in skb_split_inside_header()
4064 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
4065 skb1->data_len = skb->data_len; in skb_split_inside_header()
4067 skb->data_len = 0; in skb_split_inside_header()
4068 skb->len = len; in skb_split_inside_header()
4069 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
4072 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
4077 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
4079 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
4080 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
4081 skb->len = len; in skb_split_no_header()
4082 skb->data_len = len - pos; in skb_split_no_header()
4085 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
4088 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
4099 skb_frag_ref(skb, i); in skb_split_no_header()
4102 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
4103 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4107 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4112 skb1->unreadable = skb->unreadable; in skb_split_no_header()
4116 * skb_split - Split fragmented skb to two parts at length len.
4117 * @skb: the buffer to split
4119 * @len: new length for skb
4121 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
4123 int pos = skb_headlen(skb); in skb_split()
4126 skb_zcopy_downgrade_managed(skb); in skb_split()
4128 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
4129 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
4131 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
4133 skb_split_no_header(skb, skb1, len, pos); in skb_split()
4137 /* Shifting from/to a cloned skb is a no-go.
4141 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
4143 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
4147 * skb_shift - Shifts paged data partially from skb to another
4149 * @skb: buffer from which the paged data comes from
4153 * the length of the skb, from skb to tgt. Returns number bytes shifted.
4154 * It's up to caller to free skb if everything was shifted.
4158 * Skb cannot include anything else but paged data while tgt is allowed
4162 * specialized skb free'er to handle frags without up-to-date nr_frags.
4164 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
4169 BUG_ON(shiftlen > skb->len); in skb_shift()
4171 if (skb_headlen(skb)) in skb_shift()
4173 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
4176 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); in skb_shift()
4177 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); in skb_shift()
4182 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4195 if (skb_prepare_for_shift(skb) || in skb_shift()
4200 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4213 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4214 if ((shiftlen == skb->len) && in skb_shift()
4215 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4218 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
4221 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4225 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4253 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4257 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4260 /* Reposition in the original skb */ in skb_shift()
4262 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4263 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4264 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4266 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4269 /* Most likely the tgt won't ever need its checksum anymore, skb on in skb_shift()
4273 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4275 skb_len_add(skb, -shiftlen); in skb_shift()
4282 * skb_prepare_seq_read - Prepare a sequential read of skb data
4283 * @skb: the buffer to read
4291 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
4296 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4304 * skb_seq_read - Sequentially read skb data
4309 * Reads a block of skb data at @consumed relative to the
4312 * of the block or 0 if the end of the skb data or the upper
4416 * skb_abort_seq_read - Abort a sequential read of skb data
4479 * skb_find_text - Find a text pattern in skb data
4480 * @skb: the buffer to look in
4485 * Finds a pattern in the skb data according to the specified
4490 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
4502 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
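
skb_prepare_seq_read()/skb_seq_read() above let a caller walk skb data in whatever chunks are convenient, without linearizing; skb_find_text() is itself built on them. A hedged sketch that counts occurrences of one byte value across the whole skb:

#include <linux/skbuff.h>

/* Sketch: iterate the skb block by block.  When skb_seq_read() returns
 * 0 the walk is complete and no skb_abort_seq_read() is required.
 */
static unsigned int example_count_byte(struct sk_buff *skb, u8 value)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, count = 0, block, i;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((block = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < block; i++)
			if (data[i] == value)
				count++;
		consumed += block;
	}
	return count;
}
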
4509 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
4512 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4514 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
4515 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4517 skb_zcopy_downgrade_managed(skb); in skb_append_pagefrags()
4519 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
4529 * skb_pull_rcsum - pull skb and update receive checksum
4530 * @skb: buffer to update
4539 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
4541 unsigned char *data = skb->data; in skb_pull_rcsum()
4543 BUG_ON(len > skb->len); in skb_pull_rcsum()
4544 __skb_pull(skb, len); in skb_pull_rcsum()
4545 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
4546 return skb->data; in skb_pull_rcsum()
4562 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
4566 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4567 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
4574 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4577 err = skb_unclone(skb, GFP_ATOMIC); in skb_segment_list()
4581 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4601 skb->next = nskb; in skb_segment_list()
4617 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4618 __copy_skb_header(nskb, skb); in skb_segment_list()
4620 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4622 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4631 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4632 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4633 skb->len = skb->len - delta_len; in skb_segment_list()
4635 skb_gso_reset(skb); in skb_segment_list()
4637 skb->prev = tail; in skb_segment_list()
4639 if (skb_needs_linearize(skb, features) && in skb_segment_list()
4640 __skb_linearize(skb)) in skb_segment_list()
4643 skb_get(skb); in skb_segment_list()
4645 return skb; in skb_segment_list()
4648 kfree_skb_list(skb->next); in skb_segment_list()
4649 skb->next = NULL; in skb_segment_list()
4655 * skb_segment - Perform protocol segmentation on skb.
4659 * This function performs segmentation on the given skb. It returns
4724 * Try to split the SKB to multiple GSO SKBs in skb_segment()
5064 /* The SKB kmem_cache slab is critical for network performance. Never
5090 * struct skb_shared_info is located at the end of skb->head, in skb_init()
5104 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
5107 int start = skb_headlen(skb); in __skb_to_sgvec()
5118 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5125 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
5130 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
5132 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
5148 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
5177 * @skb: Socket buffer containing the buffers to be mapped
5187 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
5189 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
5200 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
5201 * sglist without marking the sg which contains the last skb data as the end.
5219 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
5222 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
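As an illustration (hypothetical helper, not from skbuff.c), a sketch of the common pattern of mapping an skb's data into a scatterlist, e.g. before handing it to a crypto transform; sizing the sg array is the caller's responsibility.

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* Map the whole skb (offset 0, skb->len bytes) into @sg. Returns the number
 * of entries used, or a negative errno if @nents turns out to be too small.
 */
static int example_map_skb_to_sg(struct sk_buff *skb,
				 struct scatterlist *sg, int nents)
{
	sg_init_table(sg, nents);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}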
5230 * @skb: The socket buffer to check.
5232 * @trailer: Returned pointer to the skb where the @tailbits space begins
5240 * set to point to the skb in which this space begins.
5245 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
5251 /* If skb is cloned or its head is paged, reallocate in skb_cow_data()
5255 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5256 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
5260 if (!skb_has_frag_list(skb)) { in skb_cow_data()
5266 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
5267 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5271 *trailer = skb; in skb_cow_data()
5278 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5291 /* If the skb is the last, worry about trailer. */ in skb_cow_data()
5322 * OK, link new skb, drop old one */ in skb_cow_data()
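A sketch of the usual skb_cow_data() calling pattern (modelled loosely on IPsec ESP output; example_reserve_trailer is hypothetical): make @tailbits of trailer space available and locate the skb in which they begin.

#include <linux/skbuff.h>
#include <linux/string.h>

static int example_reserve_trailer(struct sk_buff *skb, int tailbits)
{
	struct sk_buff *trailer;
	int nfrags;
	u8 *tail;

	nfrags = skb_cow_data(skb, tailbits, &trailer);
	if (nfrags < 0)
		return nfrags;

	/* Extend the chain by @tailbits bytes; @tail points at the new,
	 * writable trailer area inside @trailer.
	 */
	tail = pskb_put(skb, trailer, tailbits);
	memset(tail, 0, tailbits);
	return 0;
}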
5338 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
5340 struct sock *sk = skb->sk; in sock_rmem_free()
5342 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5345 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
5350 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5357 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
5359 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5363 skb_orphan(skb); in sock_queue_err_skb()
5364 skb->sk = sk; in sock_queue_err_skb()
5365 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5366 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5367 skb_set_err_queue(skb); in sock_queue_err_skb()
5370 skb_dst_force(skb); in sock_queue_err_skb()
5372 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5379 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
5381 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5382 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5388 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
5396 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
5397 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
5404 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
5410 return skb; in sock_dequeue_err_skb()
5415 * skb_clone_sk - create clone of skb, and take reference to socket
5416 * @skb: the skb to clone
5427 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
5429 struct sock *sk = skb->sk; in skb_clone_sk()
5435 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
5448 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
5456 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5458 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
5464 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5466 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5471 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
5474 kfree_skb(skb); in __skb_complete_tx_timestamp()
5491 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
5494 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5503 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
5504 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
5510 kfree_skb(skb); in skb_complete_tx_timestamp()
5514 static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, in skb_tstamp_tx_report_so_timestamping() argument
5520 return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; in skb_tstamp_tx_report_so_timestamping()
5522 return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF : in skb_tstamp_tx_report_so_timestamping()
5525 return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; in skb_tstamp_tx_report_so_timestamping()
5527 return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; in skb_tstamp_tx_report_so_timestamping()
5533 static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, in skb_tstamp_tx_report_bpf_timestamping() argument
5547 *skb_hwtstamps(skb) = *hwtstamps; in skb_tstamp_tx_report_bpf_timestamping()
5559 bpf_skops_tx_timestamping(sk, skb, op); in skb_tstamp_tx_report_bpf_timestamping()
5567 struct sk_buff *skb; in __skb_tstamp_tx() local
5594 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
5599 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
5601 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
5603 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { in __skb_tstamp_tx()
5604 kfree_skb(skb); in __skb_tstamp_tx()
5608 if (!skb) in __skb_tstamp_tx()
5612 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5614 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5618 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
5620 __net_timestamp(skb); in __skb_tstamp_tx()
5622 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
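For illustration (hypothetical driver code, not from skbuff.c): how a driver completion handler might feed a hardware TX timestamp back through skb_tstamp_tx(), which ends up in __skb_tstamp_tx() above.

#include <linux/skbuff.h>
#include <linux/ktime.h>

static void example_tx_complete(struct sk_buff *skb, u64 hw_ns)
{
	struct skb_shared_hwtstamps hwts = { };

	hwts.hwtstamp = ns_to_ktime(hw_ns);
	/* Clones the skb onto sk->sk_error_queue if timestamping was
	 * requested on the socket; the original is still ours to free.
	 */
	skb_tstamp_tx(skb, &hwts);
	consume_skb(skb);
}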
5635 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
5637 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5641 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5642 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5644 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
5653 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
5657 kfree_skb(skb); in skb_complete_wifi_ack()
5664 * @skb: the skb to set
5665 * @start: the number of bytes after skb->data to start checksumming.
5669 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5671 * This function checks and sets those values and skb->ip_summed: if this
5674 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5677 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5679 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5681 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5684 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5685 skb->csum_start = csum_start; in skb_partial_csum_set()
5686 skb->csum_offset = off; in skb_partial_csum_set()
5687 skb->transport_header = csum_start; in skb_partial_csum_set()
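A sketch (assumptions: the transport header is already set and the packet is UDP) of how a virtio-style driver might translate guest checksum metadata using skb_partial_csum_set(); example_mark_partial_udp is not a kernel function.

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/stddef.h>

static bool example_mark_partial_udp(struct sk_buff *skb)
{
	/* start is measured from skb->data, off from csum_start. */
	return skb_partial_csum_set(skb, skb_transport_offset(skb),
				    offsetof(struct udphdr, check));
}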
5692 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5695 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5701 if (max > skb->len) in skb_maybe_pull_tail()
5702 max = skb->len; in skb_maybe_pull_tail()
5704 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5707 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5715 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5723 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5725 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5729 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5732 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5734 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5738 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5749 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5758 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5764 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5767 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5774 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5779 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5780 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5781 skb->len - off, in skb_checksum_setup_ipv4()
5782 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5794 #define OPT_HDR(type, skb, off) \ argument
5795 (type *)(skb_network_header(skb) + (off))
5797 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5812 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5816 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5818 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5826 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5833 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5841 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5848 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5856 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5863 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5883 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5888 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5889 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5890 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5899 * @skb: the skb to set up
5902 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5906 switch (skb->protocol) { in skb_checksum_setup()
5908 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5912 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
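Typical use, as a hedged sketch (hypothetical backend code): a virtualization backend re-deriving CHECKSUM_PARTIAL state for a guest-originated packet before injecting it into the stack.

#include <linux/skbuff.h>

static int example_guest_rx_csum(struct sk_buff *skb)
{
	/* recalculate=true also recomputes the TCP/UDP pseudo-header
	 * checksum from the IP addresses and payload length.
	 */
	return skb_checksum_setup(skb, true);
}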
5925 * skb_checksum_maybe_trim - maybe trims the given skb
5926 * @skb: the skb to check
5929 * Checks whether the given skb has data beyond the given transport length.
5930 * If so, returns a cloned skb trimmed to this transport length.
5931 * Otherwise returns the provided skb. Returns NULL in error cases
5932 * (e.g. transport_len exceeds skb length or out-of-memory).
5934 * Caller needs to set the skb transport header and free any returned skb if it
5935 * differs from the provided skb.
5937 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5941 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5944 if (skb->len < len) in skb_checksum_maybe_trim()
5946 else if (skb->len == len) in skb_checksum_maybe_trim()
5947 return skb; in skb_checksum_maybe_trim()
5949 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5963 * skb_checksum_trimmed - validate checksum of an skb
5964 * @skb: the skb to check
5968 * Applies the given checksum function skb_chkf to the provided skb.
5969 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5971 * If the skb has data beyond the given transport length, then a
5972 * trimmed & cloned skb is checked and returned.
5974 * Caller needs to set the skb transport header and free any returned skb if it
5975 * differs from the provided skb.
5977 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
5979 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
5982 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
5985 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
6002 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
6010 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
6013 skb->dev->name); in __skb_warn_lro_forwarding()
6017 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
6020 skb_release_head_state(skb); in kfree_skb_partial()
6021 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skb_partial()
6023 __kfree_skb(skb); in kfree_skb_partial()
6029 * skb_try_coalesce - try to merge skb into the prior one
6047 * pages within the same SKB. In theory we could take full in skb_try_coalesce()
6109 /* if the skb is not cloned this does nothing in skb_try_coalesce()
6127 * skb_scrub_packet - scrub an skb
6129 * @skb: buffer to clean
6135 * skb_scrub_packet can also be used to clean a skb before injecting it in
6137 * skb that could impact namespace isolation.
6139 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
6141 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
6142 skb->skb_iif = 0; in skb_scrub_packet()
6143 skb->ignore_df = 0; in skb_scrub_packet()
6144 skb_dst_drop(skb); in skb_scrub_packet()
6145 skb_ext_reset(skb); in skb_scrub_packet()
6146 nf_reset_ct(skb); in skb_scrub_packet()
6147 nf_reset_trace(skb); in skb_scrub_packet()
6150 skb->offload_fwd_mark = 0; in skb_scrub_packet()
6151 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
6153 ipvs_reset(skb); in skb_scrub_packet()
6158 skb->mark = 0; in skb_scrub_packet()
6159 skb_clear_tstamp(skb); in skb_scrub_packet()
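A sketch of the usual cross-namespace forwarding pattern (illustrative only; example_cross_netns is not a kernel function):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

static void example_cross_netns(struct sk_buff *skb,
				struct net_device *new_dev)
{
	/* xnet == true additionally clears skb->mark and the timestamp. */
	skb_scrub_packet(skb, !net_eq(dev_net(skb->dev), dev_net(new_dev)));
	skb->dev = new_dev;
}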
6163 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
6168 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
6169 kfree_skb(skb); in skb_reorder_vlan_header()
6173 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
6175 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
6179 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
6181 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
6185 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
6186 return skb; in skb_reorder_vlan_header()
6189 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
6194 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
6196 return skb; in skb_vlan_untag()
6199 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
6200 if (unlikely(!skb)) in skb_vlan_untag()
6203 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
6206 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
6208 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
6210 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
6211 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
6213 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
6214 if (unlikely(!skb)) in skb_vlan_untag()
6217 skb_reset_network_header(skb); in skb_vlan_untag()
6218 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
6219 skb_reset_transport_header(skb); in skb_vlan_untag()
6220 skb_reset_mac_len(skb); in skb_vlan_untag()
6222 return skb; in skb_vlan_untag()
6225 kfree_skb(skb); in skb_vlan_untag()
6230 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) in skb_ensure_writable() argument
6232 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
6235 if (!skb_frags_readable(skb)) in skb_ensure_writable()
6238 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
6241 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
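A sketch, assuming skb->data and the network header both sit at the IPv4 header (as in a netfilter hook): make the header writable before an in-place edit; example_dec_ipv4_ttl is hypothetical.

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/ip.h>

static int example_dec_ipv4_ttl(struct sk_buff *skb)
{
	int err = skb_ensure_writable(skb, sizeof(struct iphdr));

	if (err)
		return err;

	/* ip_decrease_ttl() also fixes up the IPv4 header checksum. */
	ip_decrease_ttl(ip_hdr(skb));
	return 0;
}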
6245 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) in skb_ensure_writable_head_tail() argument
6255 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6256 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6258 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6259 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
6261 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) in skb_ensure_writable_head_tail()
6265 return pskb_expand_head(skb, needed_headroom, needed_tailroom, in skb_ensure_writable_head_tail()
6271 * expects an skb without skb_vlan_tag_present set, carrying a VLAN tag in its payload
6273 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
6275 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6279 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6284 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
6288 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6290 vlan_remove_tag(skb, vlan_tci); in __skb_vlan_pop()
6292 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6294 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
6295 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
6297 skb_reset_mac_len(skb); in __skb_vlan_pop()
6304 * Expects skb->data at mac header.
6306 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
6312 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
6313 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
6315 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6318 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6323 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6326 vlan_proto = skb->protocol; in skb_vlan_pop()
6327 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6331 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
6337 * Expects skb->data at mac header.
6339 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
6341 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
6342 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6346 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6351 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6352 skb_vlan_tag_get(skb)); in skb_vlan_push()
6356 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6357 skb->network_header -= VLAN_HLEN; in skb_vlan_push()
6359 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
6361 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
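A sketch of an OVS-style re-tag action built from the two helpers above (expects skb->data at the mac header; the hypothetical example_vlan_retag and its values are illustrative):

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

static int example_vlan_retag(struct sk_buff *skb, u16 new_vid)
{
	int err = skb_vlan_pop(skb);	/* drop the outermost tag, if any */

	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}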
6369 * @skb: Socket buffer to modify
6371 * Drop the Ethernet header of @skb.
6373 * Expects that skb->data points to the mac header and that no VLAN tags are
6378 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
6380 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
6381 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
6384 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
6385 skb_reset_mac_header(skb); in skb_eth_pop()
6386 skb_reset_mac_len(skb); in skb_eth_pop()
6395 * @skb: Socket buffer to modify
6399 * Prepend @skb with a new Ethernet header.
6401 * Expects that skb->data points to the mac header, which must be empty.
6405 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
6411 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
6414 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
6418 skb_push(skb, sizeof(*eth)); in skb_eth_push()
6419 skb_reset_mac_header(skb); in skb_eth_push()
6420 skb_reset_mac_len(skb); in skb_eth_push()
6422 eth = eth_hdr(skb); in skb_eth_push()
6425 eth->h_proto = skb->protocol; in skb_eth_push()
6427 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
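A sketch combining the two helpers (hypothetical; mirrors pop_eth/push_eth style actions): replace the Ethernet header of a packet whose skb->data is at the mac header.

#include <linux/skbuff.h>

static int example_replace_eth(struct sk_buff *skb,
			       const unsigned char *new_dst,
			       const unsigned char *new_src)
{
	int err = skb_eth_pop(skb);	/* leaves skb->data at the L3 header */

	if (err)
		return err;

	return skb_eth_push(skb, new_dst, new_src);
}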
6433 /* Update the ethertype of hdr and the skb csum value if required. */
6434 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
6437 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6440 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6450 * @skb: buffer
6457 * Expects skb->data at mac header.
6461 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
6471 if (skb->encapsulation) in skb_mpls_push()
6474 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
6478 if (!skb->inner_protocol) { in skb_mpls_push()
6479 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
6480 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6483 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
6484 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6486 skb_reset_mac_header(skb); in skb_mpls_push()
6487 skb_set_network_header(skb, mac_len); in skb_mpls_push()
6488 skb_reset_mac_len(skb); in skb_mpls_push()
6490 lse = mpls_hdr(skb); in skb_mpls_push()
6492 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
6495 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
6496 skb->protocol = mpls_proto; in skb_mpls_push()
6505 * @skb: buffer
6510 * Expects skb->data at mac header.
6514 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
6519 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6522 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
6526 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
6527 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
6530 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
6531 skb_reset_mac_header(skb); in skb_mpls_pop()
6532 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
6538 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6539 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
6541 skb->protocol = next_proto; in skb_mpls_pop()
6550 * @skb: buffer
6553 * Expects skb->data at mac header.
6557 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
6561 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6564 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6568 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6569 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6571 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6574 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6583 * @skb: buffer
6585 * Expects skb->data at mac header.
6589 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6594 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6597 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6600 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6608 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
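A sketch of a push_mpls-style action using the helpers above (example_push_one_label is hypothetical; the label/TTL values and bottom-of-stack bit are illustrative, and skb->dev is assumed to be set):

#include <linux/skbuff.h>
#include <linux/mpls.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>

static int example_push_one_label(struct sk_buff *skb, u32 label, u8 ttl)
{
	__be32 lse = cpu_to_be32(((label << MPLS_LS_LABEL_SHIFT) &
				  MPLS_LS_LABEL_MASK) |
				 (1 << MPLS_LS_S_SHIFT) |	/* bottom of stack */
				 ((u32)ttl << MPLS_LS_TTL_SHIFT));

	return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC), skb->mac_len,
			     skb->dev->type == ARPHRD_ETHER);
}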
6613 * alloc_skb_with_frags - allocate skb with page frags
6621 * This can be used to allocate a paged skb, given a maximal order for frags.
6630 struct sk_buff *skb; in alloc_skb_with_frags() local
6639 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6640 if (!skb) in alloc_skb_with_frags()
6665 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); in alloc_skb_with_frags()
6667 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6670 return skb; in alloc_skb_with_frags()
6673 kfree_skb(skb); in alloc_skb_with_frags()
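A sketch (hypothetical caller, order-0 frags) showing how the result is typically finished off by the caller, much as datagram senders do after sock_alloc_send_pskb():

#include <linux/skbuff.h>
#include <linux/gfp.h>

static struct sk_buff *example_paged_skb(unsigned int header_len,
					 unsigned int data_len)
{
	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(header_len, data_len, 0 /* page order */,
				   &err, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, header_len);	/* headroom for protocol headers */
	skb->data_len = data_len;	/* payload lives in the page frags */
	skb->len     += data_len;
	return skb;
}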
6678 /* carve out the first off bytes from skb when off < headlen */
6679 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6683 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_header()
6687 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6696 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6697 skb->len -= off; in pskb_carve_inside_header()
6700 skb_shinfo(skb), in pskb_carve_inside_header()
6702 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6703 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6705 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6709 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6710 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6711 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6712 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6713 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_header()
6718 skb_free_head(skb); in pskb_carve_inside_header()
6721 skb->head = data; in pskb_carve_inside_header()
6722 skb->data = data; in pskb_carve_inside_header()
6723 skb->head_frag = 0; in pskb_carve_inside_header()
6724 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6725 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6726 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6727 skb->cloned = 0; in pskb_carve_inside_header()
6728 skb->hdr_len = 0; in pskb_carve_inside_header()
6729 skb->nohdr = 0; in pskb_carve_inside_header()
6730 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6735 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6737 /* carve out the first eat bytes from skb's frag_list. May recurse into
6740 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
6791 /* carve off first len bytes from skb. Split line (off) is in the
6792 * non-linear part of skb
6794 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6798 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6800 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6803 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6812 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6813 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6819 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6822 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6836 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6842 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6843 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6846 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6848 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6849 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6853 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_nonlinear()
6855 skb->head = data; in pskb_carve_inside_nonlinear()
6856 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6857 skb->data = data; in pskb_carve_inside_nonlinear()
6858 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6859 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6860 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6861 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6862 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6863 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6864 skb->len -= off; in pskb_carve_inside_nonlinear()
6865 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6866 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6870 /* remove len bytes from the beginning of the skb */
6871 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6873 int headlen = skb_headlen(skb); in pskb_carve()
6876 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6878 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
6881 /* Extract to_copy bytes starting at off from skb, and return this in
6882 * a new skb
6884 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6887 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
6903 * @skb: buffer
6905 * Can be used to save memory before skb is added to a busy queue.
6906 * If packet has bytes in frags and enough tail room in skb->head,
6910 * We do not reallocate skb->head and thus cannot fail. in skb_condense()
6911 * Caller must re-evaluate skb->truesize if needed.
6913 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6915 if (skb->data_len) { in skb_condense()
6916 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6917 skb_cloned(skb) || !skb_frags_readable(skb)) in skb_condense()
6921 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6923 /* At this point, skb->truesize might be overestimated, in skb_condense()
6924 * because skb had a fragment, and fragments do not tell in skb_condense()
6926 * When we pulled its content into skb->head, fragment in skb_condense()
6928 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
6930 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
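A sketch of the intended pattern (simplified; example_queue_rx is hypothetical, and real receive paths hold the socket lock and check rmem limits): condense before queueing, then charge the possibly reduced truesize.

#include <linux/skbuff.h>
#include <net/sock.h>

static void example_queue_rx(struct sock *sk, struct sk_buff *skb)
{
	skb_condense(skb);		/* may pull frags into skb->head */
	skb_set_owner_r(skb, sk);	/* charges skb->truesize to sk */
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}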
6941 * __skb_ext_alloc - allocate new skb extension storage
6946 * skb via __skb_ext_set().
6998 * __skb_ext_set - attach the specified extension storage to this skb
6999 * @skb: buffer
7007 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
7012 skb_ext_put(skb); in __skb_ext_set()
7016 skb->extensions = ext; in __skb_ext_set()
7017 skb->active_extensions = 1 << id; in __skb_ext_set()
7023 * @skb: buffer
7030 * If the skb was cloned, COW applies and the returned memory can be
7035 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
7040 if (skb->active_extensions) { in skb_ext_add()
7041 old = skb->extensions; in skb_ext_add()
7043 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
7063 skb->slow_gro = 1; in skb_ext_add()
7064 skb->extensions = new; in skb_ext_add()
7065 skb->active_extensions |= 1 << id; in skb_ext_add()
7088 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
7090 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
7092 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
7093 if (skb->active_extensions == 0) { in __skb_ext_del()
7094 skb->extensions = NULL; in __skb_ext_del()
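An illustrative sketch, assuming CONFIG_XFRM so that SKB_EXT_SEC_PATH exists (example_use_ext is hypothetical): add an extension, noting that skb_ext_add() does not zero the storage, use it, then drop it again.

#include <linux/skbuff.h>
#include <net/xfrm.h>

#ifdef CONFIG_XFRM
static int example_use_ext(struct sk_buff *skb)
{
	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

	if (!sp)
		return -ENOMEM;

	sp->len = 0;		/* newly added storage is uninitialized */
	sp->olen = 0;

	/* ... record xfrm state references in sp ... */

	__skb_ext_del(skb, SKB_EXT_SEC_PATH);
	return 0;
}
#endif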
7133 static void kfree_skb_napi_cache(struct sk_buff *skb) in kfree_skb_napi_cache() argument
7135 /* if SKB is a clone, don't handle this case */ in kfree_skb_napi_cache()
7136 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in kfree_skb_napi_cache()
7137 __kfree_skb(skb); in kfree_skb_napi_cache()
7142 __napi_kfree_skb(skb, SKB_CONSUMED); in kfree_skb_napi_cache()
7147 * skb_attempt_defer_free - queue skb for remote freeing
7148 * @skb: buffer
7150 * Put @skb in a per-cpu list, using the cpu which
7151 * allocated the skb/pages to reduce false sharing
7154 void skb_attempt_defer_free(struct sk_buff *skb) in skb_attempt_defer_free() argument
7156 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
7164 nodefer: kfree_skb_napi_cache(skb); in skb_attempt_defer_free()
7168 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); in skb_attempt_defer_free()
7169 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
7182 skb->next = sd->defer_list; in skb_attempt_defer_free()
7184 WRITE_ONCE(sd->defer_list, skb); in skb_attempt_defer_free()
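A sketch of a receive-path consumer (example_eat_rx_skb is a hypothetical helper; TCP's rx path does the equivalent): drop state the deferral path must not see, then hand the skb back to its allocating CPU.

#include <linux/skbuff.h>
#include <net/dst.h>

static void example_eat_rx_skb(struct sk_buff *skb)
{
	/* Must not carry a dst or a destructor here, per the
	 * DEBUG_NET_WARN_ON_ONCE() checks in skb_attempt_defer_free().
	 */
	skb_dst_drop(skb);
	skb_orphan(skb);
	skb_attempt_defer_free(skb);
}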
7194 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, in skb_splice_csum_page() argument
7203 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
7208 * @skb: The buffer to add pages to
7221 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, in skb_splice_from_iter() argument
7234 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
7256 ret = skb_append_pagefrags(skb, page, off, part, in skb_splice_from_iter()
7263 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7264 skb_splice_csum_page(skb, page, off, part); in skb_splice_from_iter()
7277 skb_len_add(skb, spliced); in skb_splice_from_iter()