Lines matching "skb" (full-text search of net/core/skbuff.c)
19 * Ray VanTassle : Fixed --skb->lock in free
88 #include <trace/events/skb.h>
197 * @skb: buffer
207 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
211 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
212 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
213 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
217 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
219 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
222 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
224 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
287 struct sk_buff *skb; in napi_skb_cache_get() local
302 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
306 kasan_mempool_unpoison_object(skb, skbuff_cache_size); in napi_skb_cache_get()
308 return skb; in napi_skb_cache_get()
316 static inline void skbuff_clear(struct sk_buff *skb) in skbuff_clear() argument
318 /* Replace memset(skb, 0, offsetof(struct sk_buff, tail)) in skbuff_clear()
323 memset(skb, 0, 128); in skbuff_clear()
325 memset((void *)skb + 128, 0, offsetof(struct sk_buff, tail) - 128); in skbuff_clear()
329 * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
330 * @skbs: pointer to an at least @n-sized array to fill with skb pointers
388 static inline void __finalize_skb_around(struct sk_buff *skb, void *data, in __finalize_skb_around() argument
395 /* Assumes caller memset cleared SKB */ in __finalize_skb_around()
396 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
397 refcount_set(&skb->users, 1); in __finalize_skb_around()
398 skb->head = data; in __finalize_skb_around()
399 skb->data = data; in __finalize_skb_around()
400 skb_reset_tail_pointer(skb); in __finalize_skb_around()
401 skb_set_end_offset(skb, size); in __finalize_skb_around()
402 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
403 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
404 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
406 shinfo = skb_shinfo(skb); in __finalize_skb_around()
410 skb_set_kcov_handle(skb, kcov_common_handle()); in __finalize_skb_around()
437 struct sk_buff *skb; in slab_build_skb() local
440 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in slab_build_skb()
442 if (unlikely(!skb)) in slab_build_skb()
445 skbuff_clear(skb); in slab_build_skb()
447 __finalize_skb_around(skb, data, size); in slab_build_skb()
449 return skb; in slab_build_skb()
453 /* Caller must provide SKB that is memset cleared */
454 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
465 __finalize_skb_around(skb, data, size); in __build_skb_around()
478 * The return value is the new skb buffer.
490 struct sk_buff *skb; in __build_skb() local
492 skb = kmem_cache_alloc(net_hotdata.skbuff_cache, in __build_skb()
494 if (unlikely(!skb)) in __build_skb()
497 skbuff_clear(skb); in __build_skb()
498 __build_skb_around(skb, data, frag_size); in __build_skb()
500 return skb; in __build_skb()
504 * takes care of skb->head_frag and skb->pfmemalloc
508 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
510 if (likely(skb && frag_size)) { in build_skb()
511 skb->head_frag = 1; in build_skb()
512 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb()
514 return skb; in build_skb()
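A minimal sketch of the usual build_skb() caller pattern, assuming rx_buf was allocated (e.g. via napi_alloc_frag()) with room left after the data for struct skb_shared_info; rx_to_skb, rx_buf and rx_len are illustrative names, not from this file:

#include <linux/skbuff.h>

static struct sk_buff *rx_to_skb(void *rx_buf, unsigned int rx_len)
{
	unsigned int frag_size = SKB_DATA_ALIGN(rx_len) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb = build_skb(rx_buf, frag_size);

	if (unlikely(!skb))
		return NULL;		/* caller still owns rx_buf */
	skb_put(skb, rx_len);		/* account the received bytes */
	return skb;
}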
519 * build_skb_around - build a network buffer around provided skb
520 * @skb: sk_buff provided by the caller, must be memset cleared
524 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
527 if (unlikely(!skb)) in build_skb_around()
530 __build_skb_around(skb, data, frag_size); in build_skb_around()
533 skb->head_frag = 1; in build_skb_around()
534 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in build_skb_around()
536 return skb; in build_skb_around()
552 struct sk_buff *skb; in __napi_build_skb() local
554 skb = napi_skb_cache_get(true); in __napi_build_skb()
555 if (unlikely(!skb)) in __napi_build_skb()
558 skbuff_clear(skb); in __napi_build_skb()
559 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
561 return skb; in __napi_build_skb()
569 * Version of __napi_build_skb() that takes care of skb->head_frag
570 * and skb->pfmemalloc when the data is a page or page fragment.
576 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
578 if (likely(skb) && frag_size) { in napi_build_skb()
579 skb->head_frag = 1; in napi_build_skb()
580 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
583 return skb; in napi_build_skb()
605 struct sk_buff *skb) in kmalloc_reserve() argument
620 if (skb) in kmalloc_reserve()
621 skb->pfmemalloc = true; in kmalloc_reserve()
642 if (skb) in kmalloc_reserve()
643 skb->pfmemalloc = true; in kmalloc_reserve()
660 * instead of head cache and allocate a cloned (child) skb.
675 struct sk_buff *skb = NULL; in __alloc_skb() local
691 skb = napi_skb_cache_get(true); in __alloc_skb()
692 if (unlikely(!skb)) in __alloc_skb()
696 skb = napi_skb_cache_get(false); in __alloc_skb()
700 if (!skb) { in __alloc_skb()
702 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
703 if (unlikely(!skb)) in __alloc_skb()
706 skbuff_clear(skb); in __alloc_skb()
711 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
713 data = kmalloc_reserve(&size, gfp_mask, node, skb); in __alloc_skb()
720 __finalize_skb_around(skb, data, size); in __alloc_skb()
725 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
727 /* skb->fclone is a 2-bit field. in __alloc_skb()
728 * Replace expensive RMW (skb->fclone = SKB_FCLONE_ORIG) in __alloc_skb()
732 DEBUG_NET_WARN_ON_ONCE(skb->fclone != SKB_FCLONE_UNAVAILABLE); in __alloc_skb()
733 skb->fclone |= SKB_FCLONE_ORIG; in __alloc_skb()
738 return skb; in __alloc_skb()
741 kmem_cache_free(cache, skb); in __alloc_skb()
763 struct sk_buff *skb; in __netdev_alloc_skb() local
770 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
775 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
776 if (!skb) in __netdev_alloc_skb()
805 skb = __build_skb(data, len); in __netdev_alloc_skb()
806 if (unlikely(!skb)) { in __netdev_alloc_skb()
812 skb->pfmemalloc = 1; in __netdev_alloc_skb()
813 skb->head_frag = 1; in __netdev_alloc_skb()
816 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
817 skb->dev = dev; in __netdev_alloc_skb()
820 return skb; in __netdev_alloc_skb()
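A sketch of the classic copy-based RX path built on the netdev_alloc_skb() wrapper around this function; my_rx_one, hw_buf and pkt_len are illustrative driver-side names:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void my_rx_one(struct net_device *dev, const void *hw_buf,
		      unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);

	if (unlikely(!skb)) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, hw_buf, pkt_len);	  /* copy frame into linear area */
	skb->protocol = eth_type_trans(skb, dev); /* pulls the MAC header */
	netif_rx(skb);
}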
840 struct sk_buff *skb; in napi_alloc_skb() local
848 * we use kmalloc() for skb->head allocation. in napi_alloc_skb()
853 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in napi_alloc_skb()
855 if (!skb) in napi_alloc_skb()
875 skb = __napi_build_skb(data, len); in napi_alloc_skb()
876 if (unlikely(!skb)) { in napi_alloc_skb()
882 skb->pfmemalloc = 1; in napi_alloc_skb()
883 skb->head_frag = 1; in napi_alloc_skb()
886 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in napi_alloc_skb()
887 skb->dev = napi->dev; in napi_alloc_skb()
890 return skb; in napi_alloc_skb()
894 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, in skb_add_rx_frag_netmem() argument
899 skb_fill_netmem_desc(skb, i, netmem, off, size); in skb_add_rx_frag_netmem()
900 skb->len += size; in skb_add_rx_frag_netmem()
901 skb->data_len += size; in skb_add_rx_frag_netmem()
902 skb->truesize += truesize; in skb_add_rx_frag_netmem()
906 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
909 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
914 skb->len += size; in skb_coalesce_rx_frag()
915 skb->data_len += size; in skb_coalesce_rx_frag()
916 skb->truesize += truesize; in skb_coalesce_rx_frag()
926 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
928 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
931 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
935 skb_walk_frags(skb, list) in skb_clone_fraglist()
944 struct sk_buff *skb = *pskb, *nskb; in skb_pp_cow_data() local
949 * the skb. in skb_pp_cow_data()
951 if (skb_has_frag_list(skb)) in skb_pp_cow_data()
955 if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE) in skb_pp_cow_data()
958 size = min_t(u32, skb->len, max_head_size); in skb_pp_cow_data()
971 skb_copy_header(nskb, skb); in skb_pp_cow_data()
974 err = skb_copy_bits(skb, 0, nskb->data, size); in skb_pp_cow_data()
981 head_off = skb_headroom(nskb) - skb_headroom(skb); in skb_pp_cow_data()
985 len = skb->len - off; in skb_pp_cow_data()
986 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { in skb_pp_cow_data()
1000 err = skb_copy_bits(skb, off, page_address(page) + page_off, in skb_pp_cow_data()
1011 consume_skb(skb); in skb_pp_cow_data()
1046 static bool skb_pp_recycle(struct sk_buff *skb, void *data) in skb_pp_recycle() argument
1048 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
1054 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
1055 * @skb: page pool aware skb
1057 * Increase the fragment reference count (pp_ref_count) of a skb. This is
1059 * i.e. when skb->pp_recycle is true, and not for fragments in a
1060 * non-pp-recycling skb. It has a fallback to increase references on normal
1063 static int skb_pp_frag_ref(struct sk_buff *skb) in skb_pp_frag_ref() argument
1069 if (!skb->pp_recycle) in skb_pp_frag_ref()
1072 shinfo = skb_shinfo(skb); in skb_pp_frag_ref()
1089 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
1091 unsigned char *head = skb->head; in skb_free_head()
1093 if (skb->head_frag) { in skb_free_head()
1094 if (skb_pp_recycle(skb, head)) in skb_free_head()
1098 skb_kfree_head(head, skb_end_offset(skb)); in skb_free_head()
1102 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_data() argument
1104 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
1107 if (!skb_data_unref(skb, shinfo)) in skb_release_data()
1110 if (skb_zcopy(skb)) { in skb_release_data()
1113 skb_zcopy_clear(skb, true); in skb_release_data()
1119 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
1125 skb_free_head(skb); in skb_release_data()
1127 /* When we clone an SKB we copy the recycling bit. The pp_recycle in skb_release_data()
1130 * to make one SKB responsible for triggering the recycle path. in skb_release_data()
1131 * So disable the recycling bit if an SKB is cloned and we have in skb_release_data()
1132 * additional references to the fragmented part of the SKB. in skb_release_data()
1133 * Eventually the last SKB will have the recycling bit set and it's in skb_release_data()
1136 skb->pp_recycle = 0; in skb_release_data()
1142 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
1146 switch (skb->fclone) { in kfree_skbmem()
1148 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skbmem()
1152 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
1154 /* We usually free the clone (TX completion) before original skb in kfree_skbmem()
1163 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
1172 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
1174 skb_dst_drop(skb); in skb_release_head_state()
1175 if (skb->destructor) { in skb_release_head_state()
1178 INDIRECT_CALL_4(skb->destructor, in skb_release_head_state()
1181 skb); in skb_release_head_state()
1183 INDIRECT_CALL_2(skb->destructor, in skb_release_head_state()
1185 skb); in skb_release_head_state()
1188 skb->destructor = NULL; in skb_release_head_state()
1189 skb->sk = NULL; in skb_release_head_state()
1191 nf_reset_ct(skb); in skb_release_head_state()
1192 skb_ext_reset(skb); in skb_release_head_state()
1196 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) in skb_release_all() argument
1198 skb_release_head_state(skb); in skb_release_all()
1199 if (likely(skb->head)) in skb_release_all()
1200 skb_release_data(skb, reason); in skb_release_all()
1205 * @skb: buffer
1212 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
1214 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); in __kfree_skb()
1215 kfree_skbmem(skb); in __kfree_skb()
1220 bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, in __sk_skb_reason_drop() argument
1223 if (unlikely(!skb_unref(skb))) in __sk_skb_reason_drop()
1232 trace_consume_skb(skb, __builtin_return_address(0)); in __sk_skb_reason_drop()
1234 trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); in __sk_skb_reason_drop()
1240 * @sk: the socket to receive @skb, or NULL if not applicable
1241 * @skb: buffer to free
1242 * @reason: reason why this skb is dropped
1249 sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) in sk_skb_reason_drop() argument
1251 if (__sk_skb_reason_drop(sk, skb, reason)) in sk_skb_reason_drop()
1252 __kfree_skb(skb); in sk_skb_reason_drop()
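The split between the two free paths matters for tracing: consume_skb() fires only the consume tracepoint, while kfree_skb_reason() feeds the skb:kfree_skb drop tracepoint, so drop monitoring sees only real losses. A sketch; finish_packet and the chosen drop reason are illustrative:

#include <linux/skbuff.h>

static void finish_packet(struct sk_buff *skb, int err)
{
	if (err)
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);	/* a real drop */
	else
		consume_skb(skb);				/* normal completion */
}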
1263 static void kfree_skb_add_bulk(struct sk_buff *skb, in kfree_skb_add_bulk() argument
1267 /* if SKB is a clone, don't handle this case */ in kfree_skb_add_bulk()
1268 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1269 __kfree_skb(skb); in kfree_skb_add_bulk()
1273 skb_release_all(skb, reason); in kfree_skb_add_bulk()
1274 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1306 /* Dump skb information and contents.
1312 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
1314 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
1315 struct net_device *dev = skb->dev; in skb_dump()
1316 struct sock *sk = skb->sk; in skb_dump()
1323 len = skb->len; in skb_dump()
1325 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1327 headroom = skb_headroom(skb); in skb_dump()
1328 tailroom = skb_tailroom(skb); in skb_dump()
1330 has_mac = skb_mac_header_was_set(skb); in skb_dump()
1331 has_trans = skb_transport_header_was_set(skb); in skb_dump()
1340 level, skb->len, skb->data_len, headroom, skb_headlen(skb), in skb_dump()
1341 tailroom, skb->end - skb->tail, in skb_dump()
1342 has_mac ? skb->mac_header : -1, in skb_dump()
1343 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1344 skb->mac_len, in skb_dump()
1345 skb->network_header, in skb_dump()
1346 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1347 has_trans ? skb->transport_header : -1, in skb_dump()
1350 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1351 skb->csum_complete_sw, skb->csum_valid, skb->csum_level, in skb_dump()
1352 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1353 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1354 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
1355 skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, in skb_dump()
1356 skb->inner_network_header, skb->inner_transport_header); in skb_dump()
1366 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1367 16, 1, skb->head, headroom, false); in skb_dump()
1369 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
1371 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, in skb_dump()
1372 16, 1, skb->data, seg_len, false); in skb_dump()
1376 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, in skb_dump()
1377 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
1379 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1380 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1398 print_hex_dump(level, "skb frag: ", in skb_dump()
1408 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
1409 printk("skb fraglist:\n"); in skb_dump()
1410 skb_walk_frags(skb, list_skb) in skb_dump()
1418 * @skb: buffer that triggered an error
1420 * Report xmit error if a device callback is tracking this skb.
1421 * skb must be freed afterwards.
1423 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
1425 if (skb) { in skb_tx_error()
1426 skb_zcopy_downgrade_managed(skb); in skb_tx_error()
1427 skb_zcopy_clear(skb, true); in skb_tx_error()
1435 * @skb: buffer to free
1441 void consume_skb(struct sk_buff *skb) in consume_skb() argument
1443 if (!skb_unref(skb)) in consume_skb()
1446 trace_consume_skb(skb, __builtin_return_address(0)); in consume_skb()
1447 __kfree_skb(skb); in consume_skb()
1454 * @skb: buffer to free
1457 * skb reference and all the head states have already been dropped
1459 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
1461 trace_consume_skb(skb, __builtin_return_address(0)); in __consume_stateless_skb()
1462 skb_release_data(skb, SKB_CONSUMED); in __consume_stateless_skb()
1463 kfree_skbmem(skb); in __consume_stateless_skb()
1466 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
1470 if (!kasan_mempool_poison_object(skb)) in napi_skb_cache_put()
1474 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1491 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) in __napi_kfree_skb() argument
1493 skb_release_all(skb, reason); in __napi_kfree_skb()
1494 napi_skb_cache_put(skb); in __napi_kfree_skb()
1497 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
1499 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1500 nf_reset_ct(skb); in napi_skb_free_stolen_head()
1501 skb_dst_drop(skb); in napi_skb_free_stolen_head()
1502 skb_ext_put(skb); in napi_skb_free_stolen_head()
1503 skb_orphan(skb); in napi_skb_free_stolen_head()
1504 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1506 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
1510 * napi_consume_skb() - consume skb in NAPI context, try to feed skb cache
1511 * @skb: buffer to free
1521 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
1523 if (unlikely(!budget || !skb)) { in napi_consume_skb()
1524 dev_consume_skb_any(skb); in napi_consume_skb()
1530 if (skb->alloc_cpu != smp_processor_id() && !skb_shared(skb)) { in napi_consume_skb()
1531 skb_release_head_state(skb); in napi_consume_skb()
1532 return skb_attempt_defer_free(skb); in napi_consume_skb()
1535 if (!skb_unref(skb)) in napi_consume_skb()
1538 /* if we reach here, the SKB is ready to be freed */ in napi_consume_skb()
1539 trace_consume_skb(skb, __builtin_return_address(0)); in napi_consume_skb()
1541 /* if SKB is a clone, don't handle this case */ in napi_consume_skb()
1542 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1543 __kfree_skb(skb); in napi_consume_skb()
1547 skb_release_all(skb, SKB_CONSUMED); in napi_consume_skb()
1548 napi_skb_cache_put(skb); in napi_consume_skb()
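A sketch of the intended call site: a NAPI poll handler passing its live budget so napi_consume_skb() can feed the per-CPU skb cache, with budget 0 reserved for non-NAPI contexts (which fall back to dev_consume_skb_any() above); my_poll and my_next_completed_tx are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct sk_buff *my_next_completed_tx(struct napi_struct *napi); /* illustrative */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	/* reap TX completions; a zero budget means "not in NAPI context" */
	while ((skb = my_next_completed_tx(napi)) != NULL)
		napi_consume_skb(skb, budget);

	/* RX work would go here, bounded by budget */
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}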
1608 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1610 #define C(x) n->x = skb->x in __skb_clone()
1614 __copy_skb_header(n, skb); in __skb_clone()
1619 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1634 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1635 skb->cloned = 1; in __skb_clone()
1667 * skb_morph - morph one skb into another
1668 * @dst: the skb to receive the contents
1669 * @src: the skb to supply the contents
1671 * This is identical to skb_clone except that the target skb is
1674 * The target skb is returned upon exit.
1730 struct sk_buff *skb; in msg_zerocopy_alloc() local
1734 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1735 if (!skb) in msg_zerocopy_alloc()
1738 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1739 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1743 kfree_skb(skb); in msg_zerocopy_alloc()
1787 /* TCP can create a new skb to attach a new uarg */ in msg_zerocopy_realloc()
1815 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1817 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1837 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1839 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1859 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1873 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1874 skb = NULL; in __msg_zerocopy_callback()
1881 consume_skb(skb); in __msg_zerocopy_callback()
1885 static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_complete() argument
1913 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1918 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1921 err = uarg->ops->link_skb(skb, uarg); in skb_zerocopy_iter_stream()
1925 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1927 /* An skb can only point to one uarg. This edge case happens in skb_zerocopy_iter_stream()
1928 * when TCP appends to an skb, but zerocopy_realloc triggered in skb_zerocopy_iter_stream()
1935 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len, in skb_zerocopy_iter_stream()
1937 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1938 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1940 /* Streams do not free skb on error. Reset to prev state. */ in skb_zerocopy_iter_stream()
1941 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1942 skb->sk = sk; in skb_zerocopy_iter_stream()
1943 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1944 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1948 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1949 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1953 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) in __skb_zcopy_downgrade_managed() argument
1957 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1958 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1959 skb_frag_ref(skb, i); in __skb_zcopy_downgrade_managed()
1984 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1985 * @skb: the skb to modify
1988 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1998 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
2000 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
2005 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
2008 if (!skb_frags_readable(skb)) in skb_copy_ubufs()
2018 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) in skb_copy_ubufs()
2022 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
2040 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
2065 /* skb frags release userspace buffers */ in skb_copy_ubufs()
2067 skb_frag_unref(skb, i); in skb_copy_ubufs()
2069 /* skb frags point to kernel buffers */ in skb_copy_ubufs()
2071 __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); in skb_copy_ubufs()
2074 __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, in skb_copy_ubufs()
2076 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
2079 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
2086 * @skb: buffer to clone
2098 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
2100 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
2105 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
2108 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
2114 if (skb_pfmemalloc(skb)) in skb_clone()
2124 return __skb_clone(n, skb); in skb_clone()
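A clone shares the data buffer; only the struct sk_buff is new, so the clone must be treated as read-only until skb_cow()/pskb_expand_head() unshares it. A sketch of the common "tap a copy, keep forwarding" pattern, with deliver_to_tap() and forward() as illustrative consumers:

#include <linux/skbuff.h>

void deliver_to_tap(struct sk_buff *skb);	/* illustrative */
void forward(struct sk_buff *skb);		/* illustrative */

static void tap_and_forward(struct sk_buff *skb)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (nskb)
		deliver_to_tap(nskb);	/* shares skb's data; do not write to it */
	forward(skb);
}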
2128 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
2131 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
2132 skb->csum_start += off; in skb_headers_offset_update()
2133 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
2134 skb->transport_header += off; in skb_headers_offset_update()
2135 skb->network_header += off; in skb_headers_offset_update()
2136 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
2137 skb->mac_header += off; in skb_headers_offset_update()
2138 skb->inner_transport_header += off; in skb_headers_offset_update()
2139 skb->inner_network_header += off; in skb_headers_offset_update()
2140 skb->inner_mac_header += off; in skb_headers_offset_update()
2154 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
2156 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
2163 * @skb: buffer to copy
2178 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
2184 if (!skb_frags_readable(skb)) in skb_copy()
2187 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy()
2190 headerlen = skb_headroom(skb); in skb_copy()
2191 size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2193 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
2200 skb_put(n, skb->len); in skb_copy()
2202 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2204 skb_copy_header(n, skb); in skb_copy()
2211 * @skb: buffer to copy
2212 * @headroom: headroom of new skb
2214 * @fclone: if true allocate the copy of the skb from the fclone
2226 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
2229 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
2230 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
2239 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
2241 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2243 n->truesize += skb->data_len; in __pskb_copy_fclone()
2244 n->data_len = skb->data_len; in __pskb_copy_fclone()
2245 n->len = skb->len; in __pskb_copy_fclone()
2247 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2250 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
2251 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
2256 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2257 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2258 skb_frag_ref(skb, i); in __pskb_copy_fclone()
2263 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
2264 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2268 skb_copy_header(n, skb); in __pskb_copy_fclone()
2276 * @skb: buffer to reallocate
2282 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2286 * All the pointers pointing into skb header may change and must be
2294 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
2297 unsigned int osize = skb_end_offset(skb); in pskb_expand_head()
2305 BUG_ON(skb_shared(skb)); in pskb_expand_head()
2307 skb_zcopy_downgrade_managed(skb); in pskb_expand_head()
2309 if (skb_pfmemalloc(skb)) in pskb_expand_head()
2320 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2323 skb_shinfo(skb), in pskb_expand_head()
2324 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2331 if (skb_cloned(skb)) { in pskb_expand_head()
2332 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
2334 if (skb_zcopy(skb)) in pskb_expand_head()
2335 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2336 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2337 skb_frag_ref(skb, i); in pskb_expand_head()
2339 if (skb_has_frag_list(skb)) in pskb_expand_head()
2340 skb_clone_fraglist(skb); in pskb_expand_head()
2342 skb_release_data(skb, SKB_CONSUMED); in pskb_expand_head()
2344 skb_free_head(skb); in pskb_expand_head()
2346 off = (data + nhead) - skb->head; in pskb_expand_head()
2348 skb->head = data; in pskb_expand_head()
2349 skb->head_frag = 0; in pskb_expand_head()
2350 skb->data += off; in pskb_expand_head()
2352 skb_set_end_offset(skb, size); in pskb_expand_head()
2356 skb->tail += off; in pskb_expand_head()
2357 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
2358 skb->cloned = 0; in pskb_expand_head()
2359 skb->hdr_len = 0; in pskb_expand_head()
2360 skb->nohdr = 0; in pskb_expand_head()
2361 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2363 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2365 * when skb is orphaned (not attached to a socket). in pskb_expand_head()
2367 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2368 skb->truesize += size - osize; in pskb_expand_head()
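Because pskb_expand_head() replaces skb->head, any cached pointers into the old header block go stale. A sketch of the usual guard via the skb_cow_head() wrapper, after which header pointers must be re-derived; ensure_headroom is an illustrative name:

#include <linux/skbuff.h>

static int ensure_headroom(struct sk_buff *skb, unsigned int needed)
{
	if (skb_cow_head(skb, needed))	/* may reallocate via pskb_expand_head() */
		return -ENOMEM;
	/* only re-derive header pointers (ip_hdr() etc.) after this point */
	return 0;
}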
2379 /* Make private copy of skb with writable head and some headroom */
2381 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
2384 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2387 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
2389 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
2401 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
2407 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
2408 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2410 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
2414 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2416 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
2419 /* We cannot change skb->end if the original or new value in __skb_unclone_keeptruesize()
2423 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { in __skb_unclone_keeptruesize()
2428 saved_end_offset, skb_end_offset(skb)); in __skb_unclone_keeptruesize()
2433 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
2435 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2438 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2442 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
2449 * @skb: buffer to reallocate
2452 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2453 * if possible; copies skb->sk to new skb as needed
2454 * and frees original skb in case of failures.
2459 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
2461 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2462 int osize = skb_end_offset(skb); in skb_expand_head()
2463 struct sock *sk = skb->sk; in skb_expand_head()
2467 return skb; in skb_expand_head()
2470 /* pskb_expand_head() might crash, if skb is shared. */ in skb_expand_head()
2471 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
2472 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
2479 consume_skb(skb); in skb_expand_head()
2480 skb = nskb; in skb_expand_head()
2482 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
2485 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
2486 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2488 skb->truesize += delta; in skb_expand_head()
2490 return skb; in skb_expand_head()
2493 kfree_skb(skb); in skb_expand_head()
2500 * @skb: buffer to copy
2516 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
2527 if (!skb_frags_readable(skb)) in skb_copy_expand()
2530 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) in skb_copy_expand()
2533 oldheadroom = skb_headroom(skb); in skb_copy_expand()
2534 n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2535 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
2543 skb_put(n, skb->len); in skb_copy_expand()
2553 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2554 skb->len + head_copy_len)); in skb_copy_expand()
2556 skb_copy_header(n, skb); in skb_copy_expand()
2565 * __skb_pad - zero pad the tail of an skb
2566 * @skb: buffer to pad
2574 * May return an error in out-of-memory cases. The skb is freed on error
2578 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
2584 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
2585 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2589 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2590 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
2591 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
2596 /* FIXME: The use of this function with non-linear skb's really needs in __skb_pad()
2599 err = skb_linearize(skb); in __skb_pad()
2603 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2608 kfree_skb(skb); in __skb_pad()
2615 * @skb: start of the buffer to use
2620 * fragmented buffer. @tail must be the last fragment of @skb -- or
2621 * @skb itself. If this would exceed the total buffer size the kernel
2626 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
2628 if (tail != skb) { in pskb_put()
2629 skb->data_len += len; in pskb_put()
2630 skb->len += len; in pskb_put()
2638 * @skb: buffer to use
2645 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
2647 void *tmp = skb_tail_pointer(skb); in skb_put()
2648 SKB_LINEAR_ASSERT(skb); in skb_put()
2649 skb->tail += len; in skb_put()
2650 skb->len += len; in skb_put()
2651 if (unlikely(skb->tail > skb->end)) in skb_put()
2652 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2659 * @skb: buffer to use
2666 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2668 skb->data -= len; in skb_push()
2669 skb->len += len; in skb_push()
2670 if (unlikely(skb->data < skb->head)) in skb_push()
2671 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2672 return skb->data; in skb_push()
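skb_put() and skb_push() together enable the canonical construction pattern: reserve headroom, append the payload, then prepend headers from the innermost out. A sketch, with make_packet and the 8-byte header size illustrative (hlen is assumed to be at least 8):

#include <linux/skbuff.h>

static struct sk_buff *make_packet(const void *payload, unsigned int plen,
				   unsigned int hlen)
{
	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_reserve(skb, hlen);			/* headroom for headers */
	skb_put_data(skb, payload, plen);	/* tail grows: skb->len == plen */
	skb_push(skb, 8);			/* prepend an 8-byte header */
	/* skb->data now points at the header; skb->len == plen + 8 */
	return skb;
}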
2678 * @skb: buffer to use
2686 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2688 return skb_pull_inline(skb, len); in skb_pull()
2695 * @skb: buffer to use
2703 void *skb_pull_data(struct sk_buff *skb, size_t len) in skb_pull_data() argument
2705 void *data = skb->data; in skb_pull_data()
2707 if (skb->len < len) in skb_pull_data()
2710 skb_pull(skb, len); in skb_pull_data()
2718 * @skb: buffer to alter
2723 * The skb must be linear.
2725 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2727 if (skb->len > len) in skb_trim()
2728 __skb_trim(skb, len); in skb_trim()
2732 /* Trims skb to length len. It can change skb pointers.
2735 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2739 int offset = skb_headlen(skb); in ___pskb_trim()
2740 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2744 if (skb_cloned(skb) && in ___pskb_trim()
2745 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2753 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2760 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2763 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2766 skb_frag_unref(skb, i); in ___pskb_trim()
2768 if (skb_has_frag_list(skb)) in ___pskb_trim()
2769 skb_drop_fraglist(skb); in ___pskb_trim()
2773 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2805 if (len > skb_headlen(skb)) { in ___pskb_trim()
2806 skb->data_len -= skb->len - len; in ___pskb_trim()
2807 skb->len = len; in ___pskb_trim()
2809 skb->len = len; in ___pskb_trim()
2810 skb->data_len = 0; in ___pskb_trim()
2811 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2814 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2815 skb_condense(skb); in ___pskb_trim()
2822 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2824 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2825 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2827 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2828 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2830 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2831 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2832 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2837 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2842 * __pskb_pull_tail - advance tail of skb header
2843 * @skb: buffer to reallocate
2853 * or value of new tail of skb in the case of success.
2855 * All the pointers pointing into skb header may change and must be
2859 /* Moves tail of skb head forward, copying data from fragmented part,
2862 * 2. It may change skb pointers.
2866 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2868 /* If skb has not enough free space at tail, get new one in __pskb_pull_tail()
2870 * room at tail, reallocate without expansion only if skb is cloned. in __pskb_pull_tail()
2872 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2874 if (!skb_frags_readable(skb)) in __pskb_pull_tail()
2877 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2878 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2883 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2884 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2889 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2894 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2895 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2903 * Certainly, it is possible to add an offset to skb data, in __pskb_pull_tail()
2906 * further bloating skb head and crucify ourselves here instead. in __pskb_pull_tail()
2910 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2922 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2924 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2947 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2948 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2954 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2957 /* Success! Now we may commit changes to skb data. */ in __pskb_pull_tail()
2962 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2963 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2966 skb_frag_unref(skb, i); in __pskb_pull_tail()
2969 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2971 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2982 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2985 skb->tail += delta; in __pskb_pull_tail()
2986 skb->data_len -= delta; in __pskb_pull_tail()
2988 if (!skb->data_len) in __pskb_pull_tail()
2989 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2991 return skb_tail_pointer(skb); in __pskb_pull_tail()
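Most callers reach __pskb_pull_tail() indirectly through pskb_may_pull(), which linearizes just enough bytes for a header peek. A sketch of the usual receive-side guard; parse_ipv4 is an illustrative name:

#include <linux/ip.h>
#include <linux/skbuff.h>

static int parse_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;		/* frame shorter than an IPv4 header */
	iph = (const struct iphdr *)skb->data;
	return iph->protocol;
}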
2996 * skb_copy_bits - copy bits from skb to kernel buffer
2997 * @skb: source skb
3002 * Copy the specified number of bytes from the source skb to the
3010 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
3012 int start = skb_headlen(skb); in skb_copy_bits()
3016 if (offset > (int)skb->len - len) in skb_copy_bits()
3023 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
3030 if (!skb_frags_readable(skb)) in skb_copy_bits()
3033 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
3035 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
3064 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
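skb_copy_bits() is the safe way to read data that may live in page frags or a frag_list, and a negative @offset reads from the headroom (as skb_copy() exploits above). A sketch; peek_bytes is an illustrative name:

#include <linux/skbuff.h>

static int peek_bytes(const struct sk_buff *skb, int offset, void *buf, int len)
{
	if (skb_copy_bits(skb, offset, buf, len))
		return -EFAULT;		/* range fell outside the skb */
	return 0;
}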
3193 * Map linear and fragment data from the skb to spd. It reports true if the
3196 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
3204 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3208 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3209 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3210 skb_headlen(skb), in __skb_splice_bits()
3212 skb_head_is_locked(skb), in __skb_splice_bits()
3219 if (!skb_frags_readable(skb)) in __skb_splice_bits()
3222 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3223 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3234 skb_walk_frags(skb, iter) { in __skb_splice_bits()
3251 * Map data from the skb to a pipe. Should handle both the linear part,
3254 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
3269 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
3302 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
3307 struct sk_buff *head = skb; in __skb_send_sock()
3314 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
3318 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3319 kv.iov_base = skb->data + offset; in __skb_send_sock()
3336 /* All the data was skb head? */ in __skb_send_sock()
3341 offset -= skb_headlen(skb); in __skb_send_sock()
3344 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3345 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3353 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3354 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3388 if (skb == head) { in __skb_send_sock()
3389 if (skb_has_frag_list(skb)) { in __skb_send_sock()
3390 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3393 } else if (skb->next) { in __skb_send_sock()
3394 skb = skb->next; in __skb_send_sock()
3406 /* Send skb data on a socket. Socket must be locked. */
3407 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
3410 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0); in skb_send_sock_locked()
3414 int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb, in skb_send_sock_locked_with_flags() argument
3417 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags); in skb_send_sock_locked_with_flags()
3421 /* Send skb data on a socket. Socket must be unlocked. */
3422 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
3424 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0); in skb_send_sock()
3428 * skb_store_bits - store bits from kernel buffer to skb
3429 * @skb: destination buffer
3435 * destination skb. This function handles all the messy bits of
3439 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
3441 int start = skb_headlen(skb); in skb_store_bits()
3445 if (offset > (int)skb->len - len) in skb_store_bits()
3451 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
3458 if (!skb_frags_readable(skb)) in skb_store_bits()
3461 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3462 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3492 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
3519 /* Checksum skb data. */
3520 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) in skb_checksum() argument
3522 int start = skb_headlen(skb); in skb_checksum()
3531 csum = csum_partial(skb->data + offset, copy, csum); in skb_checksum()
3538 if (WARN_ON_ONCE(!skb_frags_readable(skb))) in skb_checksum()
3541 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_checksum()
3543 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_checksum()
3574 skb_walk_frags(skb, frag_iter) { in skb_checksum()
3602 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
3605 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
3615 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3624 if (!skb_frags_readable(skb)) in skb_copy_and_csum_bits()
3627 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3632 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3634 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3663 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
3691 u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc) in skb_crc32c() argument
3693 int start = skb_headlen(skb); in skb_crc32c()
3699 crc = crc32c(crc, skb->data + offset, copy); in skb_crc32c()
3706 if (WARN_ON_ONCE(!skb_frags_readable(skb))) in skb_crc32c()
3709 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_crc32c()
3711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_crc32c()
3738 skb_walk_frags(skb, frag_iter) { in skb_crc32c()
3762 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
3766 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3769 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3770 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3771 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3773 if (!skb_shared(skb)) in __skb_checksum_complete_head()
3774 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3779 /* This function assumes skb->csum already holds pseudo header's checksum,
3781 * __skb_checksum_validate_complete(). And, the original skb->csum must
3785 * zero. The new checksum is stored back into skb->csum unless the skb is
3788 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3793 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3795 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3799 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3800 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3801 * when moving skb->data around. in __skb_checksum_complete()
3804 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3805 !skb->csum_complete_sw) in __skb_checksum_complete()
3806 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3809 if (!skb_shared(skb)) { in __skb_checksum_complete()
3811 skb->csum = csum; in __skb_checksum_complete()
3812 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3813 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3814 skb->csum_valid = !sum; in __skb_checksum_complete()
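A sketch of how an L4 receive path uses this: seed skb->csum with the pseudo-header checksum as described below, then let the helper fold the payload in; a zero folded sum means the packet checks out. l4_csum_ok and pseudo are illustrative:

#include <linux/skbuff.h>

static bool l4_csum_ok(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = pseudo;			/* precondition of the helper */
	return !__skb_checksum_complete(skb);	/* zero fold => valid */
}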
3825 * Calculates the amount of linear headroom needed in the 'to' skb passed
3849 * skb_zerocopy - Zero copy skb to skb
3864 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3870 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3926 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3931 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3932 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3934 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3936 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3938 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3941 if (csstart != skb->len) in skb_copy_and_csum_dev()
3942 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3943 skb->len - csstart); in skb_copy_and_csum_dev()
3945 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3946 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
4023 * skb_rbtree_purge - empty a skb rbtree
4038 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
4041 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
4042 sum += skb->truesize; in skb_rbtree_purge()
4043 kfree_skb(skb); in skb_rbtree_purge()
4050 struct sk_buff *skb, *next; in skb_errqueue_purge() local
4057 skb_queue_walk_safe(list, skb, next) { in skb_errqueue_purge()
4058 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
4059 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
4061 __skb_unlink(skb, list); in skb_errqueue_purge()
4062 __skb_queue_tail(&kill, skb); in skb_errqueue_purge()
4113 * @skb: buffer to remove
4119 * You must know what list the SKB is on.
4121 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
4126 __skb_unlink(skb, list); in skb_unlink()
4151 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
4157 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
4160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
4161 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
4163 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
4164 skb1->unreadable = skb->unreadable; in skb_split_inside_header()
4165 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
4166 skb1->data_len = skb->data_len; in skb_split_inside_header()
4168 skb->data_len = 0; in skb_split_inside_header()
4169 skb->len = len; in skb_split_inside_header()
4170 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
4173 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
4178 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
4180 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
4181 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
4182 skb->len = len; in skb_split_no_header()
4183 skb->data_len = len - pos; in skb_split_no_header()
4186 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
4189 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
4200 skb_frag_ref(skb, i); in skb_split_no_header()
4203 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
4204 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4208 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
4213 skb1->unreadable = skb->unreadable; in skb_split_no_header()
4217 * skb_split - Split fragmented skb to two parts at length len.
4218 * @skb: the buffer to split
4220 * @len: new length for skb
4222 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
4224 int pos = skb_headlen(skb); in skb_split()
4227 skb_zcopy_downgrade_managed(skb); in skb_split()
4229 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
4230 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
4232 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
4234 skb_split_no_header(skb, skb1, len, pos); in skb_split()
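A sketch of the tcp_fragment()-style use: the first @len bytes stay in @skb and the remainder moves into a caller-allocated skb. Allocating skb_headlen(skb) of linear space covers the case where the split point lands inside the header; split_at is an illustrative name:

#include <linux/skbuff.h>

static struct sk_buff *split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *nskb = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (nskb)
		skb_split(skb, nskb, len);	/* skb keeps len bytes; rest moves */
	return nskb;
}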
4238 /* Shifting from/to a cloned skb is a no-go.
4242 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
4244 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
4248 * skb_shift - Shifts paged data partially from skb to another
4250 * @skb: buffer from which the paged data comes from
4254 * the length of the skb, from skb to tgt. Returns number bytes shifted.
4255 * It's up to caller to free skb if everything was shifted.
4259 * The skb cannot contain anything but paged data, while tgt is allowed
4263 * specialized skb free'er to handle frags without up-to-date nr_frags.
4265 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
4270 BUG_ON(shiftlen > skb->len); in skb_shift()
4272 if (skb_headlen(skb)) in skb_shift()
4274 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
4277 DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); in skb_shift()
4278 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); in skb_shift()
4283 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4296 if (skb_prepare_for_shift(skb) || in skb_shift()
4301 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4314 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4315 if ((shiftlen == skb->len) && in skb_shift()
4316 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4319 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
4322 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4326 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4354 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4358 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4361 /* Reposition in the original skb */ in skb_shift()
4363 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4364 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4365 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4367 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4370 /* Most likely the tgt won't ever need its checksum anymore, skb on in skb_shift()
4374 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4376 skb_len_add(skb, -shiftlen); in skb_shift()
4383 * skb_prepare_seq_read - Prepare a sequential read of skb data
4384 * @skb: the buffer to read
4392 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
4397 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4405 * skb_seq_read - Sequentially read skb data
4410 * Reads a block of skb data at @consumed relative to the
4413 * of the block or 0 if the end of the skb data or the upper
4517 * skb_abort_seq_read - Abort a sequential read of skb data
4580 * skb_find_text - Find a text pattern in skb data
4581 * @skb: the buffer to look in
4586 * Finds a pattern in the skb data according to the specified
4591 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
4603 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
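skb_find_text() drives the sequential-read API shown above; the same API lets any caller walk a non-linear skb without copying it out. A sketch that accumulates every byte; sum_bytes is an illustrative name:

#include <linux/skbuff.h>

static u32 sum_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len, consumed = 0, i;
	u32 acc = 0;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			acc += data[i];
		consumed += len;
	}
	/* skb_abort_seq_read(&st) is only required when stopping early */
	return acc;
}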
4610 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
4613 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4615 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
4616 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4618 skb_zcopy_downgrade_managed(skb); in skb_append_pagefrags()
4620 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
4630 * skb_pull_rcsum - pull skb and update receive checksum
4631 * @skb: buffer to update
4640 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
4642 unsigned char *data = skb->data; in skb_pull_rcsum()
4644 BUG_ON(len > skb->len); in skb_pull_rcsum()
4645 __skb_pull(skb, len); in skb_pull_rcsum()
4646 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
4647 return skb->data; in skb_pull_rcsum()
4663 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
4667 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4668 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
4675 DEBUG_NET_WARN_ON_ONCE(!(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)); in skb_segment_list()
4677 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4680 err = skb_unclone(skb, GFP_ATOMIC); in skb_segment_list()
4684 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4705 skb->next = nskb; in skb_segment_list()
4721 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4722 __copy_skb_header(nskb, skb); in skb_segment_list()
4724 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4726 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4735 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4736 skb->len = skb->len - delta_len; in skb_segment_list()
4738 skb_gso_reset(skb); in skb_segment_list()
4740 skb->prev = tail; in skb_segment_list()
4742 if (skb_needs_linearize(skb, features) && in skb_segment_list()
4743 __skb_linearize(skb)) in skb_segment_list()
4746 skb_get(skb); in skb_segment_list()
4748 return skb; in skb_segment_list()
4751 kfree_skb_list(skb->next); in skb_segment_list()
4752 skb->next = NULL; in skb_segment_list()
4758 * skb_segment - Perform protocol segmentation on skb.
4762 * This function performs segmentation on the given skb. It returns
4827 * Try to split the SKB to multiple GSO SKBs in skb_segment()
5173 /* The SKB kmem_cache slab is critical for network performance. Never
5201 * struct skb_shared_info is located at the end of skb->head, in skb_init()
5215 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
5218 int start = skb_headlen(skb); in __skb_to_sgvec()
5229 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5236 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
5241 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
5243 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
5259 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
5288 * @skb: Socket buffer containing the buffers to be mapped
5298 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
5300 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
5311 /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
5312 * sglist without marking the sg that contains the last skb data as the end.
5330 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
5333 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
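The typical consumer is crypto code. A minimal sketch mapping a non-fraglist skb for a scatterlist-based transform; sizing the sg array is the caller's responsibility:

	static int map_skb(struct sk_buff *skb)
	{
		struct scatterlist sg[MAX_SKB_FRAGS + 1];
		int nsg;

		sg_init_table(sg, ARRAY_SIZE(sg));
		nsg = skb_to_sgvec(skb, sg, 0, skb->len); /* count or -EMSGSIZE */
		if (nsg < 0)
			return nsg;
		/* hand sg[0..nsg-1] to the crypto layer here */
		return nsg;
	}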
5341 * @skb: The socket buffer to check.
5343 * @trailer: Returned pointer to the skb where the @tailbits space begins
5351 * set to point to the skb in which this space begins.
5356 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
5362 /* If skb is cloned or its head is paged, reallocate in skb_cow_data()
5366 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5367 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
5371 if (!skb_has_frag_list(skb)) { in skb_cow_data()
5377 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
5378 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5382 *trailer = skb; in skb_cow_data()
5389 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5402 /* If the skb is the last, worry about trailer. */ in skb_cow_data()
5433 * OK, link new skb, drop old one */ in skb_cow_data()
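IPsec-style callers use the return value to size a scatterlist and then claim the tailroom. A sketch, with tailen an assumed trailer length:

	static int reserve_trailer(struct sk_buff *skb, int tailen)
	{
		struct sk_buff *trailer;
		int nfrags;

		nfrags = skb_cow_data(skb, tailen, &trailer);
		if (nfrags < 0)
			return nfrags;
		/* @trailer now has at least @tailen bytes of tailroom */
		pskb_put(skb, trailer, tailen);
		return nfrags;	/* callers often size a scatterlist with this */
	}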
5449 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
5451 struct sock *sk = skb->sk; in sock_rmem_free()
5453 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5456 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
5461 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5468 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
5470 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5474 skb_orphan(skb); in sock_queue_err_skb()
5475 skb->sk = sk; in sock_queue_err_skb()
5476 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5477 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5478 skb_set_err_queue(skb); in sock_queue_err_skb()
5481 skb_dst_force(skb); in sock_queue_err_skb()
5483 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5490 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
5492 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5493 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5499 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
5507 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
5508 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
5515 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
5521 return skb; in sock_dequeue_err_skb()
5526 * skb_clone_sk - create clone of skb, and take reference to socket
5527 * @skb: the skb to clone
5538 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
5540 struct sock *sk = skb->sk; in skb_clone_sk()
5546 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
5559 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
5567 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5569 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
5575 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5577 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5582 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
5585 kfree_skb(skb); in __skb_complete_tx_timestamp()
5597 /* The sk pointer remains valid as long as the skb is. The sk_socket and in skb_may_tx_timestamp()
5615 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
5618 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5627 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
5628 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
5634 kfree_skb(skb); in skb_complete_tx_timestamp()
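A PTP-capable driver's completion path typically looks like this sketch, applied to a clone taken earlier with skb_clone_sk(); hw_ns, the raw device timestamp, is an assumed input:

	static void tx_done(struct sk_buff *clone, u64 hw_ns)
	{
		struct skb_shared_hwtstamps hwts = {};

		hwts.hwtstamp = ns_to_ktime(hw_ns);
		/* consumes @clone: queued on sk_error_queue or freed */
		skb_complete_tx_timestamp(clone, &hwts);
	}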
5638 static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, in skb_tstamp_tx_report_so_timestamping() argument
5644 return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; in skb_tstamp_tx_report_so_timestamping()
5646 return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF : in skb_tstamp_tx_report_so_timestamping()
5649 return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; in skb_tstamp_tx_report_so_timestamping()
5651 return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; in skb_tstamp_tx_report_so_timestamping()
5657 static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, in skb_tstamp_tx_report_bpf_timestamping() argument
5671 *skb_hwtstamps(skb) = *hwtstamps; in skb_tstamp_tx_report_bpf_timestamping()
5683 bpf_skops_tx_timestamping(sk, skb, op); in skb_tstamp_tx_report_bpf_timestamping()
5691 struct sk_buff *skb; in __skb_tstamp_tx() local
5718 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
5723 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
5725 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
5727 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { in __skb_tstamp_tx()
5728 kfree_skb(skb); in __skb_tstamp_tx()
5732 if (!skb) in __skb_tstamp_tx()
5736 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5738 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5742 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
5744 __net_timestamp(skb); in __skb_tstamp_tx()
5746 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
5759 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
5761 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5765 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5766 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5768 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
5777 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
5781 kfree_skb(skb); in skb_complete_wifi_ack()
5788 * @skb: the skb to set
5789 * @start: the number of bytes after skb->data to start checksumming.
5793 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5795 * This function checks and sets those values and skb->ip_summed: if this
5798 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5801 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5803 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5805 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5808 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5809 skb->csum_start = csum_start; in skb_partial_csum_set()
5810 skb->csum_offset = off; in skb_partial_csum_set()
5811 skb->transport_header = csum_start; in skb_partial_csum_set()
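Virtio-style receive paths use this to validate checksum metadata from an untrusted peer instead of trusting it blindly. A sketch, with csum_start/csum_offset assumed to come from a device header:

	static int apply_guest_csum(struct sk_buff *skb, u16 csum_start,
				    u16 csum_offset)
	{
		if (!skb_partial_csum_set(skb, csum_start, csum_offset))
			return -EINVAL;
		/* skb->ip_summed is now CHECKSUM_PARTIAL and the transport
		 * header points at csum_start
		 */
		return 0;
	}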
5816 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5819 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5825 if (max > skb->len) in skb_maybe_pull_tail()
5826 max = skb->len; in skb_maybe_pull_tail()
5828 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5831 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5839 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5847 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5849 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5853 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5856 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5858 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5862 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5873 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5882 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5888 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5891 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5898 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5903 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5904 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5905 skb->len - off, in skb_checksum_setup_ipv4()
5906 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5918 #define OPT_HDR(type, skb, off) \ argument
5919 (type *)(skb_network_header(skb) + (off))
5921 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5936 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5940 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5942 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5950 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5957 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5965 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5972 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5980 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5987 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
6007 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
6012 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
6013 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
6014 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
6023 * @skb: the skb to set up
6026 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
6030 switch (skb->protocol) { in skb_checksum_setup()
6032 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
6036 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
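A hedged sketch of the caller side, in the style of paravirtual backends that receive partial-checksum packets and must rebuild the pseudo-header sums:

	err = skb_checksum_setup(skb, true);	/* recalculate = true */
	if (err)
		goto drop;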
6049 * skb_checksum_maybe_trim - maybe trims the given skb
6050 * @skb: the skb to check
6053 * Checks whether the given skb has data beyond the given transport length.
6054 * If so, returns a cloned skb trimmed to this transport length.
6055 * Otherwise returns the provided skb. Returns NULL in error cases
6056 * (e.g. transport_len exceeds skb length or out-of-memory).
6058 * Caller needs to set the skb transport header and free any returned skb if it
6059 * differs from the provided skb.
6061 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
6065 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
6068 if (skb->len < len) in skb_checksum_maybe_trim()
6070 else if (skb->len == len) in skb_checksum_maybe_trim()
6071 return skb; in skb_checksum_maybe_trim()
6073 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
6087 * skb_checksum_trimmed - validate checksum of an skb
6088 * @skb: the skb to check
6092 * Applies the given checksum function skb_chkf to the provided skb.
6093 * Returns a checked and maybe trimmed skb. Returns NULL on error.
6095 * If the skb has data beyond the given transport length, then a
6096 * trimmed & cloned skb is checked and returned.
6098 * Caller needs to set the skb transport header and free any returned skb if it
6099 * differs from the provided skb.
6101 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
6103 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
6106 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
6109 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
6126 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
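Because skb_chkf must be an actual function, callers wrap the validation helper. A sketch modeled on the multicast code; my_validate and transport_len are illustrative names:

	static __sum16 my_validate(struct sk_buff *skb)
	{
		return skb_checksum_simple_validate(skb);
	}

	/* in the caller: */
	skb_chk = skb_checksum_trimmed(skb, transport_len, my_validate);
	if (!skb_chk)
		goto drop;
	/* parse skb_chk, then release it if it is a trimmed clone */
	if (skb_chk != skb)
		kfree_skb(skb_chk);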
6134 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
6137 skb->dev->name); in __skb_warn_lro_forwarding()
6141 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
6144 skb_release_head_state(skb); in kfree_skb_partial()
6145 kmem_cache_free(net_hotdata.skbuff_cache, skb); in kfree_skb_partial()
6147 __kfree_skb(skb); in kfree_skb_partial()
6153 * skb_try_coalesce - try to merge skb to prior one
6171 * pages within the same SKB. In theory we could take full in skb_try_coalesce()
6233 /* if the skb is not cloned this does nothing in skb_try_coalesce()
6251 * skb_scrub_packet - scrub an skb
6253 * @skb: buffer to clean
6259 * skb_scrub_packet can also be used to clean a skb before injecting it in
6261 * skb that could impact namespace isolation.
6263 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
6265 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
6266 skb->skb_iif = 0; in skb_scrub_packet()
6267 skb->ignore_df = 0; in skb_scrub_packet()
6268 skb_dst_drop(skb); in skb_scrub_packet()
6269 skb_ext_reset(skb); in skb_scrub_packet()
6270 nf_reset_ct(skb); in skb_scrub_packet()
6271 nf_reset_trace(skb); in skb_scrub_packet()
6274 skb->offload_fwd_mark = 0; in skb_scrub_packet()
6275 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
6277 ipvs_reset(skb); in skb_scrub_packet()
6282 skb->mark = 0; in skb_scrub_packet()
6283 skb_clear_tstamp(skb); in skb_scrub_packet()
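The canonical xnet computation when forwarding between devices, as the dev_forward_skb() path does:

	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));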
6287 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
6292 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
6293 kfree_skb(skb); in skb_reorder_vlan_header()
6297 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
6299 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
6303 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
6305 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
6309 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
6310 return skb; in skb_reorder_vlan_header()
6313 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
6318 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
6320 return skb; in skb_vlan_untag()
6323 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
6324 if (unlikely(!skb)) in skb_vlan_untag()
6327 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
6330 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
6332 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
6334 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
6335 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
6337 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
6338 if (unlikely(!skb)) in skb_vlan_untag()
6341 skb_reset_network_header(skb); in skb_vlan_untag()
6342 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
6343 skb_reset_transport_header(skb); in skb_vlan_untag()
6344 skb_reset_mac_len(skb); in skb_vlan_untag()
6346 return skb; in skb_vlan_untag()
6349 kfree_skb(skb); in skb_vlan_untag()
6354 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) in skb_ensure_writable() argument
6356 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
6359 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
6362 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
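A sketch of the usual guard before mangling headers in place; VLAN_ETH_HLEN is just an example length:

	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (err)
		return err;
	/* the first VLAN_ETH_HLEN bytes are now linear and unshared */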
6366 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) in skb_ensure_writable_head_tail() argument
6376 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6377 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6379 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6380 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
6382 if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) in skb_ensure_writable_head_tail()
6386 return pskb_expand_head(skb, needed_headroom, needed_tailroom, in skb_ensure_writable_head_tail()
6392 * expects an skb without skb_vlan_tag_present, i.e. with the vlan tag still in the payload
6394 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
6396 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6400 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6405 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
6409 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6411 vlan_remove_tag(skb, vlan_tci); in __skb_vlan_pop()
6413 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6415 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
6416 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
6418 skb_reset_mac_len(skb); in __skb_vlan_pop()
6425 * Expects skb->data at mac header.
6427 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
6433 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
6434 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
6436 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6439 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6444 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6447 vlan_proto = skb->protocol; in skb_vlan_pop()
6448 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
6452 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
6458 * Expects skb->data at mac header.
6460 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
6462 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
6463 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6467 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6472 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6473 skb_vlan_tag_get(skb)); in skb_vlan_push()
6477 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6478 skb->network_header -= VLAN_HLEN; in skb_vlan_push()
6480 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
6482 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
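An openvswitch-style action pair, assuming skb->data sits at the mac header as both helpers require; vlan_tci is an assumed tag value:

	err = skb_vlan_push(skb, htons(ETH_P_8021Q), vlan_tci);
	if (err)
		return err;
	/* later, undo it */
	err = skb_vlan_pop(skb);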
6490 * @skb: Socket buffer to modify
6492 * Drop the Ethernet header of @skb.
6494 * Expects that skb->data points to the mac header and that no VLAN tags are
6499 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
6501 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
6502 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
6505 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
6506 skb_reset_mac_header(skb); in skb_eth_pop()
6507 skb_reset_mac_len(skb); in skb_eth_pop()
6516 * @skb: Socket buffer to modify
6520 * Prepend @skb with a new Ethernet header.
6522 * Expects that skb->data points to the mac header, which must be empty.
6526 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
6532 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
6535 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
6539 skb_push(skb, sizeof(*eth)); in skb_eth_push()
6540 skb_reset_mac_header(skb); in skb_eth_push()
6541 skb_reset_mac_len(skb); in skb_eth_push()
6543 eth = eth_hdr(skb); in skb_eth_push()
6546 eth->h_proto = skb->protocol; in skb_eth_push()
6548 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
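A sketch of re-framing a packet around an L3 tunnel, with dst_mac/src_mac assumed:

	err = skb_eth_pop(skb);		/* strip the Ethernet header */
	if (err)
		return err;
	/* route or encapsulate the bare L3 packet here */
	err = skb_eth_push(skb, dst_mac, src_mac);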
6554 /* Update the ethertype of hdr and the skb csum value if required. */
6555 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
6558 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6561 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6571 * @skb: buffer
6578 * Expects skb->data at mac header.
6582 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
6592 if (skb->encapsulation) in skb_mpls_push()
6595 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
6599 if (!skb->inner_protocol) { in skb_mpls_push()
6600 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
6601 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6604 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
6605 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6607 skb_reset_mac_header(skb); in skb_mpls_push()
6608 skb_set_network_header(skb, mac_len); in skb_mpls_push()
6609 skb_reset_mac_len(skb); in skb_mpls_push()
6611 lse = mpls_hdr(skb); in skb_mpls_push()
6613 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
6616 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
6617 skb->protocol = mpls_proto; in skb_mpls_push()
6626 * @skb: buffer
6631 * Expects skb->data at mac header.
6635 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
6640 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6643 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
6647 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
6648 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
6651 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
6652 skb_reset_mac_header(skb); in skb_mpls_pop()
6653 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
6659 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6660 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
6662 skb->protocol = next_proto; in skb_mpls_pop()
6671 * @skb: buffer
6674 * Expects skb->data at mac header.
6678 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
6682 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6685 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6689 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6690 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6692 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6695 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6704 * @skb: buffer
6706 * Expects skb->data at mac header.
6710 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6715 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6718 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6721 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6729 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
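Building a label stack entry by hand for skb_mpls_push(), a sketch using the shift/mask macros from uapi/linux/mpls.h; the label value and TTL are arbitrary:

	__be32 lse;
	int err;

	lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
			  (64 << MPLS_LS_TTL_SHIFT) |
			  MPLS_LS_S_MASK);		/* bottom of stack */
	err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
			    skb->mac_len, true);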
6734 * alloc_skb_with_frags - allocate skb with page frags
6742 * This can be used to allocate a paged skb, given a maximal order for frags.
6751 struct sk_buff *skb; in alloc_skb_with_frags() local
6760 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6761 if (!skb) in alloc_skb_with_frags()
6786 skb_fill_page_desc(skb, nr_frags, page, 0, chunk); in alloc_skb_with_frags()
6788 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6791 return skb; in alloc_skb_with_frags()
6794 kfree_skb(skb); in alloc_skb_with_frags()
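A sketch along the lines of sock_alloc_send_pskb(): a small linear header part plus a paged payload; the sizes here are arbitrary:

	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(ETH_HLEN + 64,	/* linear part */
				   SZ_64K,		/* paged data */
				   PAGE_ALLOC_COSTLY_ORDER,
				   &err, GFP_KERNEL);
	if (!skb)
		return err;
	skb_reserve(skb, ETH_HLEN);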
6799 /* carve out the first off bytes from skb when off < headlen */
6800 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6804 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_header()
6808 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6817 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6818 skb->len -= off; in pskb_carve_inside_header()
6821 skb_shinfo(skb), in pskb_carve_inside_header()
6823 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6824 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6826 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6830 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6831 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6832 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6833 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6834 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_header()
6839 skb_free_head(skb); in pskb_carve_inside_header()
6842 skb->head = data; in pskb_carve_inside_header()
6843 skb->data = data; in pskb_carve_inside_header()
6844 skb->head_frag = 0; in pskb_carve_inside_header()
6845 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6846 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6847 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6848 skb->cloned = 0; in pskb_carve_inside_header()
6849 skb->hdr_len = 0; in pskb_carve_inside_header()
6850 skb->nohdr = 0; in pskb_carve_inside_header()
6851 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6856 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6858 /* carve out the first eat bytes from skb's frag_list. May recurse into
6911 /* carve off first len bytes from skb. Split line (off) is in the
6912 * non-linear part of skb
6914 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6918 unsigned int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6920 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6923 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6932 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6933 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6939 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6942 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6956 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6962 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6963 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6968 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6969 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6973 skb_release_data(skb, SKB_CONSUMED); in pskb_carve_inside_nonlinear()
6975 skb->head = data; in pskb_carve_inside_nonlinear()
6976 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6977 skb->data = data; in pskb_carve_inside_nonlinear()
6978 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6979 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6980 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6981 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6982 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6983 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6984 skb->len -= off; in pskb_carve_inside_nonlinear()
6985 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6986 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6990 /* remove len bytes from the beginning of the skb */
6991 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6993 int headlen = skb_headlen(skb); in pskb_carve()
6996 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6998 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
7001 /* Extract to_copy bytes starting at off from skb, and return this in
7002 * a new skb
7004 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
7007 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
7023 * @skb: buffer
7025 * Can be used to save memory before skb is added to a busy queue.
7026 * If the packet has bytes in frags and enough tail room in skb->head,
7030 * We do not reallocate skb->head and thus cannot fail.
7031 * Caller must re-evaluate skb->truesize if needed.
7033 void skb_condense(struct sk_buff *skb) in skb_condense() argument
7035 if (skb->data_len) { in skb_condense()
7036 if (skb->data_len > skb->end - skb->tail || in skb_condense()
7037 skb_cloned(skb) || !skb_frags_readable(skb)) in skb_condense()
7041 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
7043 /* At this point, skb->truesize might be over estimated, in skb_condense()
7044 * because skb had a fragment, and fragments do not tell in skb_condense()
7046 * When we pulled its content into skb->head, fragment in skb_condense()
7048 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
7050 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
7061 * __skb_ext_alloc - allocate a new skb extensions storage
7066 * skb via __skb_ext_set().
7118 * __skb_ext_set - attach the specified extension storage to this skb
7119 * @skb: buffer
7127 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
7132 skb_ext_put(skb); in __skb_ext_set()
7136 skb->extensions = ext; in __skb_ext_set()
7137 skb->active_extensions = 1 << id; in __skb_ext_set()
7144 * @skb: buffer
7151 * If the skb was cloned, COW applies and the returned memory can be
7156 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
7161 if (skb->active_extensions) { in skb_ext_add()
7162 old = skb->extensions; in skb_ext_add()
7164 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
7184 skb->slow_gro = 1; in skb_ext_add()
7185 skb->extensions = new; in skb_ext_add()
7186 skb->active_extensions |= 1 << id; in skb_ext_add()
7209 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
7211 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
7213 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
7214 if (skb->active_extensions == 0) { in __skb_ext_del()
7215 skb->extensions = NULL; in __skb_ext_del()
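A sketch mirroring how the xfrm code attaches its extension; this particular id exists only under CONFIG_XFRM:

#ifdef CONFIG_XFRM
	struct sec_path *sp;

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);	/* COWs if cloned */
	if (!sp)
		return -ENOMEM;
#endif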
7254 static void kfree_skb_napi_cache(struct sk_buff *skb) in kfree_skb_napi_cache() argument
7256 /* if SKB is a clone, don't handle this case */ in kfree_skb_napi_cache()
7257 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in kfree_skb_napi_cache()
7258 __kfree_skb(skb); in kfree_skb_napi_cache()
7263 __napi_kfree_skb(skb, SKB_CONSUMED); in kfree_skb_napi_cache()
7268 * skb_attempt_defer_free - queue skb for remote freeing
7269 * @skb: buffer
7271 * Put @skb in a per-cpu list, using the cpu which
7272 * allocated the skb/pages to reduce false sharing
7275 void skb_attempt_defer_free(struct sk_buff *skb) in skb_attempt_defer_free() argument
7284 if (skb_zcopy(skb)) in skb_attempt_defer_free()
7287 cpu = skb->alloc_cpu; in skb_attempt_defer_free()
7291 nodefer: kfree_skb_napi_cache(skb); in skb_attempt_defer_free()
7295 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); in skb_attempt_defer_free()
7296 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
7297 DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb)); in skb_attempt_defer_free()
7307 llist_add(&skb->ll_node, &sdn->defer_list); in skb_attempt_defer_free()
7319 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, in skb_splice_csum_page() argument
7328 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
7333 * @skb: The buffer to add pages to
7345 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, in skb_splice_from_iter() argument
7358 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
7380 ret = skb_append_pagefrags(skb, page, off, part, in skb_splice_from_iter()
7387 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7388 skb_splice_csum_page(skb, page, off, part); in skb_splice_from_iter()
7401 skb_len_add(skb, spliced); in skb_splice_from_iter()
7463 struct vlan_type_depth __vlan_get_protocol_offset(const struct sk_buff *skb, in __vlan_get_protocol_offset() argument
7467 unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; in __vlan_get_protocol_offset()
7483 vh = skb_header_pointer(skb, mac_offset + vlan_depth, in __vlan_get_protocol_offset()