Lines Matching +full:px +full:- +full:supply

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Florian La Roche <rzsfl@rz.uni-sb.de>
19 * Ray VanTassle : Fixed --skb->lock in free
137 * drop_reasons_register_subsys - register another drop reason subsystem
156 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
175 * skb_panic - private function for out-of-line support
181 * Out-of-line support for skb_put() and skb_push().
189 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n", in skb_panic()
190 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
191 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
192 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
218 * page - to avoid excessive truesize underestimation
232 offset = nc->offset - SZ_1K; in page_frag_alloc_1k()
240 nc->va = page_address(page); in page_frag_alloc_1k()
241 nc->pfmemalloc = page_is_pfmemalloc(page); in page_frag_alloc_1k()
242 offset = PAGE_SIZE - SZ_1K; in page_frag_alloc_1k()
246 nc->offset = offset; in page_frag_alloc_1k()
247 return nc->va + offset; in page_frag_alloc_1k()
278 * skb->head being backed by slab, not a page fragment.
280 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
289 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); in napi_get_frags_check()
300 return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); in __napi_alloc_frag_align()
318 data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); in __netdev_alloc_frag_align()
330 if (unlikely(!nc->skb_count)) { in napi_skb_cache_get()
331 nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache, in napi_skb_cache_get()
334 nc->skb_cache); in napi_skb_cache_get()
335 if (unlikely(!nc->skb_count)) in napi_skb_cache_get()
339 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
350 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); in __finalize_skb_around()
353 skb->truesize = SKB_TRUESIZE(size); in __finalize_skb_around()
354 refcount_set(&skb->users, 1); in __finalize_skb_around()
355 skb->head = data; in __finalize_skb_around()
356 skb->data = data; in __finalize_skb_around()
359 skb->mac_header = (typeof(skb->mac_header))~0U; in __finalize_skb_around()
360 skb->transport_header = (typeof(skb->transport_header))~0U; in __finalize_skb_around()
361 skb->alloc_cpu = raw_smp_processor_id(); in __finalize_skb_around()
365 atomic_set(&shinfo->dataref, 1); in __finalize_skb_around()
426 * __build_skb - build a network buffer
460 * takes care of skb->head and skb->pfmemalloc
467 skb->head_frag = 1; in build_skb()
475 * build_skb_around - build a network buffer around provided skb
489 skb->head_frag = 1; in build_skb_around()
497 * __napi_build_skb - build a network buffer
521 * napi_build_skb - build a network buffer
525 * Version of __napi_build_skb() that takes care of skb->head_frag
526 * and skb->pfmemalloc when the data is a page or page fragment.
535 skb->head_frag = 1; in napi_build_skb()
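The build_skb()/napi_build_skb() matches above only show where skb->head_frag gets set. A minimal caller sketch, assuming the buffer came from a page or page fragment (my_wrap_rx_buffer and its arguments are hypothetical, not from this file):

        /* Wrap an already-filled page fragment into an skb without copying.
         * build_skb() sets skb->head_frag and skb->pfmemalloc because the
         * data is page-backed rather than slab-backed.
         */
        static struct sk_buff *my_wrap_rx_buffer(void *va, unsigned int frag_size,
                                                 unsigned int headroom,
                                                 unsigned int pkt_len)
        {
                struct sk_buff *skb = build_skb(va, frag_size);

                if (unlikely(!skb))
                        return NULL;

                skb_reserve(skb, headroom);     /* skip the headroom the HW left */
                skb_put(skb, pkt_len);          /* expose the received bytes */
                return skb;
        }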
573 /* The following cast might truncate high-order bits of obj_size, this in kmalloc_reserve()
606 * __alloc_skb - allocate a network buffer
649 * Both skb->head and skb_shared_info are cache line aligned. in __alloc_skb()
667 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
674 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
675 refcount_set(&fclones->fclone_ref, 1); in __alloc_skb()
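The __alloc_skb() matches only show the pfmemalloc and fast-clone bookkeeping. Callers usually go through the alloc_skb() wrapper; a short sketch, with payload and payload_len as hypothetical placeholders:

        struct sk_buff *skb;

        skb = alloc_skb(LL_MAX_HEADER + payload_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, LL_MAX_HEADER);        /* headroom for lower-layer headers */
        memcpy(skb_put(skb, payload_len), payload, payload_len);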
687 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
710 * we use kmalloc() for skb->head allocation. in __netdev_alloc_skb()
729 pfmemalloc = nc->pfmemalloc; in __netdev_alloc_skb()
734 pfmemalloc = nc->pfmemalloc; in __netdev_alloc_skb()
748 skb->pfmemalloc = 1; in __netdev_alloc_skb()
749 skb->head_frag = 1; in __netdev_alloc_skb()
753 skb->dev = dev; in __netdev_alloc_skb()
761 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
769 * CPU cycles by avoiding having to disable and re-enable IRQs.
785 * we use kmalloc() for skb->head allocation. in __napi_alloc_skb()
807 * - 'len' less than GRO_MAX_HEAD makes little sense in __napi_alloc_skb()
808 * - On most systems, larger 'len' values lead to fragment in __napi_alloc_skb()
810 * - kmalloc would use the kmalloc-1k slab for such values in __napi_alloc_skb()
811 * - Builds with smaller GRO_MAX_HEAD will very likely do in __napi_alloc_skb()
817 data = page_frag_alloc_1k(&nc->page_small, gfp_mask); in __napi_alloc_skb()
818 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); in __napi_alloc_skb()
822 data = page_frag_alloc(&nc->page, len, gfp_mask); in __napi_alloc_skb()
823 pfmemalloc = nc->page.pfmemalloc; in __napi_alloc_skb()
836 skb->pfmemalloc = 1; in __napi_alloc_skb()
837 skb->head_frag = 1; in __napi_alloc_skb()
841 skb->dev = napi->dev; in __napi_alloc_skb()
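The __napi_alloc_skb() matches show the choice between the 1k page-fragment cache and the regular page-fragment cache. A typical caller is a NAPI poll loop; in this hedged sketch rx_buf and pkt_len are hypothetical, and softirq context is assumed so the per-CPU caches can be used without disabling IRQs:

        skb = napi_alloc_skb(napi, pkt_len);
        if (unlikely(!skb))
                break;                          /* drop this frame */

        skb_put_data(skb, rx_buf, pkt_len);     /* copy the small frame in */
        skb->protocol = eth_type_trans(skb, napi->dev);
        napi_gro_receive(napi, skb);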
854 skb->len += size; in skb_add_rx_frag()
855 skb->data_len += size; in skb_add_rx_frag()
856 skb->truesize += truesize; in skb_add_rx_frag()
863 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
868 skb->len += size; in skb_coalesce_rx_frag()
869 skb->data_len += size; in skb_coalesce_rx_frag()
870 skb->truesize += truesize; in skb_coalesce_rx_frag()
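The skb_add_rx_frag()/skb_coalesce_rx_frag() matches show only the len/data_len/truesize accounting. A hedged caller sketch for attaching one received page fragment, where page, offset, size and truesize are hypothetical driver state:

        /* truesize must reflect the memory really consumed by the fragment,
         * typically the full buffer size rather than just 'size'.
         */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        page, offset, size, truesize);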
882 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
895 return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; in is_pp_page()
906 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation in napi_pp_put_page()
916 pp = page->pp; in napi_pp_put_page()
925 const struct napi_struct *napi = READ_ONCE(pp->p.napi); in napi_pp_put_page()
928 READ_ONCE(napi->list_owner) == smp_processor_id(); in napi_pp_put_page()
932 * This will *not* work for NIC using a split-page memory model. in napi_pp_put_page()
945 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) in skb_pp_recycle()
951 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
956 * i.e. when skb->pp_recycle is true, and not for fragments in a
957 * non-pp-recycling skb. It has a fallback to increase references on normal
966 if (!skb->pp_recycle) in skb_pp_frag_ref()
967 return -EINVAL; in skb_pp_frag_ref()
971 for (i = 0; i < shinfo->nr_frags; i++) { in skb_pp_frag_ref()
972 head_page = compound_head(skb_frag_page(&shinfo->frags[i])); in skb_pp_frag_ref()
991 unsigned char *head = skb->head; in skb_free_head()
993 if (skb->head_frag) { in skb_free_head()
1008 if (skb->cloned && in skb_release_data()
1009 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
1010 &shinfo->dataref)) in skb_release_data()
1014 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS; in skb_release_data()
1021 for (i = 0; i < shinfo->nr_frags; i++) in skb_release_data()
1022 napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe); in skb_release_data()
1025 if (shinfo->frag_list) in skb_release_data()
1026 kfree_skb_list_reason(shinfo->frag_list, reason); in skb_release_data()
1039 skb->pp_recycle = 0; in skb_release_data()
1049 switch (skb->fclone) { in kfree_skbmem()
1061 if (refcount_read(&fclones->fclone_ref) == 1) in kfree_skbmem()
1069 if (!refcount_dec_and_test(&fclones->fclone_ref)) in kfree_skbmem()
1078 if (skb->destructor) { in skb_release_head_state()
1080 skb->destructor(skb); in skb_release_head_state()
1093 if (likely(skb->head)) in skb_release_all()
1098 * __kfree_skb - private function
1132 * kfree_skb_reason - free an sk_buff with special reason
1160 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { in kfree_skb_add_bulk()
1166 sa->skb_array[sa->skb_count++] = skb; in kfree_skb_add_bulk()
1168 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { in kfree_skb_add_bulk()
1170 sa->skb_array); in kfree_skb_add_bulk()
1171 sa->skb_count = 0; in kfree_skb_add_bulk()
1183 struct sk_buff *next = segs->next; in kfree_skb_list_reason()
1200 * Must only be called from net_ratelimit()-ed paths.
1207 struct net_device *dev = skb->dev; in skb_dump()
1208 struct sock *sk = skb->sk; in skb_dump()
1215 len = skb->len; in skb_dump()
1217 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
1230 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
1231 has_mac ? skb->mac_header : -1, in skb_dump()
1232 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
1233 skb->network_header, in skb_dump()
1234 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
1235 has_trans ? skb->transport_header : -1, in skb_dump()
1236 sh->tx_flags, sh->nr_frags, in skb_dump()
1237 sh->gso_size, sh->gso_type, sh->gso_segs, in skb_dump()
1238 skb->csum, skb->ip_summed, skb->csum_complete_sw, in skb_dump()
1239 skb->csum_valid, skb->csum_level, in skb_dump()
1240 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1241 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); in skb_dump()
1245 level, dev->name, &dev->features); in skb_dump()
1248 level, sk->sk_family, sk->sk_type, sk->sk_protocol); in skb_dump()
1252 16, 1, skb->head, headroom, false); in skb_dump()
1257 16, 1, skb->data, seg_len, false); in skb_dump()
1258 len -= seg_len; in skb_dump()
1264 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1265 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1279 len -= seg_len; in skb_dump()
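The skb_dump() matches print the skb metadata, socket and payload. Per the "net_ratelimit()-ed paths" comment at line 1200, the expected call pattern is roughly:

        if (net_ratelimit())
                skb_dump(KERN_ERR, skb, false); /* false: headers only, true: full payload */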
1294 * skb_tx_error - report an sk_buff xmit error
1311 * consume_skb - free an skbuff
1330 * __consume_stateless_skb - free an skbuff, assuming it is stateless
1351 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1353 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { in napi_skb_cache_put()
1355 kasan_mempool_unpoison_object(nc->skb_cache[i], in napi_skb_cache_put()
1359 nc->skb_cache + NAPI_SKB_CACHE_HALF); in napi_skb_cache_put()
1360 nc->skb_count = NAPI_SKB_CACHE_HALF; in napi_skb_cache_put()
1372 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1377 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1384 /* Zero budget indicate non-NAPI context called us, like netpoll */ in napi_consume_skb()
1399 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1416 new->tstamp = old->tstamp; in __copy_skb_header()
1417 /* We do not copy old->sk */ in __copy_skb_header()
1418 new->dev = old->dev; in __copy_skb_header()
1419 memcpy(new->cb, old->cb, sizeof(old->cb)); in __copy_skb_header()
1427 new->queue_mapping = old->queue_mapping; in __copy_skb_header()
1429 memcpy(&new->headers, &old->headers, sizeof(new->headers)); in __copy_skb_header()
1467 #define C(x) n->x = skb->x in __skb_clone()
1469 n->next = n->prev = NULL; in __skb_clone()
1470 n->sk = NULL; in __skb_clone()
1476 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1477 n->cloned = 1; in __skb_clone()
1478 n->nohdr = 0; in __skb_clone()
1479 n->peeked = 0; in __skb_clone()
1482 n->destructor = NULL; in __skb_clone()
1489 refcount_set(&n->users, 1); in __skb_clone()
1491 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1492 skb->cloned = 1; in __skb_clone()
1499 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1510 n->len = first->len; in alloc_skb_for_msg()
1511 n->data_len = first->len; in alloc_skb_for_msg()
1512 n->truesize = first->truesize; in alloc_skb_for_msg()
1514 skb_shinfo(n)->frag_list = first; in alloc_skb_for_msg()
1517 n->destructor = NULL; in alloc_skb_for_msg()
1524 * skb_morph - morph one skb into another
1526 * @src: the skb to supply the contents
1554 user = mmp->user ? : current_user(); in mm_account_pinned_pages()
1556 old_pg = atomic_long_read(&user->locked_vm); in mm_account_pinned_pages()
1560 return -ENOBUFS; in mm_account_pinned_pages()
1561 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); in mm_account_pinned_pages()
1563 if (!mmp->user) { in mm_account_pinned_pages()
1564 mmp->user = get_uid(user); in mm_account_pinned_pages()
1565 mmp->num_pg = num_pg; in mm_account_pinned_pages()
1567 mmp->num_pg += num_pg; in mm_account_pinned_pages()
1576 if (mmp->user) { in mm_unaccount_pinned_pages()
1577 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); in mm_unaccount_pinned_pages()
1578 free_uid(mmp->user); in mm_unaccount_pinned_pages()
1594 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1595 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1596 uarg->mmp.user = NULL; in msg_zerocopy_alloc()
1598 if (mm_account_pinned_pages(&uarg->mmp, size)) { in msg_zerocopy_alloc()
1603 uarg->ubuf.callback = msg_zerocopy_callback; in msg_zerocopy_alloc()
1604 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; in msg_zerocopy_alloc()
1605 uarg->len = 1; in msg_zerocopy_alloc()
1606 uarg->bytelen = size; in msg_zerocopy_alloc()
1607 uarg->zerocopy = 1; in msg_zerocopy_alloc()
1608 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; in msg_zerocopy_alloc()
1609 refcount_set(&uarg->ubuf.refcnt, 1); in msg_zerocopy_alloc()
1612 return &uarg->ubuf; in msg_zerocopy_alloc()
1629 if (uarg->callback != msg_zerocopy_callback) in msg_zerocopy_realloc()
1633 * so uarg->len and sk_zckey access is serialized in msg_zerocopy_realloc()
1641 bytelen = uarg_zc->bytelen + size; in msg_zerocopy_realloc()
1642 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { in msg_zerocopy_realloc()
1644 if (sk->sk_type == SOCK_STREAM) in msg_zerocopy_realloc()
1649 next = (u32)atomic_read(&sk->sk_zckey); in msg_zerocopy_realloc()
1650 if ((u32)(uarg_zc->id + uarg_zc->len) == next) { in msg_zerocopy_realloc()
1651 if (mm_account_pinned_pages(&uarg_zc->mmp, size)) in msg_zerocopy_realloc()
1653 uarg_zc->len++; in msg_zerocopy_realloc()
1654 uarg_zc->bytelen = bytelen; in msg_zerocopy_realloc()
1655 atomic_set(&sk->sk_zckey, ++next); in msg_zerocopy_realloc()
1658 if (sk->sk_type == SOCK_STREAM) in msg_zerocopy_realloc()
1676 old_lo = serr->ee.ee_info; in skb_zerocopy_notify_extend()
1677 old_hi = serr->ee.ee_data; in skb_zerocopy_notify_extend()
1678 sum_len = old_hi - old_lo + 1ULL + len; in skb_zerocopy_notify_extend()
1686 serr->ee.ee_data += len; in skb_zerocopy_notify_extend()
1694 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1701 mm_unaccount_pinned_pages(&uarg->mmp); in __msg_zerocopy_callback()
1706 if (!uarg->len || sock_flag(sk, SOCK_DEAD)) in __msg_zerocopy_callback()
1709 len = uarg->len; in __msg_zerocopy_callback()
1710 lo = uarg->id; in __msg_zerocopy_callback()
1711 hi = uarg->id + len - 1; in __msg_zerocopy_callback()
1712 is_zerocopy = uarg->zerocopy; in __msg_zerocopy_callback()
1716 serr->ee.ee_errno = 0; in __msg_zerocopy_callback()
1717 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; in __msg_zerocopy_callback()
1718 serr->ee.ee_data = hi; in __msg_zerocopy_callback()
1719 serr->ee.ee_info = lo; in __msg_zerocopy_callback()
1721 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; in __msg_zerocopy_callback()
1723 q = &sk->sk_error_queue; in __msg_zerocopy_callback()
1724 spin_lock_irqsave(&q->lock, flags); in __msg_zerocopy_callback()
1726 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || in __msg_zerocopy_callback()
1731 spin_unlock_irqrestore(&q->lock, flags); in __msg_zerocopy_callback()
1745 uarg_zc->zerocopy = uarg_zc->zerocopy & success; in msg_zerocopy_callback()
1747 if (refcount_dec_and_test(&uarg->refcnt)) in msg_zerocopy_callback()
1754 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; in msg_zerocopy_put_abort()
1756 atomic_dec(&sk->sk_zckey); in msg_zerocopy_put_abort()
1757 uarg_to_msgzc(uarg)->len--; in msg_zerocopy_put_abort()
1769 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1775 return -EEXIST; in skb_zerocopy_iter_stream()
1777 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1778 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1779 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1782 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1783 skb->sk = sk; in skb_zerocopy_iter_stream()
1785 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1790 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1798 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1799 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1812 return -ENOMEM; in skb_zerocopy_clone()
1817 return -EIO; in skb_zerocopy_clone()
1825 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1841 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1847 return -EINVAL; in skb_copy_ubufs()
1860 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
1869 return -ENOMEM; in skb_copy_ubufs()
1878 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1893 copy = min_t(u32, psize - d_off, p_len - done); in skb_copy_ubufs()
1908 for (i = 0; i < new_frags - 1; i++) { in skb_copy_ubufs()
1912 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); in skb_copy_ubufs()
1913 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1922 * skb_clone - duplicate an sk_buff
1945 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1946 refcount_read(&fclones->fclone_ref) == 1) { in skb_clone()
1947 n = &fclones->skb2; in skb_clone()
1948 refcount_set(&fclones->fclone_ref, 2); in skb_clone()
1949 n->fclone = SKB_FCLONE_CLONE; in skb_clone()
1958 n->fclone = SKB_FCLONE_UNAVAILABLE; in skb_clone()
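The skb_clone() matches show the fast-clone (SKB_FCLONE_*) path. A minimal caller sketch, assuming atomic context:

        struct sk_buff *nskb;

        /* skb_clone() duplicates only struct sk_buff; the data buffer is
         * shared, so the clone must treat the payload as read-only.
         */
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return NET_RX_DROP;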
1968 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1969 skb->csum_start += off; in skb_headers_offset_update()
1970 /* {transport,network,mac}_header and tail are relative to skb->head */ in skb_headers_offset_update()
1971 skb->transport_header += off; in skb_headers_offset_update()
1972 skb->network_header += off; in skb_headers_offset_update()
1974 skb->mac_header += off; in skb_headers_offset_update()
1975 skb->inner_transport_header += off; in skb_headers_offset_update()
1976 skb->inner_network_header += off; in skb_headers_offset_update()
1977 skb->inner_mac_header += off; in skb_headers_offset_update()
1985 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; in skb_copy_header()
1986 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; in skb_copy_header()
1987 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; in skb_copy_header()
1999 * skb_copy - create private copy of an sk_buff
2008 * As by-product this function converts non-linear &sk_buff to linear
2018 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
2028 skb_put(n, skb->len); in skb_copy()
2030 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
2038 * __pskb_copy_fclone - create copy of an sk_buff with private head.
2069 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
2071 n->truesize += skb->data_len; in __pskb_copy_fclone()
2072 n->data_len = skb->data_len; in __pskb_copy_fclone()
2073 n->len = skb->len; in __pskb_copy_fclone()
2075 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
2084 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
2085 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
2088 skb_shinfo(n)->nr_frags = i; in __pskb_copy_fclone()
2092 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
2103 * pskb_expand_head - reallocate header of &sk_buff
2144 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
2148 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
2159 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
2160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
2170 off = (data + nhead) - skb->head; in pskb_expand_head()
2172 skb->head = data; in pskb_expand_head()
2173 skb->head_frag = 0; in pskb_expand_head()
2174 skb->data += off; in pskb_expand_head()
2180 skb->tail += off; in pskb_expand_head()
2182 skb->cloned = 0; in pskb_expand_head()
2183 skb->hdr_len = 0; in pskb_expand_head()
2184 skb->nohdr = 0; in pskb_expand_head()
2185 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
2189 /* It is not generally safe to change skb->truesize. in pskb_expand_head()
2193 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
2194 skb->truesize += size - osize; in pskb_expand_head()
2201 return -ENOMEM; in pskb_expand_head()
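The pskb_expand_head() matches show the head buffer being reallocated and the header offsets and truesize fixed up. A hedged caller sketch for growing headroom; needed_headroom is a hypothetical value, and many callers use the skb_cow_head() helper instead:

        if (skb_headroom(skb) < needed_headroom &&
            pskb_expand_head(skb, needed_headroom - skb_headroom(skb),
                             0, GFP_ATOMIC))
                goto drop;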
2210 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
2226 /* Note: We plan to rework this in linux-6.4 */
2234 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
2240 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
2245 /* We can not change skb->end if the original or new value in __skb_unclone_keeptruesize()
2253 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", in __skb_unclone_keeptruesize()
2261 /* We are about to change back skb->end, in __skb_unclone_keeptruesize()
2264 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
2266 offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); in __skb_unclone_keeptruesize()
2274 * skb_expand_head - reallocate header of &sk_buff
2279 * if possible; copies skb->sk to new skb as needed
2287 int delta = headroom - skb_headroom(skb); in skb_expand_head()
2289 struct sock *sk = skb->sk; in skb_expand_head()
2312 delta = skb_end_offset(skb) - osize; in skb_expand_head()
2313 refcount_add(delta, &sk->sk_wmem_alloc); in skb_expand_head()
2314 skb->truesize += delta; in skb_expand_head()
2325 * skb_copy_expand - copy and expand sk_buff
2349 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2361 skb_put(n, skb->len); in skb_copy_expand()
2368 head_copy_off = newheadroom - head_copy_len; in skb_copy_expand()
2371 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2372 skb->len + head_copy_len)); in skb_copy_expand()
2376 skb_headers_offset_update(n, newheadroom - oldheadroom); in skb_copy_expand()
2383 * __skb_pad - zero pad the tail of an skb
2403 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2407 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2414 /* FIXME: The use of this function with non-linear skb's really needs in __skb_pad()
2421 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2432 * pskb_put - add data to the tail of a potentially fragmented buffer
2438 * fragmented buffer. @tail must be the last fragment of @skb -- or
2447 skb->data_len += len; in pskb_put()
2448 skb->len += len; in pskb_put()
2455 * skb_put - add data to a buffer
2467 skb->tail += len; in skb_put()
2468 skb->len += len; in skb_put()
2469 if (unlikely(skb->tail > skb->end)) in skb_put()
2476 * skb_push - add data to the start of a buffer
2486 skb->data -= len; in skb_push()
2487 skb->len += len; in skb_push()
2488 if (unlikely(skb->data < skb->head)) in skb_push()
2490 return skb->data; in skb_push()
2495 * skb_pull - remove data from the start of a buffer
2511 * skb_pull_data - remove data from the start of a buffer returning its
2523 void *data = skb->data; in skb_pull_data()
2525 if (skb->len < len) in skb_pull_data()
2535 * skb_trim - remove end from a buffer
2545 if (skb->len > len) in skb_trim()
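The skb_put()/skb_push()/skb_pull()/skb_trim() matches show the pointer arithmetic and the overflow checks that end in skb_panic(). A compact sketch of how the calls move skb->data and skb->tail; struct my_hdr and payload_len are hypothetical:

        void *data, *hdr;

        /* Build: leave headroom, append payload, then prepend a header. */
        skb_reserve(skb, sizeof(struct my_hdr));        /* headroom only, len stays 0 */
        data = skb_put(skb, payload_len);               /* advances skb->tail and skb->len */
        hdr  = skb_push(skb, sizeof(struct my_hdr));    /* moves skb->data back into headroom */

        /* Parse: strip the header again and clip any trailing padding. */
        skb_pull(skb, sizeof(struct my_hdr));           /* moves skb->data forward */
        skb_trim(skb, payload_len);                     /* lowers skb->len / skb->tail */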
2558 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2571 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2578 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2581 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2591 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2592 fragp = &frag->next) { in ___pskb_trim()
2593 int end = offset + frag->len; in ___pskb_trim()
2600 return -ENOMEM; in ___pskb_trim()
2602 nfrag->next = frag->next; in ___pskb_trim()
2614 unlikely((err = pskb_trim(frag, len - offset)))) in ___pskb_trim()
2617 if (frag->next) in ___pskb_trim()
2618 skb_drop_list(&frag->next); in ___pskb_trim()
2624 skb->data_len -= skb->len - len; in ___pskb_trim()
2625 skb->len = len; in ___pskb_trim()
2627 skb->len = len; in ___pskb_trim()
2628 skb->data_len = 0; in ___pskb_trim()
2632 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2642 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2643 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2645 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2648 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2650 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2653 return -EINVAL; in pskb_trim_rcsum_slow()
2660 * __pskb_pull_tail - advance tail of skb header
2690 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2709 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2710 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2714 eat -= size; in __pskb_pull_tail()
2725 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2730 if (list->len <= eat) { in __pskb_pull_tail()
2732 eat -= list->len; in __pskb_pull_tail()
2733 list = list->next; in __pskb_pull_tail()
2737 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2739 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2742 /* Sucks! We need to fork list. :-( */ in __pskb_pull_tail()
2746 insp = list->next; in __pskb_pull_tail()
2762 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2763 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2768 clone->next = list; in __pskb_pull_tail()
2769 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2777 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2778 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2782 eat -= size; in __pskb_pull_tail()
2784 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2786 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2797 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2800 skb->tail += delta; in __pskb_pull_tail()
2801 skb->data_len -= delta; in __pskb_pull_tail()
2803 if (!skb->data_len) in __pskb_pull_tail()
2811 * skb_copy_bits - copy bits from skb to kernel buffer
2831 if (offset > (int)skb->len - len) in skb_copy_bits()
2835 if ((copy = start - offset) > 0) { in skb_copy_bits()
2839 if ((len -= copy) == 0) in skb_copy_bits()
2845 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2847 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2852 if ((copy = end - offset) > 0) { in skb_copy_bits()
2861 skb_frag_off(f) + offset - start, in skb_copy_bits()
2868 if ((len -= copy) == 0) in skb_copy_bits()
2881 end = start + frag_iter->len; in skb_copy_bits()
2882 if ((copy = end - offset) > 0) { in skb_copy_bits()
2885 if (skb_copy_bits(frag_iter, offset - start, to, copy)) in skb_copy_bits()
2887 if ((len -= copy) == 0) in skb_copy_bits()
2899 return -EFAULT; in skb_copy_bits()
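The skb_copy_bits() matches walk the linear area, the page frags and the frag_list in turn. A short caller sketch for flattening the start of a packet into a stack buffer:

        u8 hdr_buf[64];

        /* Copies from wherever the bytes live; fails if the skb is shorter
         * than the requested range.
         */
        if (skb_copy_bits(skb, 0, hdr_buf, sizeof(hdr_buf)))
                return -EFAULT;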
2909 put_page(spd->pages[i]); in sock_spd_release()
2921 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); in linear_to_page()
2923 memcpy(page_address(pfrag->page) + pfrag->offset, in linear_to_page()
2925 *offset = pfrag->offset; in linear_to_page()
2926 pfrag->offset += *len; in linear_to_page()
2928 return pfrag->page; in linear_to_page()
2935 return spd->nr_pages && in spd_can_coalesce()
2936 spd->pages[spd->nr_pages - 1] == page && in spd_can_coalesce()
2937 (spd->partial[spd->nr_pages - 1].offset + in spd_can_coalesce()
2938 spd->partial[spd->nr_pages - 1].len == offset); in spd_can_coalesce()
2950 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) in spd_fill_page()
2959 spd->partial[spd->nr_pages - 1].len += *len; in spd_fill_page()
2963 spd->pages[spd->nr_pages] = page; in spd_fill_page()
2964 spd->partial[spd->nr_pages].len = *len; in spd_fill_page()
2965 spd->partial[spd->nr_pages].offset = offset; in spd_fill_page()
2966 spd->nr_pages++; in spd_fill_page()
2983 *off -= plen; in __splice_segment()
2989 plen -= *off; in __splice_segment()
2999 plen -= flen; in __splice_segment()
3000 *len -= flen; in __splice_segment()
3018 * If skb->head_frag is set, this 'linear' part is backed by a in __skb_splice_bits()
3022 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
3023 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
3033 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
3034 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
3043 if (*offset >= iter->len) { in __skb_splice_bits()
3044 *offset -= iter->len; in __skb_splice_bits()
3088 struct socket *sock = sk->sk_socket; in sendmsg_locked()
3092 return -EINVAL; in sendmsg_locked()
3094 if (!sock->ops->sendmsg_locked) in sendmsg_locked()
3097 return sock->ops->sendmsg_locked(sk, msg, size); in sendmsg_locked()
3102 struct socket *sock = sk->sk_socket; in sendmsg_unlocked()
3105 return -EINVAL; in sendmsg_unlocked()
3125 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3126 kv.iov_base = skb->data + offset; in __skb_send_sock()
3138 len -= ret; in __skb_send_sock()
3146 offset -= skb_headlen(skb); in __skb_send_sock()
3149 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3150 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3155 offset -= skb_frag_size(frag); in __skb_send_sock()
3158 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
3159 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
3161 slen = min_t(size_t, len, skb_frag_size(frag) - offset); in __skb_send_sock()
3179 len -= ret; in __skb_send_sock()
3181 slen -= ret; in __skb_send_sock()
3192 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
3195 } else if (skb->next) { in __skb_send_sock()
3196 skb = skb->next; in __skb_send_sock()
3202 return orig_len - len; in __skb_send_sock()
3205 return orig_len == len ? ret : orig_len - len; in __skb_send_sock()
3223 * skb_store_bits - store bits from kernel buffer to skb
3240 if (offset > (int)skb->len - len) in skb_store_bits()
3243 if ((copy = start - offset) > 0) { in skb_store_bits()
3247 if ((len -= copy) == 0) in skb_store_bits()
3253 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
3254 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
3260 if ((copy = end - offset) > 0) { in skb_store_bits()
3269 skb_frag_off(frag) + offset - start, in skb_store_bits()
3276 if ((len -= copy) == 0) in skb_store_bits()
3289 end = start + frag_iter->len; in skb_store_bits()
3290 if ((copy = end - offset) > 0) { in skb_store_bits()
3293 if (skb_store_bits(frag_iter, offset - start, in skb_store_bits()
3296 if ((len -= copy) == 0) in skb_store_bits()
3307 return -EFAULT; in skb_store_bits()
3316 int i, copy = start - offset; in __skb_checksum()
3324 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, in __skb_checksum()
3325 skb->data + offset, copy, csum); in __skb_checksum()
3326 if ((len -= copy) == 0) in __skb_checksum()
3332 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
3334 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
3339 if ((copy = end - offset) > 0) { in __skb_checksum()
3349 skb_frag_off(frag) + offset - start, in __skb_checksum()
3352 csum2 = INDIRECT_CALL_1(ops->update, in __skb_checksum()
3356 csum = INDIRECT_CALL_1(ops->combine, in __skb_checksum()
3362 if (!(len -= copy)) in __skb_checksum()
3374 end = start + frag_iter->len; in __skb_checksum()
3375 if ((copy = end - offset) > 0) { in __skb_checksum()
3379 csum2 = __skb_checksum(frag_iter, offset - start, in __skb_checksum()
3381 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, in __skb_checksum()
3383 if ((len -= copy) == 0) in __skb_checksum()
3414 int i, copy = start - offset; in skb_copy_and_csum_bits()
3423 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3425 if ((len -= copy) == 0) in skb_copy_and_csum_bits()
3432 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3437 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3438 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3439 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3449 skb_frag_off(frag) + offset - start, in skb_copy_and_csum_bits()
3460 if (!(len -= copy)) in skb_copy_and_csum_bits()
3474 end = start + frag_iter->len; in skb_copy_and_csum_bits()
3475 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3479 offset - start, in skb_copy_and_csum_bits()
3482 if ((len -= copy) == 0) in skb_copy_and_csum_bits()
3499 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3502 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3503 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3504 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3507 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3512 /* This function assumes skb->csum already holds pseudo header's checksum,
3514 * __skb_checksum_validate_complete(). And, the original skb->csum must
3517 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3518 * zero. The new checksum is stored back into skb->csum unless the skb is
3526 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3528 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3531 * re-computed checksum is valid instead, then we have a mismatch in __skb_checksum_complete()
3532 * between the original skb->csum and skb_checksum(). This means either in __skb_checksum_complete()
3533 * the original hardware checksum is incorrect or we screw up skb->csum in __skb_checksum_complete()
3534 * when moving skb->data around. in __skb_checksum_complete()
3537 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3538 !skb->csum_complete_sw) in __skb_checksum_complete()
3539 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3544 skb->csum = csum; in __skb_checksum_complete()
3545 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3546 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3547 skb->csum_valid = !sum; in __skb_checksum_complete()
3581 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3592 if (!from->head_frag || in skb_zerocopy_headlen()
3594 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { in skb_zerocopy_headlen()
3597 hlen = from->len; in skb_zerocopy_headlen()
3601 hlen = from->len; in skb_zerocopy_headlen()
3608 * skb_zerocopy - Zero copy skb to skb
3622 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3623 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3629 int plen = 0; /* length of skb->head fragment */ in skb_zerocopy()
3634 BUG_ON(!from->head_frag && !hlen); in skb_zerocopy()
3644 len -= hlen; in skb_zerocopy()
3648 page = virt_to_head_page(from->head); in skb_zerocopy()
3649 offset = from->data - (unsigned char *)page_address(page); in skb_zerocopy()
3653 len -= plen; in skb_zerocopy()
3661 return -ENOMEM; in skb_zerocopy()
3665 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { in skb_zerocopy()
3670 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; in skb_zerocopy()
3671 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), in skb_zerocopy()
3673 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); in skb_zerocopy()
3674 len -= size; in skb_zerocopy()
3678 skb_shinfo(to)->nr_frags = j; in skb_zerocopy()
3689 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3699 if (csstart != skb->len) in skb_copy_and_csum_dev()
3701 skb->len - csstart); in skb_copy_and_csum_dev()
3703 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3704 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3712 * skb_dequeue - remove from the head of the queue
3725 spin_lock_irqsave(&list->lock, flags); in skb_dequeue()
3727 spin_unlock_irqrestore(&list->lock, flags); in skb_dequeue()
3733 * skb_dequeue_tail - remove from the tail of the queue
3745 spin_lock_irqsave(&list->lock, flags); in skb_dequeue_tail()
3747 spin_unlock_irqrestore(&list->lock, flags); in skb_dequeue_tail()
3753 * skb_queue_purge_reason - empty a list
3772 spin_lock_irqsave(&list->lock, flags); in skb_queue_purge_reason()
3774 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_purge_reason()
3781 * skb_rbtree_purge - empty a skb rbtree
3788 * out-of-order queue is protected by the socket lock).
3799 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3800 sum += skb->truesize; in skb_rbtree_purge()
3814 spin_lock_irqsave(&list->lock, flags); in skb_errqueue_purge()
3816 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || in skb_errqueue_purge()
3817 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) in skb_errqueue_purge()
3822 spin_unlock_irqrestore(&list->lock, flags); in skb_errqueue_purge()
3828 * skb_queue_head - queue a buffer at the list head
3842 spin_lock_irqsave(&list->lock, flags); in skb_queue_head()
3844 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_head()
3849 * skb_queue_tail - queue a buffer at the list tail
3863 spin_lock_irqsave(&list->lock, flags); in skb_queue_tail()
3865 spin_unlock_irqrestore(&list->lock, flags); in skb_queue_tail()
3870 * skb_unlink - remove a buffer from a list
3883 spin_lock_irqsave(&list->lock, flags); in skb_unlink()
3885 spin_unlock_irqrestore(&list->lock, flags); in skb_unlink()
3890 * skb_append - append a buffer
3903 spin_lock_irqsave(&list->lock, flags); in skb_append()
3905 spin_unlock_irqrestore(&list->lock, flags); in skb_append()
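The skb_dequeue()/skb_queue_tail()/skb_unlink()/skb_append() matches show only the list locking. A minimal sk_buff_head usage sketch; my_queue and skb_to_enqueue are hypothetical, and the queue lock is taken internally by these helpers:

        struct sk_buff_head my_queue;
        struct sk_buff *skb;

        skb_queue_head_init(&my_queue);                 /* init list head and spinlock */

        skb_queue_tail(&my_queue, skb_to_enqueue);      /* producer side */

        while ((skb = skb_dequeue(&my_queue)) != NULL)  /* consumer side */
                consume_skb(skb);                       /* non-error free */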
3915 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3916 pos - len); in skb_split_inside_header()
3918 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3919 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
3921 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
3922 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
3923 skb1->data_len = skb->data_len; in skb_split_inside_header()
3924 skb1->len += skb1->data_len; in skb_split_inside_header()
3925 skb->data_len = 0; in skb_split_inside_header()
3926 skb->len = len; in skb_split_inside_header()
3935 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
3937 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
3938 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
3939 skb->len = len; in skb_split_no_header()
3940 skb->data_len = len - pos; in skb_split_no_header()
3943 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
3946 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
3958 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); in skb_split_no_header()
3959 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); in skb_split_no_header()
3960 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
3961 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3965 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3968 skb_shinfo(skb1)->nr_frags = k; in skb_split_no_header()
3972 * skb_split - Split fragmented skb to two parts at length len.
3984 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
3993 /* Shifting from/to a cloned skb is a no-go.
4003 * skb_shift - Shifts paged data partially from skb to another
4015 * to have non-paged data as well.
4018 * specialized skb free'er to handle frags without up-to-date nr_frags.
4025 BUG_ON(shiftlen > skb->len); in skb_shift()
4034 to = skb_shinfo(tgt)->nr_frags; in skb_shift()
4035 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4043 merge = -1; in skb_shift()
4045 merge = to - 1; in skb_shift()
4047 todo -= skb_frag_size(fragfrom); in skb_shift()
4054 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4055 fragto = &skb_shinfo(tgt)->frags[merge]; in skb_shift()
4067 /* Skip full, not-fitting skb to avoid expensive operations */ in skb_shift()
4068 if ((shiftlen == skb->len) && in skb_shift()
4069 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4075 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4079 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4080 fragto = &skb_shinfo(tgt)->frags[to]; in skb_shift()
4084 todo -= skb_frag_size(fragfrom); in skb_shift()
4104 skb_shinfo(tgt)->nr_frags = to; in skb_shift()
4107 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
4108 fragto = &skb_shinfo(tgt)->frags[merge]; in skb_shift()
4111 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
4116 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4117 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
4118 skb_shinfo(skb)->nr_frags = to; in skb_shift()
4120 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
4126 tgt->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4127 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
4129 skb_len_add(skb, -shiftlen); in skb_shift()
4136 * skb_prepare_seq_read - Prepare a sequential read of skb data
4148 st->lower_offset = from; in skb_prepare_seq_read()
4149 st->upper_offset = to; in skb_prepare_seq_read()
4150 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
4151 st->frag_idx = st->stepped_offset = 0; in skb_prepare_seq_read()
4152 st->frag_data = NULL; in skb_prepare_seq_read()
4153 st->frag_off = 0; in skb_prepare_seq_read()
4158 * skb_seq_read - Sequentially read skb data
4179 * at the moment, state->root_skb could be replaced with
4185 unsigned int block_limit, abs_offset = consumed + st->lower_offset; in skb_seq_read()
4188 if (unlikely(abs_offset >= st->upper_offset)) { in skb_seq_read()
4189 if (st->frag_data) { in skb_seq_read()
4190 kunmap_atomic(st->frag_data); in skb_seq_read()
4191 st->frag_data = NULL; in skb_seq_read()
4197 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; in skb_seq_read()
4199 if (abs_offset < block_limit && !st->frag_data) { in skb_seq_read()
4200 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); in skb_seq_read()
4201 return block_limit - abs_offset; in skb_seq_read()
4204 if (st->frag_idx == 0 && !st->frag_data) in skb_seq_read()
4205 st->stepped_offset += skb_headlen(st->cur_skb); in skb_seq_read()
4207 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { in skb_seq_read()
4210 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; in skb_seq_read()
4217 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; in skb_seq_read()
4218 pg_off = offset_in_page(pg_off + st->frag_off); in skb_seq_read()
4219 pg_sz = min_t(unsigned int, pg_sz - st->frag_off, in skb_seq_read()
4220 PAGE_SIZE - pg_off); in skb_seq_read()
4223 block_limit = pg_sz + st->stepped_offset; in skb_seq_read()
4225 if (!st->frag_data) in skb_seq_read()
4226 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); in skb_seq_read()
4228 *data = (u8 *)st->frag_data + pg_off + in skb_seq_read()
4229 (abs_offset - st->stepped_offset); in skb_seq_read()
4231 return block_limit - abs_offset; in skb_seq_read()
4234 if (st->frag_data) { in skb_seq_read()
4235 kunmap_atomic(st->frag_data); in skb_seq_read()
4236 st->frag_data = NULL; in skb_seq_read()
4239 st->stepped_offset += pg_sz; in skb_seq_read()
4240 st->frag_off += pg_sz; in skb_seq_read()
4241 if (st->frag_off == skb_frag_size(frag)) { in skb_seq_read()
4242 st->frag_off = 0; in skb_seq_read()
4243 st->frag_idx++; in skb_seq_read()
4247 if (st->frag_data) { in skb_seq_read()
4248 kunmap_atomic(st->frag_data); in skb_seq_read()
4249 st->frag_data = NULL; in skb_seq_read()
4252 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { in skb_seq_read()
4253 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; in skb_seq_read()
4254 st->frag_idx = 0; in skb_seq_read()
4256 } else if (st->cur_skb->next) { in skb_seq_read()
4257 st->cur_skb = st->cur_skb->next; in skb_seq_read()
4258 st->frag_idx = 0; in skb_seq_read()
4267 * skb_abort_seq_read - Abort a sequential read of skb data
4275 if (st->frag_data) in skb_abort_seq_read()
4276 kunmap_atomic(st->frag_data); in skb_abort_seq_read()
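The skb_prepare_seq_read()/skb_seq_read()/skb_abort_seq_read() matches show the iterator state and kmap handling. The usual read loop looks roughly like this hedged sketch, with my_process() as a hypothetical consumer:

        struct skb_seq_state st;
        const u8 *data;
        unsigned int consumed = 0, avail;

        skb_prepare_seq_read(skb, 0, skb->len, &st);    /* walk the whole skb */

        while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
                my_process(data, avail);        /* 'data' is 'avail' contiguous bytes */
                consumed += avail;
        }

        skb_abort_seq_read(&st);        /* needed if the walk stops early; a no-op after a full walk */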
4280 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
4295 * skb_find_text - Find a text pattern in skb data
4309 unsigned int patlen = config->ops->get_pattern_len(config); in skb_find_text()
4315 config->get_next_block = skb_ts_get_next_block; in skb_find_text()
4316 config->finish = skb_ts_finish; in skb_find_text()
4321 return (ret + patlen <= to - from ? ret : UINT_MAX); in skb_find_text()
4328 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
4331 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
4337 return -EMSGSIZE; in skb_append_pagefrags()
4345 * skb_pull_rcsum - pull skb and update receive checksum
4357 unsigned char *data = skb->data; in skb_pull_rcsum()
4359 BUG_ON(len > skb->len); in skb_pull_rcsum()
4362 return skb->data; in skb_pull_rcsum()
4371 page = virt_to_head_page(frag_skb->head); in skb_head_frag_to_page_desc()
4372 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - in skb_head_frag_to_page_desc()
4382 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4390 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4397 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4401 list_skb = list_skb->next; in skb_segment_list()
4404 delta_truesize += nskb->truesize; in skb_segment_list()
4412 err = -ENOMEM; in skb_segment_list()
4417 skb->next = nskb; in skb_segment_list()
4419 tail->next = nskb; in skb_segment_list()
4422 nskb->next = list_skb; in skb_segment_list()
4428 delta_len += nskb->len; in skb_segment_list()
4430 skb_push(nskb, -skb_network_offset(nskb) + offset); in skb_segment_list()
4433 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4436 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4437 nskb->transport_header += len_diff; in skb_segment_list()
4438 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4439 nskb->data - tnl_hlen, in skb_segment_list()
4447 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4448 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4449 skb->len = skb->len - delta_len; in skb_segment_list()
4453 skb->prev = tail; in skb_segment_list()
4464 kfree_skb_list(skb->next); in skb_segment_list()
4465 skb->next = NULL; in skb_segment_list()
4466 return ERR_PTR(-ENOMEM); in skb_segment_list()
4471 * skb_segment - Perform protocol segmentation on skb.
4473 * @features: features for the output path (see dev->features)
4484 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; in skb_segment()
4485 unsigned int mss = skb_shinfo(head_skb)->gso_size; in skb_segment()
4486 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); in skb_segment()
4491 unsigned int len = head_skb->len; in skb_segment()
4496 int err = -ENOMEM; in skb_segment()
4500 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && in skb_segment()
4504 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { in skb_segment()
4505 if (skb_headlen(check_skb) && !check_skb->head_frag) { in skb_segment()
4524 return ERR_PTR(-EINVAL); in skb_segment()
4535 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) in skb_segment()
4546 frag_len = list_skb->len; in skb_segment()
4548 if (frag_len != iter->len && iter->next) in skb_segment()
4550 if (skb_headlen(iter) && !iter->head_frag) in skb_segment()
4553 len -= iter->len; in skb_segment()
4565 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; in skb_segment()
4577 return ERR_PTR(-ENOMEM); in skb_segment()
4579 nfrags = skb_shinfo(head_skb)->nr_frags; in skb_segment()
4580 frag = skb_shinfo(head_skb)->frags; in skb_segment()
4590 len = list_skb->len; in skb_segment()
4592 len = head_skb->len - offset; in skb_segment()
4597 hsize = skb_headlen(head_skb) - offset; in skb_segment()
4608 nfrags = skb_shinfo(list_skb)->nr_frags; in skb_segment()
4609 frag = skb_shinfo(list_skb)->frags; in skb_segment()
4625 list_skb = list_skb->next; in skb_segment()
4638 nskb->truesize += skb_end_offset(nskb) - hsize; in skb_segment()
4659 tail->next = nskb; in skb_segment()
4666 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); in skb_segment()
4669 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, in skb_segment()
4670 nskb->data - tnl_hlen, in skb_segment()
4673 if (nskb->len == len + doffset) in skb_segment()
4678 if (!nskb->remcsum_offload) in skb_segment()
4679 nskb->ip_summed = CHECKSUM_NONE; in skb_segment()
4680 SKB_GSO_CB(nskb)->csum = in skb_segment()
4685 SKB_GSO_CB(nskb)->csum_start = in skb_segment()
4694 nskb_frag = skb_shinfo(nskb)->frags; in skb_segment()
4699 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & in skb_segment()
4713 nfrags = skb_shinfo(list_skb)->nr_frags; in skb_segment()
4714 frag = skb_shinfo(list_skb)->frags; in skb_segment()
4719 BUG_ON(!list_skb->head_frag); in skb_segment()
4722 i--; in skb_segment()
4723 frag--; in skb_segment()
4726 list_skb = list_skb->next; in skb_segment()
4729 if (unlikely(skb_shinfo(nskb)->nr_frags >= in skb_segment()
4734 err = -EINVAL; in skb_segment()
4743 skb_frag_off_add(nskb_frag, offset - pos); in skb_segment()
4744 skb_frag_size_sub(nskb_frag, offset - pos); in skb_segment()
4747 skb_shinfo(nskb)->nr_frags++; in skb_segment()
4754 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); in skb_segment()
4762 nskb->data_len = len - hsize; in skb_segment()
4763 nskb->len += nskb->data_len; in skb_segment()
4764 nskb->truesize += nskb->data_len; in skb_segment()
4772 if (!nskb->remcsum_offload) in skb_segment()
4773 nskb->ip_summed = CHECKSUM_NONE; in skb_segment()
4774 SKB_GSO_CB(nskb)->csum = in skb_segment()
4776 nskb->len - doffset, 0); in skb_segment()
4777 SKB_GSO_CB(nskb)->csum_start = in skb_segment()
4780 } while ((offset += len) < head_skb->len); in skb_segment()
4783 * Put it in segs->prev to avoid walking the list. in skb_segment()
4786 segs->prev = tail; in skb_segment()
4790 int type = skb_shinfo(head_skb)->gso_type; in skb_segment()
4791 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; in skb_segment()
4800 for (iter = segs; iter; iter = iter->next) { in skb_segment()
4801 skb_shinfo(iter)->gso_size = gso_size; in skb_segment()
4802 skb_shinfo(iter)->gso_segs = partial_segs; in skb_segment()
4803 skb_shinfo(iter)->gso_type = type; in skb_segment()
4804 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; in skb_segment()
4807 if (tail->len - doffset <= gso_size) in skb_segment()
4808 skb_shinfo(tail)->gso_size = 0; in skb_segment()
4810 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); in skb_segment()
4817 if (head_skb->destructor == sock_wfree) { in skb_segment()
4818 swap(tail->truesize, head_skb->truesize); in skb_segment()
4819 swap(tail->destructor, head_skb->destructor); in skb_segment()
4820 swap(tail->sk, head_skb->sk); in skb_segment()
4886 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
4906 * struct skb_shared_info is located at the end of skb->head, in skb_init()
4924 int i, copy = start - offset; in __skb_to_sgvec()
4929 return -EMSGSIZE; in __skb_to_sgvec()
4934 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
4936 if ((len -= copy) == 0) in __skb_to_sgvec()
4941 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
4946 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
4947 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
4948 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
4949 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) in __skb_to_sgvec()
4950 return -EMSGSIZE; in __skb_to_sgvec()
4955 skb_frag_off(frag) + offset - start); in __skb_to_sgvec()
4957 if (!(len -= copy)) in __skb_to_sgvec()
4969 end = start + frag_iter->len; in __skb_to_sgvec()
4970 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
4971 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) in __skb_to_sgvec()
4972 return -EMSGSIZE; in __skb_to_sgvec()
4976 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, in __skb_to_sgvec()
4981 if ((len -= copy) == 0) in __skb_to_sgvec()
4992 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4994 * @sg: The scatter-gather list to map into
4998 * Fill the specified scatter-gather list with mappings/pointers into a
5000 * the number of scatterlist items used, or -EMSGSIZE if the contents
5010 sg_mark_end(&sg[nsg - 1]); in skb_to_sgvec()
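The skb_to_sgvec() matches show the recursion over frags and frag_list. A hedged sketch of mapping one skb into a scatterlist, assuming no frag_list so that MAX_SKB_FRAGS + 1 entries are enough:

        struct scatterlist sg[MAX_SKB_FRAGS + 1];
        int nsg;

        sg_init_table(sg, ARRAY_SIZE(sg));

        nsg = skb_to_sgvec(skb, sg, 0, skb->len);       /* entries used, or -EMSGSIZE */
        if (nsg < 0)
                return nsg;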
5045 * skb_cow_data - Check that a socket buffer's data buffers are writable
5071 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
5073 return -ENOMEM; in skb_cow_data()
5083 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
5084 return -ENOMEM; in skb_cow_data()
5094 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
5109 if (skb1->next == NULL && tailbits) { in skb_cow_data()
5110 if (skb_shinfo(skb1)->nr_frags || in skb_cow_data()
5119 skb_shinfo(skb1)->nr_frags || in skb_cow_data()
5132 return -ENOMEM; in skb_cow_data()
5134 if (skb1->sk) in skb_cow_data()
5135 skb_set_owner_w(skb2, skb1->sk); in skb_cow_data()
5140 skb2->next = skb1->next; in skb_cow_data()
5147 skb_p = &skb1->next; in skb_cow_data()
5156 struct sock *sk = skb->sk; in sock_rmem_free()
5158 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
5166 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
5175 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
5176 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) in sock_queue_err_skb()
5177 return -ENOMEM; in sock_queue_err_skb()
5180 skb->sk = sk; in sock_queue_err_skb()
5181 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
5182 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
5188 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
5197 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
5198 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
5203 struct sk_buff_head *q = &sk->sk_error_queue; in sock_dequeue_err_skb()
5211 spin_lock_irqsave(&q->lock, flags); in sock_dequeue_err_skb()
5216 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; in sock_dequeue_err_skb()
5218 spin_unlock_irqrestore(&q->lock, flags); in sock_dequeue_err_skb()
5221 sk->sk_err = 0; in sock_dequeue_err_skb()
5231 * skb_clone_sk - create clone of skb, and take reference to socket
5245 struct sock *sk = skb->sk; in skb_clone_sk()
5248 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) in skb_clone_sk()
5257 clone->sk = sk; in skb_clone_sk()
5258 clone->destructor = sock_efree; in skb_clone_sk()
5272 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
5276 serr->ee.ee_errno = ENOMSG; in __skb_complete_tx_timestamp()
5277 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; in __skb_complete_tx_timestamp()
5278 serr->ee.ee_info = tstype; in __skb_complete_tx_timestamp()
5279 serr->opt_stats = opt_stats; in __skb_complete_tx_timestamp()
5280 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
5281 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { in __skb_complete_tx_timestamp()
5282 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
5284 serr->ee.ee_data -= atomic_read(&sk->sk_tskey); in __skb_complete_tx_timestamp()
5300 read_lock_bh(&sk->sk_callback_lock); in skb_may_tx_timestamp()
5301 ret = sk->sk_socket && sk->sk_socket->file && in skb_may_tx_timestamp()
5302 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); in skb_may_tx_timestamp()
5303 read_unlock_bh(&sk->sk_callback_lock); in skb_may_tx_timestamp()
5310 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
5318 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { in skb_complete_tx_timestamp()
5342 tsflags = READ_ONCE(sk->sk_tsflags); in __skb_tstamp_tx()
5344 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) in __skb_tstamp_tx()
5373 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5375 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5390 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, in skb_tstamp_tx()
5398 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5402 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5403 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5407 serr->ee.ee_errno = ENOMSG; in skb_complete_wifi_ack()
5408 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; in skb_complete_wifi_ack()
5413 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { in skb_complete_wifi_ack()
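/*
 * Illustrative sketch (not part of skbuff.c): a wireless TX-status path.
 * Only frames whose socket asked for status (SKBTX_WIFI_STATUS) go through
 * skb_complete_wifi_ack(); the skb is consumed on both branches.
 */
static void report_tx_status(struct sk_buff *skb, bool acked)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
                skb_complete_wifi_ack(skb, acked);
        else
                dev_kfree_skb_any(skb);
}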
5424 * skb_partial_csum_set - set up and verify partial csum values for packet
5426 * @start: the number of bytes after skb->data to start checksumming.
5429 * For untrusted partially-checksummed packets, we need to make sure the values
5430 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5432 * This function checks and sets those values and skb->ip_summed: if this
5445 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5446 skb->csum_start = csum_start; in skb_partial_csum_set()
5447 skb->csum_offset = off; in skb_partial_csum_set()
5448 skb->transport_header = csum_start; in skb_partial_csum_set()
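/*
 * Illustrative sketch (not part of skbuff.c): validating checksum metadata
 * that arrived from an untrusted source (virtio-style header fields are the
 * assumption here). skb_partial_csum_set() rejects offsets that do not fit
 * the packet, so a bogus @csum_start/@csum_offset cannot make later checksum
 * completion write outside the skb.
 */
static int apply_guest_csum_hint(struct sk_buff *skb, u16 csum_start, u16 csum_offset)
{
        if (!skb_partial_csum_set(skb, csum_start, csum_offset))
                return -EINVAL;         /* metadata points outside the packet */

        /* skb->ip_summed is now CHECKSUM_PARTIAL; the stack or the NIC will
         * fold the checksum at csum_start + csum_offset. */
        return 0;
}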
5462 if (max > skb->len) in skb_maybe_pull_tail()
5463 max = skb->len; in skb_maybe_pull_tail()
5465 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5466 return -ENOMEM; in skb_maybe_pull_tail()
5469 return -EPROTO; in skb_maybe_pull_tail()
5489 err = -EPROTO; in skb_checksum_setup_ip()
5490 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5498 err = -EPROTO; in skb_checksum_setup_ip()
5499 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5502 return ERR_PTR(-EPROTO); in skb_checksum_setup_ip()
5530 err = -EPROTO; in skb_checksum_setup_ipv4()
5535 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5540 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5541 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5542 skb->len - off, in skb_checksum_setup_ipv4()
5543 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5577 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5579 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5595 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5610 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5626 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) in skb_checksum_setup_ipv6()
5629 nexthdr = hp->nexthdr; in skb_checksum_setup_ipv6()
5639 err = -EPROTO; in skb_checksum_setup_ipv6()
5649 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5650 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5651 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5659 * skb_checksum_setup - set up partial checksum offset
5661 * @recalculate: if true the pseudo-header checksum will be recalculated
5667 switch (skb->protocol) { in skb_checksum_setup()
5677 err = -EPROTO; in skb_checksum_setup()
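/*
 * Illustrative sketch (not part of skbuff.c): a backend driver (xen-netback
 * style) repairing a guest frame that claims CHECKSUM_PARTIAL. Passing
 * recalculate=true also rebuilds the TCP/UDP pseudo-header checksum.
 */
static int fixup_guest_checksum(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, true);   /* -EPROTO on unsupported protocols */
}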
5686 * skb_checksum_maybe_trim - maybe trims the given skb
5693 * (e.g. transport_len exceeds skb length or out-of-memory).
5705 if (skb->len < len) in skb_checksum_maybe_trim()
5707 else if (skb->len == len) in skb_checksum_maybe_trim()
5724 * skb_checksum_trimmed - validate checksum of an skb
5774 skb->dev->name); in __skb_warn_lro_forwarding()
5790 * skb_try_coalesce - try to merge skb to prior one
5800 int i, delta, len = from->len; in skb_try_coalesce()
5807 /* In general, avoid mixing page_pool and non-page_pool allocated in skb_try_coalesce()
5809 * references if @from is cloned and !@to->pp_recycle but its in skb_try_coalesce()
5813 if (to->pp_recycle != from->pp_recycle) in skb_try_coalesce()
5825 if (to_shinfo->frag_list || from_shinfo->frag_list) in skb_try_coalesce()
5834 if (to_shinfo->nr_frags + in skb_try_coalesce()
5835 from_shinfo->nr_frags >= MAX_SKB_FRAGS) in skb_try_coalesce()
5841 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_try_coalesce()
5843 page = virt_to_head_page(from->head); in skb_try_coalesce()
5844 offset = from->data - (unsigned char *)page_address(page); in skb_try_coalesce()
5846 skb_fill_page_desc(to, to_shinfo->nr_frags, in skb_try_coalesce()
5850 if (to_shinfo->nr_frags + in skb_try_coalesce()
5851 from_shinfo->nr_frags > MAX_SKB_FRAGS) in skb_try_coalesce()
5854 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); in skb_try_coalesce()
5859 memcpy(to_shinfo->frags + to_shinfo->nr_frags, in skb_try_coalesce()
5860 from_shinfo->frags, in skb_try_coalesce()
5861 from_shinfo->nr_frags * sizeof(skb_frag_t)); in skb_try_coalesce()
5862 to_shinfo->nr_frags += from_shinfo->nr_frags; in skb_try_coalesce()
5865 from_shinfo->nr_frags = 0; in skb_try_coalesce()
5871 for (i = 0; i < from_shinfo->nr_frags; i++) in skb_try_coalesce()
5872 __skb_frag_ref(&from_shinfo->frags[i]); in skb_try_coalesce()
5875 to->truesize += delta; in skb_try_coalesce()
5876 to->len += len; in skb_try_coalesce()
5877 to->data_len += len; in skb_try_coalesce()
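/*
 * Illustrative sketch (not part of skbuff.c): receive-queue coalescing in the
 * style of TCP. @tail is the last skb already queued; on success the payload
 * of @skb now lives in @tail and only the leftover shell needs freeing.
 */
static bool rx_coalesce(struct sk_buff *tail, struct sk_buff *skb, int *charged)
{
        bool fragstolen = false;
        int delta = 0;

        if (!tail || !skb_try_coalesce(tail, skb, &fragstolen, &delta))
                return false;

        *charged = delta;                       /* extra truesize to account to the queue */
        kfree_skb_partial(skb, fragstolen);     /* frees only the metadata if the head was stolen */
        return true;
}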
5885 * skb_scrub_packet - scrub an skb
5899 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
5900 skb->skb_iif = 0; in skb_scrub_packet()
5901 skb->ignore_df = 0; in skb_scrub_packet()
5908 skb->offload_fwd_mark = 0; in skb_scrub_packet()
5909 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
5916 skb->mark = 0; in skb_scrub_packet()
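/*
 * Illustrative sketch (not part of skbuff.c): a tunnel receive path handing
 * the decapsulated packet to another namespace. @tunnel_net is the receiving
 * namespace; xnet is true only when the packet actually crosses namespaces,
 * which additionally clears skb->mark.
 */
static void tunnel_finish_decap(struct sk_buff *skb, struct net *tunnel_net)
{
        bool xnet = !net_eq(tunnel_net, dev_net(skb->dev));

        skb_scrub_packet(skb, xnet);
}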
5931 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
5934 mac_len - VLAN_HLEN - ETH_TLEN); in skb_reorder_vlan_header()
5939 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
5943 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
5953 /* vlan_tci is already set up so leave this for another time */ in skb_vlan_untag()
5964 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
5965 vlan_tci = ntohs(vhdr->h_vlan_TCI); in skb_vlan_untag()
5966 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
5991 return -ENOMEM; in skb_ensure_writable()
6002 int needed_headroom = dev->needed_headroom; in skb_ensure_writable_head_tail()
6003 int needed_tailroom = dev->needed_tailroom; in skb_ensure_writable_head_tail()
6010 if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) in skb_ensure_writable_head_tail()
6011 needed_tailroom += ETH_ZLEN - skb->len; in skb_ensure_writable_head_tail()
6013 needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); in skb_ensure_writable_head_tail()
6014 needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); in skb_ensure_writable_head_tail()
6030 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
6034 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", in __skb_vlan_pop()
6036 return -EINVAL; in __skb_vlan_pop()
6043 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
6047 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
6059 * Expects skb->data at mac header.
6070 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6078 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
6081 vlan_proto = skb->protocol; in skb_vlan_pop()
6092 * Expects skb->data at mac header.
6097 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
6101 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", in skb_vlan_push()
6103 return -EINVAL; in skb_vlan_push()
6106 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
6111 skb->protocol = skb->vlan_proto; in skb_vlan_push()
6112 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
6114 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
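/*
 * Illustrative sketch (not part of skbuff.c): OVS-style VLAN rewriting.
 * Both helpers expect skb->data to sit at the mac header; @new_vid is a
 * caller-chosen tag.
 */
static int vlan_retag(struct sk_buff *skb, u16 new_vid)
{
        int err;

        err = skb_vlan_pop(skb);        /* outermost tag, hwaccel or in-payload */
        if (err)
                return err;

        return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}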
6122 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6128 * Expects that skb->data points to the mac header and that no VLAN tags are
6131 * Returns 0 on success, -errno otherwise.
6137 return -EPROTO; in skb_eth_pop()
6148 * skb_eth_push() - Add a new Ethernet header at the head of a packet
6156 * Expects that skb->data points to the mac header, which must be empty.
6158 * Returns 0 on success, -errno otherwise.
6167 return -EPROTO; in skb_eth_push()
6178 ether_addr_copy(eth->h_dest, dst); in skb_eth_push()
6179 ether_addr_copy(eth->h_source, src); in skb_eth_push()
6180 eth->h_proto = skb->protocol; in skb_eth_push()
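/*
 * Illustrative sketch (not part of skbuff.c): converting an Ethernet frame to
 * a bare L3 packet and re-encapsulating it with new addresses, the way an
 * OVS-style action pipeline might. @dst/@src are caller-supplied MAC addresses.
 */
static int eth_reencap(struct sk_buff *skb, const u8 *dst, const u8 *src)
{
        int err;

        err = skb_eth_pop(skb);         /* skb->data must be at the mac header */
        if (err)
                return err;

        return skb_eth_push(skb, dst, src);     /* grows headroom if needed */
}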
6192 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
6193 __be16 diff[] = { ~hdr->h_proto, ethertype }; in skb_mod_eth_type()
6195 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
6198 hdr->h_proto = ethertype; in skb_mod_eth_type()
6202 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6212 * Expects skb->data at mac header.
6214 * Returns 0 on success, -errno otherwise.
6223 return -EINVAL; in skb_mpls_push()
6226 if (skb->encapsulation) in skb_mpls_push()
6227 return -EINVAL; in skb_mpls_push()
6233 if (!skb->inner_protocol) { in skb_mpls_push()
6235 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
6239 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6246 lse->label_stack_entry = mpls_lse; in skb_mpls_push()
6251 skb->protocol = mpls_proto; in skb_mpls_push()
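/*
 * Illustrative sketch (not part of skbuff.c): imposing a bottom-of-stack MPLS
 * label in front of the network header. @label and @ttl are illustrative
 * values; the LSE layout constants come from <uapi/linux/mpls.h>.
 */
static int impose_mpls_label(struct sk_buff *skb, u32 label, u8 ttl)
{
        __be32 lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
                                 MPLS_LS_S_MASK |               /* bottom of stack */
                                 (ttl << MPLS_LS_TTL_SHIFT));

        return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
                             skb->mac_len, true /* ethernet */);
}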
6258 * skb_mpls_pop() - pop the outermost MPLS header
6265 * Expects skb->data at mac header.
6267 * Returns 0 on success, -errno otherwise.
6274 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6293 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6296 skb->protocol = next_proto; in skb_mpls_pop()
6303 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6308 * Expects skb->data at mac header.
6310 * Returns 0 on success, -errno otherwise.
6316 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6317 return -EINVAL; in skb_mpls_update_lse()
6319 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6323 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6324 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6326 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6329 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6336 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6340 * Expects skb->data at mac header.
6342 * Returns 0 on success, -errno otherwise.
6349 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6350 return -EINVAL; in skb_mpls_dec_ttl()
6353 return -ENOMEM; in skb_mpls_dec_ttl()
6355 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6357 if (!--ttl) in skb_mpls_dec_ttl()
6358 return -EINVAL; in skb_mpls_dec_ttl()
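/*
 * Illustrative sketch (not part of skbuff.c): penultimate-hop style handling.
 * Decrement the TTL first (it fails once the TTL would reach zero), then pop
 * the last label and restore an IPv4 ethertype.
 */
static int mpls_php(struct sk_buff *skb)
{
        int err;

        err = skb_mpls_dec_ttl(skb);
        if (err)
                return err;     /* -EINVAL also covers TTL expiry */

        return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
}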
6368 * alloc_skb_with_frags - allocate skb with page frags
6389 *errcode = -EMSGSIZE; in alloc_skb_with_frags()
6393 *errcode = -ENOBUFS; in alloc_skb_with_frags()
6399 if (nr_frags == MAX_SKB_FRAGS - 1) in alloc_skb_with_frags()
6402 order--; in alloc_skb_with_frags()
6410 order--; in alloc_skb_with_frags()
6422 skb->truesize += (PAGE_SIZE << order); in alloc_skb_with_frags()
6423 data_len -= chunk; in alloc_skb_with_frags()
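/*
 * Illustrative sketch (not part of skbuff.c): building a large send skb the
 * way sock_alloc_send_pskb() callers do. The helper only attaches empty page
 * frags, so the caller still accounts @data_len into len/data_len before
 * copying the payload in. build_large_skb() is hypothetical.
 */
static struct sk_buff *build_large_skb(unsigned long header_len,
                                       unsigned long data_len)
{
        struct sk_buff *skb;
        int err;

        skb = alloc_skb_with_frags(header_len, data_len,
                                   PAGE_ALLOC_COSTLY_ORDER, &err, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(err);

        skb_put(skb, header_len);       /* linear area reserved for headers */
        skb->data_len = data_len;
        skb->len += data_len;
        return skb;
}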
6439 int new_hlen = headlen - off; in pskb_carve_inside_header()
6447 return -ENOMEM; in pskb_carve_inside_header()
6452 skb->len -= off; in pskb_carve_inside_header()
6457 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6462 return -ENOMEM; in pskb_carve_inside_header()
6464 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6470 /* we can reuse existing refcount - all we did was in pskb_carve_inside_header()
6476 skb->head = data; in pskb_carve_inside_header()
6477 skb->data = data; in pskb_carve_inside_header()
6478 skb->head_frag = 0; in pskb_carve_inside_header()
6482 skb->cloned = 0; in pskb_carve_inside_header()
6483 skb->hdr_len = 0; in pskb_carve_inside_header()
6484 skb->nohdr = 0; in pskb_carve_inside_header()
6485 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6499 struct sk_buff *list = shinfo->frag_list; in pskb_carve_frag_list()
6506 return -EFAULT; in pskb_carve_frag_list()
6508 if (list->len <= eat) { in pskb_carve_frag_list()
6510 eat -= list->len; in pskb_carve_frag_list()
6511 list = list->next; in pskb_carve_frag_list()
6518 return -ENOMEM; in pskb_carve_frag_list()
6519 insp = list->next; in pskb_carve_frag_list()
6527 return -ENOMEM; in pskb_carve_frag_list()
6534 while ((list = shinfo->frag_list) != insp) { in pskb_carve_frag_list()
6535 shinfo->frag_list = list->next; in pskb_carve_frag_list()
6540 clone->next = list; in pskb_carve_frag_list()
6541 shinfo->frag_list = clone; in pskb_carve_frag_list()
6547 * non-linear part of skb
6555 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6563 return -ENOMEM; in pskb_carve_inside_nonlinear()
6570 return -ENOMEM; in pskb_carve_inside_nonlinear()
6574 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6577 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6588 skb_frag_off_add(&shinfo->frags[0], off - pos); in pskb_carve_inside_nonlinear()
6589 skb_frag_size_sub(&shinfo->frags[0], off - pos); in pskb_carve_inside_nonlinear()
6596 shinfo->nr_frags = k; in pskb_carve_inside_nonlinear()
6601 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6602 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ in pskb_carve_inside_nonlinear()
6604 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6606 return -ENOMEM; in pskb_carve_inside_nonlinear()
6610 skb->head = data; in pskb_carve_inside_nonlinear()
6611 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6612 skb->data = data; in pskb_carve_inside_nonlinear()
6616 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6617 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6618 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6619 skb->len -= off; in pskb_carve_inside_nonlinear()
6620 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6621 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6657 * skb_condense - try to get rid of fragments/frag_list if possible
6661 * If packet has bytes in frags and enough tail room in skb->head,
6665 * We do not reallocate skb->head thus can not fail.
6666 * Caller must re-evaluate skb->truesize if needed.
6670 if (skb->data_len) { in skb_condense()
6671 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6676 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6678 /* At this point, skb->truesize might be overestimated, in skb_condense()
6681 * When we pulled its content into skb->head, fragment in skb_condense()
6683 * adjust skb->truesize, not knowing the frag truesize. in skb_condense()
6685 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
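/*
 * Illustrative sketch (not part of skbuff.c): shrinking an skb before it sits
 * on a queue for a while (TCP does this when backlogging). The caller re-reads
 * truesize afterwards because skb_condense() may lower it.
 */
static unsigned int charge_for_backlog(struct sk_buff *skb)
{
        skb_condense(skb);              /* pull small frag data into the linear area */

        return skb->truesize;           /* charge the (possibly reduced) size */
}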
6692 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); in skb_ext_get_ptr()
6696 * __skb_ext_alloc - allocate a new skb extensions storage
6709 memset(new->offset, 0, sizeof(new->offset)); in __skb_ext_alloc()
6710 refcount_set(&new->refcnt, 1); in __skb_ext_alloc()
6721 if (refcount_read(&old->refcnt) == 1) in skb_ext_maybe_cow()
6728 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); in skb_ext_maybe_cow()
6729 refcount_set(&new->refcnt, 1); in skb_ext_maybe_cow()
6736 for (i = 0; i < sp->len; i++) in skb_ext_maybe_cow()
6737 xfrm_state_hold(sp->xvec[i]); in skb_ext_maybe_cow()
6745 * __skb_ext_set - attach the specified extension storage to this skb
6761 ext->chunks = newlen; in __skb_ext_set()
6762 ext->offset[id] = newoff; in __skb_ext_set()
6763 skb->extensions = ext; in __skb_ext_set()
6764 skb->active_extensions = 1 << id; in __skb_ext_set()
6769 * skb_ext_add - allocate space for given extension, COW if needed
6787 if (skb->active_extensions) { in skb_ext_add()
6788 old = skb->extensions; in skb_ext_add()
6790 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6797 newoff = new->chunks; in skb_ext_add()
6807 new->chunks = newlen; in skb_ext_add()
6808 new->offset[id] = newoff; in skb_ext_add()
6810 skb->slow_gro = 1; in skb_ext_add()
6811 skb->extensions = new; in skb_ext_add()
6812 skb->active_extensions |= 1 << id; in skb_ext_add()
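/*
 * Illustrative sketch (not part of skbuff.c): attaching and consuming a TC
 * extension, assuming CONFIG_NET_TC_SKB_EXT. skb_ext_add() returns
 * uninitialised storage, so the new extension is zeroed before use.
 */
static int stash_tc_chain(struct sk_buff *skb, u32 chain)
{
        struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

        if (!ext)
                return -ENOMEM;

        memset(ext, 0, sizeof(*ext));
        ext->chain = chain;
        return 0;
}

static u32 pop_tc_chain(struct sk_buff *skb)
{
        struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);
        u32 chain = ext ? ext->chain : 0;

        if (ext)
                skb_ext_del(skb, TC_SKB_EXT);
        return chain;
}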
6822 for (i = 0; i < sp->len; i++) in skb_ext_put_sp()
6823 xfrm_state_put(sp->xvec[i]); in skb_ext_put_sp()
6830 if (flow->key) in skb_ext_put_mctp()
6831 mctp_key_unref(flow->key); in skb_ext_put_mctp()
6837 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6839 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6840 if (skb->active_extensions == 0) { in __skb_ext_del()
6841 skb->extensions = NULL; in __skb_ext_del()
6845 refcount_read(&ext->refcnt) == 1) { in __skb_ext_del()
6849 sp->len = 0; in __skb_ext_del()
6860 if (refcount_read(&ext->refcnt) == 1) in __skb_ext_put()
6863 if (!refcount_dec_and_test(&ext->refcnt)) in __skb_ext_put()
6881 * skb_attempt_defer_free - queue skb for remote freeing
6884 * Put @skb in a per-cpu list, using the cpu which
6890 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
6903 DEBUG_NET_WARN_ON_ONCE(skb->destructor); in skb_attempt_defer_free()
6907 if (READ_ONCE(sd->defer_count) >= defer_max) in skb_attempt_defer_free()
6910 spin_lock_bh(&sd->defer_lock); in skb_attempt_defer_free()
6912 kick = sd->defer_count == (defer_max >> 1); in skb_attempt_defer_free()
6914 WRITE_ONCE(sd->defer_count, sd->defer_count + 1); in skb_attempt_defer_free()
6916 skb->next = sd->defer_list; in skb_attempt_defer_free()
6918 WRITE_ONCE(sd->defer_list, skb); in skb_attempt_defer_free()
6919 spin_unlock_bh(&sd->defer_lock); in skb_attempt_defer_free()
6924 if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) in skb_attempt_defer_free()
6925 smp_call_function_single_async(cpu, &sd->defer_csd); in skb_attempt_defer_free()
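/*
 * Illustrative sketch (not part of skbuff.c): a TCP-style "eat" helper.
 * skb_attempt_defer_free() expects an skb with no destructor, so the receive
 * accounting is unwound first; the skb is then freed on (or shipped back to)
 * the CPU that allocated it.
 */
static void eat_rx_skb(struct sock *sk, struct sk_buff *skb)
{
        __skb_unlink(skb, &sk->sk_receive_queue);

        if (likely(skb->destructor == sock_rfree)) {
                sock_rfree(skb);                /* uncharge sk_rmem_alloc now */
                skb->destructor = NULL;
                skb->sk = NULL;
                skb_attempt_defer_free(skb);    /* free on the allocating CPU */
                return;
        }
        __kfree_skb(skb);
}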
6937 skb->csum = csum_block_add(skb->csum, csum, skb->len); in skb_splice_csum_page()
6941 * skb_splice_from_iter - Splice (or copy) pages to skbuff
6952 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
6963 while (iter->count > 0) { in skb_splice_from_iter()
6967 ret = -EMSGSIZE; in skb_splice_from_iter()
6968 space = frag_limit - skb_shinfo(skb)->nr_frags; in skb_splice_from_iter()
6977 ret = len ?: -EIO; in skb_splice_from_iter()
6984 size_t part = min_t(size_t, PAGE_SIZE - off, len); in skb_splice_from_iter()
6986 ret = -EIO; in skb_splice_from_iter()
6997 if (skb->ip_summed == CHECKSUM_NONE) in skb_splice_from_iter()
7002 maxsize -= part; in skb_splice_from_iter()
7003 len -= part; in skb_splice_from_iter()
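/*
 * Illustrative sketch (not part of skbuff.c): an MSG_SPLICE_PAGES sendmsg
 * path. Instead of copying, the caller-provided pages are attached as frags;
 * the return value is how much was actually spliced, or a negative errno.
 * splice_payload() is hypothetical.
 */
static ssize_t splice_payload(struct sk_buff *skb, struct msghdr *msg, size_t len)
{
        ssize_t spliced;

        spliced = skb_splice_from_iter(skb, &msg->msg_iter, len, GFP_KERNEL);
        if (spliced < 0)
                return spliced;         /* e.g. -EMSGSIZE once the frags are full */

        /* charge @spliced bytes to the socket's send-buffer accounting as the
         * surrounding protocol requires */
        return spliced;
}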
7043 if (WARN_ON_ONCE(!i->data_source)) in csum_and_copy_from_iter_full()