Lines Matching full:skb

44  * DOC: skb checksums
105 * not in skb->csum. Thus, skb->csum is undefined in this case.
133 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
145 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
155 * referred to by skb->csum_start + skb->csum_offset and any preceding
194 * The skb was already checksummed by the protocol, or a checksum is not
253 /* Maximum value in skb->csum_level */
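
The DOC block above describes skb->ip_summed and skb->csum_level. As a minimal, hypothetical sketch (example_rx_csum() is not a kernel function), this is how a receive path might report hardware-validated checksums; csum_level counts checksums verified beyond the first:

#include <linux/skbuff.h>

/* Hypothetical rx completion: hardware validated the outer checksum
 * and possibly one inner (tunnel) checksum. csum_level holds the
 * number of checksums verified *beyond* the first one. */
static void example_rx_csum(struct sk_buff *skb, bool outer_ok, bool inner_ok)
{
	if (outer_ok) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = inner_ok ? 1 : 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}
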
260 /* For X bytes available in skb->head, what is the minimal
272 /* return minimum truesize of one skb containing X bytes of data */
311 * skb is out in neigh layer.
357 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
369 * skb_frag_size() - Returns the size of a skb fragment
370 * @frag: skb fragment
378 * skb_frag_size_set() - Sets the size of a skb fragment
379 * @frag: skb fragment
388 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
389 * @frag: skb fragment
398 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
399 * @frag: skb fragment
423 * @f: skb frag to operate on
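
A short sketch of the skb_frag_size() accessor in use; example_frags_len() is hypothetical and simply mirrors what __skb_pagelen() computes:

#include <linux/skbuff.h>

/* Sum the sizes of all paged fragments of @skb via skb_frag_size(). */
static unsigned int example_frags_len(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}
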
455 * skb->tstamp.
539 int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
543 * The callback notifies userspace to release buffers when skb DMA is done in
544 * lower device, the skb last reference should be 0 when calling this.
578 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) argument
595 * the end of the header data, ie. at skb->end.
626 * remains valid until skb destructor.
643 * skb_header_cloned() checks if skb is allowed to add / write the headers.
645 * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
646 * (via __skb_header_release()). Any clone created from marked skb will get
651 * <alloc skb>
658 * doing the right thing. In practice there's usually only one payload-only skb.
667 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
668 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
669 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
675 /* This indicates the skb is from an untrusted source. */
743 * this is the part of the skb operated on by the common helpers
748 * Optionally &skb_shared_info.frag_list may point to another skb.
791 * @hdr_len: writable header length of cloned skb
793 * @csum_start: Offset from skb->head where checksumming should start
825 * @head_frag: skb was allocated from page fragments,
847 * @unreadable: indicates that at least 1 of the fragments in this skb is
850 * @decrypted: Decrypted SKB
852 * @tstamp_type: When set, skb->tstamp has the
853 * delivery_time clock base of skb->tstamp.
854 * @napi_id: id of the NAPI struct this skb came from
856 * @alloc_cpu: CPU which did the skb allocation.
866 * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
914 * first. This is owned by whoever has the skb queued ATM.
921 void (*destructor)(struct sk_buff *skb);
1134 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
1135 * @skb: buffer
1137 static inline bool skb_pfmemalloc(const struct sk_buff *skb) in skb_pfmemalloc() argument
1139 return unlikely(skb->pfmemalloc); in skb_pfmemalloc()
1143 * skb might have a dst pointer attached, refcounted or not.
1150 * skb_dst - returns skb dst_entry
1151 * @skb: buffer
1153 * Returns: skb dst_entry, regardless of reference taken or not.
1155 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) in skb_dst() argument
1160 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && in skb_dst()
1163 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); in skb_dst()
1167 * skb_dst_set - sets skb dst
1168 * @skb: buffer
1171 * Sets skb dst, assuming a reference was taken on dst and should
1174 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set() argument
1176 skb->slow_gro |= !!dst; in skb_dst_set()
1177 skb->_skb_refdst = (unsigned long)dst; in skb_dst_set()
1181 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
1182 * @skb: buffer
1185 * Sets skb dst, assuming a reference was not taken on dst.
1190 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set_noref() argument
1193 skb->slow_gro |= !!dst; in skb_dst_set_noref()
1194 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; in skb_dst_set_noref()
1198 * skb_dst_is_noref - Test if skb dst isn't refcounted
1199 * @skb: buffer
1201 static inline bool skb_dst_is_noref(const struct sk_buff *skb) in skb_dst_is_noref() argument
1203 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); in skb_dst_is_noref()
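
A hedged usage sketch for the dst helpers above (example_attach_dst() is hypothetical): skb_dst_set() consumes a dst reference, while skb_dst_set_noref() relies on the caller keeping the dst alive, typically under rcu_read_lock():

#include <linux/skbuff.h>
#include <net/dst.h>

/* Attach a route to an skb, handing a reference over to it. */
static void example_attach_dst(struct sk_buff *skb, struct dst_entry *dst)
{
	dst_hold(dst);		/* reference now owned by the skb */
	skb_dst_set(skb, dst);
}
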
1206 /* For mangling skb->pkt_type from user space side from applications
1216 * skb_napi_id - Returns the skb's NAPI id
1217 * @skb: buffer
1219 static inline unsigned int skb_napi_id(const struct sk_buff *skb) in skb_napi_id() argument
1222 return skb->napi_id; in skb_napi_id()
1228 static inline bool skb_wifi_acked_valid(const struct sk_buff *skb) in skb_wifi_acked_valid() argument
1231 return skb->wifi_acked_valid; in skb_wifi_acked_valid()
1238 * skb_unref - decrement the skb's reference count
1239 * @skb: buffer
1241 * Returns: true if we can free the skb.
1243 static inline bool skb_unref(struct sk_buff *skb) in skb_unref() argument
1245 if (unlikely(!skb)) in skb_unref()
1247 if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1)) in skb_unref()
1249 else if (likely(!refcount_dec_and_test(&skb->users))) in skb_unref()
1255 static inline bool skb_data_unref(const struct sk_buff *skb, in skb_data_unref() argument
1260 if (!skb->cloned) in skb_data_unref()
1263 bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1; in skb_data_unref()
1273 void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
1277 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) in kfree_skb_reason() argument
1279 sk_skb_reason_drop(NULL, skb, reason); in kfree_skb_reason()
1284 * @skb: buffer to free
1286 static inline void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
1288 kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); in kfree_skb()
1291 void skb_release_head_state(struct sk_buff *skb);
1294 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1295 void skb_tx_error(struct sk_buff *skb);
1303 void consume_skb(struct sk_buff *skb);
1305 static inline void consume_skb(struct sk_buff *skb) in consume_skb() argument
1307 return kfree_skb(skb); in consume_skb()
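
The split between the two free paths matters for drop tracing: kfree_skb_reason() records why a packet was dropped, consume_skb() signals a normal, non-error free. A minimal hypothetical helper:

#include <linux/skbuff.h>

/* Hypothetical: free an skb, distinguishing delivery from a drop. */
static void example_free(struct sk_buff *skb, bool delivered)
{
	if (delivered)
		consume_skb(skb);
	else
		kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
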
1311 void __consume_stateless_skb(struct sk_buff *skb);
1312 void __kfree_skb(struct sk_buff *skb);
1314 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1322 struct sk_buff *build_skb_around(struct sk_buff *skb,
1324 void skb_attempt_defer_free(struct sk_buff *skb);
1362 * @skb: buffer
1364 * Returns: true if skb is a fast clone, and its clone is not freed.
1369 const struct sk_buff *skb) in skb_fclone_busy() argument
1373 fclones = container_of(skb, struct sk_buff_fclones, skb1); in skb_fclone_busy()
1375 return skb->fclone == SKB_FCLONE_ORIG && in skb_fclone_busy()
1394 void skb_headers_offset_update(struct sk_buff *skb, int off);
1395 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1396 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1398 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1399 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1401 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, in __pskb_copy() argument
1404 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); in __pskb_copy()
1407 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1408 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1410 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
1411 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1413 int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1415 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1417 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1418 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1421 * skb_pad - zero pad the tail of an skb
1422 * @skb: buffer to pad
1429 * May return error in out of memory cases. The skb is freed on error.
1431 static inline int skb_pad(struct sk_buff *skb, int pad) in skb_pad() argument
1433 return __skb_pad(skb, pad, true); in skb_pad()
1437 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1451 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1458 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1494 static inline void skb_clear_hash(struct sk_buff *skb) in skb_clear_hash() argument
1496 skb->hash = 0; in skb_clear_hash()
1497 skb->sw_hash = 0; in skb_clear_hash()
1498 skb->l4_hash = 0; in skb_clear_hash()
1501 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) in skb_clear_hash_if_not_l4() argument
1503 if (!skb->l4_hash) in skb_clear_hash_if_not_l4()
1504 skb_clear_hash(skb); in skb_clear_hash_if_not_l4()
1508 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4) in __skb_set_hash() argument
1510 skb->l4_hash = is_l4; in __skb_set_hash()
1511 skb->sw_hash = is_sw; in __skb_set_hash()
1512 skb->hash = hash; in __skb_set_hash()
1516 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) in skb_set_hash() argument
1519 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4); in skb_set_hash()
1523 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) in __skb_set_sw_hash() argument
1525 __skb_set_hash(skb, hash, true, is_l4); in __skb_set_sw_hash()
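
A sketch of how a driver might feed a hardware RSS hash into these setters (example_rx_hash() is hypothetical):

#include <linux/skbuff.h>

/* Report the RSS hash computed by hardware; PKT_HASH_TYPE_L4 tells
 * the stack the hash covers the 4-tuple. */
static void example_rx_hash(struct sk_buff *skb, u32 hw_hash, bool is_l4)
{
	skb_set_hash(skb, hw_hash,
		     is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
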
1528 u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
1530 static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb) in __skb_get_hash_symmetric() argument
1532 return __skb_get_hash_symmetric_net(NULL, skb); in __skb_get_hash_symmetric()
1535 void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
1536 u32 skb_get_poff(const struct sk_buff *skb);
1537 u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
1539 __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1551 const struct sk_buff *skb,
1556 static inline bool skb_flow_dissect(const struct sk_buff *skb, in skb_flow_dissect() argument
1560 return __skb_flow_dissect(NULL, skb, flow_dissector, in skb_flow_dissect()
1564 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, in skb_flow_dissect_flow_keys() argument
1569 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector, in skb_flow_dissect_flow_keys()
1575 const struct sk_buff *skb, in skb_flow_dissect_flow_keys_basic() argument
1581 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, in skb_flow_dissect_flow_keys_basic()
1585 void skb_flow_dissect_meta(const struct sk_buff *skb,
1589 /* Gets a skb connection tracking info, ctinfo map should be a
1594 skb_flow_dissect_ct(const struct sk_buff *skb,
1600 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1604 void skb_flow_dissect_hash(const struct sk_buff *skb,
1608 static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb) in skb_get_hash_net() argument
1610 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash_net()
1611 __skb_get_hash_net(net, skb); in skb_get_hash_net()
1613 return skb->hash; in skb_get_hash_net()
1616 static inline __u32 skb_get_hash(struct sk_buff *skb) in skb_get_hash() argument
1618 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash()
1619 __skb_get_hash_net(NULL, skb); in skb_get_hash()
1621 return skb->hash; in skb_get_hash()
1624 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) in skb_get_hash_flowi6() argument
1626 if (!skb->l4_hash && !skb->sw_hash) { in skb_get_hash_flowi6()
1630 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); in skb_get_hash_flowi6()
1633 return skb->hash; in skb_get_hash_flowi6()
1636 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1639 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) in skb_get_hash_raw() argument
1641 return skb->hash; in skb_get_hash_raw()
1661 static inline bool skb_is_decrypted(const struct sk_buff *skb) in skb_is_decrypted() argument
1664 return skb->decrypted; in skb_is_decrypted()
1679 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) in skb_end_pointer() argument
1681 return skb->head + skb->end; in skb_end_pointer()
1684 static inline unsigned int skb_end_offset(const struct sk_buff *skb) in skb_end_offset() argument
1686 return skb->end; in skb_end_offset()
1689 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) in skb_set_end_offset() argument
1691 skb->end = offset; in skb_set_end_offset()
1694 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) in skb_end_pointer() argument
1696 return skb->end; in skb_end_pointer()
1699 static inline unsigned int skb_end_offset(const struct sk_buff *skb) in skb_end_offset() argument
1701 return skb->end - skb->head; in skb_end_offset()
1704 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) in skb_set_end_offset() argument
1706 skb->end = skb->head + offset; in skb_set_end_offset()
1718 struct sk_buff *skb, struct iov_iter *from,
1721 int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
1724 static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, in skb_zerocopy_iter_dgram() argument
1727 return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1730 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1735 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) argument
1737 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) in skb_hwtstamps() argument
1739 return &skb_shinfo(skb)->hwtstamps; in skb_hwtstamps()
1742 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) in skb_zcopy() argument
1744 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; in skb_zcopy()
1746 return is_zcopy ? skb_uarg(skb) : NULL; in skb_zcopy()
1749 static inline bool skb_zcopy_pure(const struct sk_buff *skb) in skb_zcopy_pure() argument
1751 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; in skb_zcopy_pure()
1754 static inline bool skb_zcopy_managed(const struct sk_buff *skb) in skb_zcopy_managed() argument
1756 return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; in skb_zcopy_managed()
1770 static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg) in skb_zcopy_init() argument
1772 skb_shinfo(skb)->destructor_arg = uarg; in skb_zcopy_init()
1773 skb_shinfo(skb)->flags |= uarg->flags; in skb_zcopy_init()
1776 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, in skb_zcopy_set() argument
1779 if (skb && uarg && !skb_zcopy(skb)) { in skb_zcopy_set()
1784 skb_zcopy_init(skb, uarg); in skb_zcopy_set()
1788 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) in skb_zcopy_set_nouarg() argument
1790 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); in skb_zcopy_set_nouarg()
1791 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; in skb_zcopy_set_nouarg()
1794 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) in skb_zcopy_is_nouarg() argument
1796 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; in skb_zcopy_is_nouarg()
1799 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) in skb_zcopy_get_nouarg() argument
1801 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); in skb_zcopy_get_nouarg()
1821 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) in skb_zcopy_clear() argument
1823 struct ubuf_info *uarg = skb_zcopy(skb); in skb_zcopy_clear()
1826 if (!skb_zcopy_is_nouarg(skb)) in skb_zcopy_clear()
1827 uarg->ops->complete(skb, uarg, zerocopy_success); in skb_zcopy_clear()
1829 skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; in skb_zcopy_clear()
1833 void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
1835 static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) in skb_zcopy_downgrade_managed() argument
1837 if (unlikely(skb_zcopy_managed(skb))) in skb_zcopy_downgrade_managed()
1838 __skb_zcopy_downgrade_managed(skb); in skb_zcopy_downgrade_managed()
1841 /* Return true if frags in this skb are readable by the host. */
1842 static inline bool skb_frags_readable(const struct sk_buff *skb) in skb_frags_readable() argument
1844 return !skb->unreadable; in skb_frags_readable()
1847 static inline void skb_mark_not_on_list(struct sk_buff *skb) in skb_mark_not_on_list() argument
1849 skb->next = NULL; in skb_mark_not_on_list()
1852 static inline void skb_poison_list(struct sk_buff *skb) in skb_poison_list() argument
1855 skb->next = SKB_LIST_POISON_NEXT; in skb_poison_list()
1859 /* Iterate through singly-linked GSO fragments of an skb. */
1860 #define skb_list_walk_safe(first, skb, next_skb) \ argument
1861 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1862 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
1864 static inline void skb_list_del_init(struct sk_buff *skb) in skb_list_del_init() argument
1866 __list_del_entry(&skb->list); in skb_list_del_init()
1867 skb_mark_not_on_list(skb); in skb_list_del_init()
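
A hypothetical sketch using skb_list_walk_safe() to dispose of a segment list, e.g. one produced by skb_segment(); the _safe form tolerates freeing the current element:

#include <linux/skbuff.h>

/* Free every skb on a singly-linked (GSO) fragment list. */
static void example_free_segs(struct sk_buff *segs)
{
	struct sk_buff *skb, *next;

	skb_list_walk_safe(segs, skb, next) {
		skb_mark_not_on_list(skb);
		kfree_skb(skb);
	}
}
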
1895 * skb_queue_is_last - check if skb is the last entry in the queue
1897 * @skb: buffer
1899 * Returns true if @skb is the last buffer on the list.
1902 const struct sk_buff *skb) in skb_queue_is_last() argument
1904 return skb->next == (const struct sk_buff *) list; in skb_queue_is_last()
1908 * skb_queue_is_first - check if skb is the first entry in the queue
1910 * @skb: buffer
1912 * Returns true if @skb is the first buffer on the list.
1915 const struct sk_buff *skb) in skb_queue_is_first() argument
1917 return skb->prev == (const struct sk_buff *) list; in skb_queue_is_first()
1923 * @skb: current buffer
1925 * Return the next packet in @list after @skb. It is only valid to
1929 const struct sk_buff *skb) in skb_queue_next() argument
1934 BUG_ON(skb_queue_is_last(list, skb)); in skb_queue_next()
1935 return skb->next; in skb_queue_next()
1941 * @skb: current buffer
1943 * Return the prev packet in @list before @skb. It is only valid to
1947 const struct sk_buff *skb) in skb_queue_prev() argument
1952 BUG_ON(skb_queue_is_first(list, skb)); in skb_queue_prev()
1953 return skb->prev; in skb_queue_prev()
1958 * @skb: buffer to reference
1963 static inline struct sk_buff *skb_get(struct sk_buff *skb) in skb_get() argument
1965 refcount_inc(&skb->users); in skb_get()
1966 return skb; in skb_get()
1975 * @skb: buffer to check
1981 static inline int skb_cloned(const struct sk_buff *skb) in skb_cloned() argument
1983 return skb->cloned && in skb_cloned()
1984 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; in skb_cloned()
1987 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) in skb_unclone() argument
1991 if (skb_cloned(skb)) in skb_unclone()
1992 return pskb_expand_head(skb, 0, 0, pri); in skb_unclone()
1997 /* This variant of skb_unclone() makes sure skb->truesize
1998 * and skb_end_offset() are not changed, whenever a new skb->head is needed.
2003 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
2004 static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in skb_unclone_keeptruesize() argument
2008 if (skb_cloned(skb)) in skb_unclone_keeptruesize()
2009 return __skb_unclone_keeptruesize(skb, pri); in skb_unclone_keeptruesize()
2015 * @skb: buffer to check
2020 static inline int skb_header_cloned(const struct sk_buff *skb) in skb_header_cloned() argument
2024 if (!skb->cloned) in skb_header_cloned()
2027 dataref = atomic_read(&skb_shinfo(skb)->dataref); in skb_header_cloned()
2032 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) in skb_header_unclone() argument
2036 if (skb_header_cloned(skb)) in skb_header_unclone()
2037 return pskb_expand_head(skb, 0, 0, pri); in skb_header_unclone()
2044 * @skb: buffer to operate on
2048 static inline void __skb_header_release(struct sk_buff *skb) in __skb_header_release() argument
2050 skb->nohdr = 1; in __skb_header_release()
2051 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); in __skb_header_release()
2057 * @skb: buffer to check
2062 static inline int skb_shared(const struct sk_buff *skb) in skb_shared() argument
2064 return refcount_read(&skb->users) != 1; in skb_shared()
2069 * @skb: buffer to check
2080 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) in skb_share_check() argument
2083 if (skb_shared(skb)) { in skb_share_check()
2084 struct sk_buff *nskb = skb_clone(skb, pri); in skb_share_check()
2087 consume_skb(skb); in skb_share_check()
2089 kfree_skb(skb); in skb_share_check()
2090 skb = nskb; in skb_share_check()
2092 return skb; in skb_share_check()
2104 * @skb: buffer to check
2115 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, in skb_unshare() argument
2119 if (skb_cloned(skb)) { in skb_unshare()
2120 struct sk_buff *nskb = skb_copy(skb, pri); in skb_unshare()
2124 consume_skb(skb); in skb_unshare()
2126 kfree_skb(skb); in skb_unshare()
2127 skb = nskb; in skb_unshare()
2129 return skb; in skb_unshare()
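
The canonical skb_share_check() call pattern, as a hypothetical rx prologue; on clone failure the original skb is already freed, so the caller just stops:

#include <linux/skbuff.h>

/* Make sure we own the skb before touching it. */
static struct sk_buff *example_rx_prepare(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;
	/* safe to modify skb metadata from here on */
	return skb;
}
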
2147 struct sk_buff *skb = list_->next; in skb_peek() local
2149 if (skb == (struct sk_buff *)list_) in skb_peek()
2150 skb = NULL; in skb_peek()
2151 return skb; in skb_peek()
2166 * skb_peek_next - peek skb following the given one from a queue
2167 * @skb: skb to start from
2174 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, in skb_peek_next() argument
2177 struct sk_buff *next = skb->next; in skb_peek_next()
2199 struct sk_buff *skb = READ_ONCE(list_->prev); in skb_peek_tail() local
2201 if (skb == (struct sk_buff *)list_) in skb_peek_tail()
2202 skb = NULL; in skb_peek_tail()
2203 return skb; in skb_peek_tail()
2248 * this is needed for now since a whole lot of users of the skb-queue
2302 * skb_queue_splice - join two skb lists, this is designed for stacks
2316 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
2333 * skb_queue_splice_tail - join two skb lists, each list being a queue
2347 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
2430 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2431 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in __skb_unlink() argument
2436 next = skb->next; in __skb_unlink()
2437 prev = skb->prev; in __skb_unlink()
2438 skb->next = skb->prev = NULL; in __skb_unlink()
2453 struct sk_buff *skb = skb_peek(list); in __skb_dequeue() local
2454 if (skb) in __skb_dequeue()
2455 __skb_unlink(skb, list); in __skb_dequeue()
2456 return skb; in __skb_dequeue()
2470 struct sk_buff *skb = skb_peek_tail(list); in __skb_dequeue_tail() local
2471 if (skb) in __skb_dequeue_tail()
2472 __skb_unlink(skb, list); in __skb_dequeue_tail()
2473 return skb; in __skb_dequeue_tail()
2478 static inline bool skb_is_nonlinear(const struct sk_buff *skb) in skb_is_nonlinear() argument
2480 return skb->data_len; in skb_is_nonlinear()
2483 static inline unsigned int skb_headlen(const struct sk_buff *skb) in skb_headlen() argument
2485 return skb->len - skb->data_len; in skb_headlen()
2488 static inline unsigned int __skb_pagelen(const struct sk_buff *skb) in __skb_pagelen() argument
2492 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) in __skb_pagelen()
2493 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_pagelen()
2497 static inline unsigned int skb_pagelen(const struct sk_buff *skb) in skb_pagelen() argument
2499 return skb_headlen(skb) + __skb_pagelen(skb); in skb_pagelen()
2536 * skb_len_add - adds a number to len fields of skb
2537 * @skb: buffer to add len to
2540 static inline void skb_len_add(struct sk_buff *skb, int delta) in skb_len_add() argument
2542 skb->len += delta; in skb_len_add()
2543 skb->data_len += delta; in skb_len_add()
2544 skb->truesize += delta; in skb_len_add()
2548 * __skb_fill_netmem_desc - initialise a fragment in an skb
2549 * @skb: buffer containing fragment to be initialised
2555 * Initialises the @i'th fragment of @skb to point to @size bytes at
2560 static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i, in __skb_fill_netmem_desc() argument
2565 __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size); in __skb_fill_netmem_desc()
2568 skb->unreadable = true; in __skb_fill_netmem_desc()
2574 /* Propagate page pfmemalloc to the skb if we can. The problem is in __skb_fill_netmem_desc()
2580 skb->pfmemalloc = true; in __skb_fill_netmem_desc()
2583 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, in __skb_fill_page_desc() argument
2586 __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size); in __skb_fill_page_desc()
2589 static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i, in skb_fill_netmem_desc() argument
2592 __skb_fill_netmem_desc(skb, i, netmem, off, size); in skb_fill_netmem_desc()
2593 skb_shinfo(skb)->nr_frags = i + 1; in skb_fill_netmem_desc()
2597 * skb_fill_page_desc - initialise a paged fragment in an skb
2598 * @skb: buffer containing fragment to be initialised
2605 * @skb to point to @size bytes at offset @off within @page. In
2606 * addition updates @skb such that @i is the last fragment.
2610 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, in skb_fill_page_desc() argument
2613 skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size); in skb_fill_page_desc()
2617 * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
2618 * @skb: buffer containing fragment to be initialised
2627 static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i, in skb_fill_page_desc_noacc() argument
2631 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_fill_page_desc_noacc()
2637 void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
2640 static inline void skb_add_rx_frag(struct sk_buff *skb, int i, in skb_add_rx_frag() argument
2644 skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size, in skb_add_rx_frag()
2648 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2651 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) argument
2654 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) in skb_tail_pointer() argument
2656 return skb->head + skb->tail; in skb_tail_pointer()
2659 static inline void skb_reset_tail_pointer(struct sk_buff *skb) in skb_reset_tail_pointer() argument
2661 skb->tail = skb->data - skb->head; in skb_reset_tail_pointer()
2664 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) in skb_set_tail_pointer() argument
2666 skb_reset_tail_pointer(skb); in skb_set_tail_pointer()
2667 skb->tail += offset; in skb_set_tail_pointer()
2671 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) in skb_tail_pointer() argument
2673 return skb->tail; in skb_tail_pointer()
2676 static inline void skb_reset_tail_pointer(struct sk_buff *skb) in skb_reset_tail_pointer() argument
2678 skb->tail = skb->data; in skb_reset_tail_pointer()
2681 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) in skb_set_tail_pointer() argument
2683 skb->tail = skb->data + offset; in skb_set_tail_pointer()
2688 static inline void skb_assert_len(struct sk_buff *skb) in skb_assert_len() argument
2691 if (WARN_ONCE(!skb->len, "%s\n", __func__)) in skb_assert_len()
2692 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); in skb_assert_len()
2697 void skb_might_realloc(struct sk_buff *skb);
2699 static inline void skb_might_realloc(struct sk_buff *skb) {} in skb_might_realloc() argument
2705 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2706 void *skb_put(struct sk_buff *skb, unsigned int len);
2707 static inline void *__skb_put(struct sk_buff *skb, unsigned int len) in __skb_put() argument
2709 void *tmp = skb_tail_pointer(skb); in __skb_put()
2710 SKB_LINEAR_ASSERT(skb); in __skb_put()
2711 skb->tail += len; in __skb_put()
2712 skb->len += len; in __skb_put()
2716 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) in __skb_put_zero() argument
2718 void *tmp = __skb_put(skb, len); in __skb_put_zero()
2724 static inline void *__skb_put_data(struct sk_buff *skb, const void *data, in __skb_put_data() argument
2727 void *tmp = __skb_put(skb, len); in __skb_put_data()
2733 static inline void __skb_put_u8(struct sk_buff *skb, u8 val) in __skb_put_u8() argument
2735 *(u8 *)__skb_put(skb, 1) = val; in __skb_put_u8()
2738 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) in skb_put_zero() argument
2740 void *tmp = skb_put(skb, len); in skb_put_zero()
2747 static inline void *skb_put_data(struct sk_buff *skb, const void *data, in skb_put_data() argument
2750 void *tmp = skb_put(skb, len); in skb_put_data()
2757 static inline void skb_put_u8(struct sk_buff *skb, u8 val) in skb_put_u8() argument
2759 *(u8 *)skb_put(skb, 1) = val; in skb_put_u8()
2762 void *skb_push(struct sk_buff *skb, unsigned int len);
2763 static inline void *__skb_push(struct sk_buff *skb, unsigned int len) in __skb_push() argument
2767 skb->data -= len; in __skb_push()
2768 skb->len += len; in __skb_push()
2769 return skb->data; in __skb_push()
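
Putting skb_reserve(), skb_put() and skb_push() together, a minimal hypothetical construction sketch (example_build() is not a kernel API):

#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Classic pattern: reserve headroom, append the payload, then
 * prepend lower-layer header space in front of it. */
static struct sk_buff *example_build(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_reserve(skb, ETH_HLEN);		/* room for the header */
	skb_put_data(skb, payload, len);	/* append payload */
	skb_push(skb, ETH_HLEN);		/* prepend header space */
	skb_reset_mac_header(skb);
	return skb;
}
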
2772 void *skb_pull(struct sk_buff *skb, unsigned int len);
2773 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) in __skb_pull() argument
2777 skb->len -= len; in __skb_pull()
2778 if (unlikely(skb->len < skb->data_len)) { in __skb_pull()
2780 skb->len += len; in __skb_pull()
2782 skb_dump(KERN_ERR, skb, false); in __skb_pull()
2786 return skb->data += len; in __skb_pull()
2789 static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) in skb_pull_inline() argument
2791 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); in skb_pull_inline()
2794 void *skb_pull_data(struct sk_buff *skb, size_t len);
2796 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2799 pskb_may_pull_reason(struct sk_buff *skb, unsigned int len) in pskb_may_pull_reason() argument
2802 skb_might_realloc(skb); in pskb_may_pull_reason()
2804 if (likely(len <= skb_headlen(skb))) in pskb_may_pull_reason()
2807 if (unlikely(len > skb->len)) in pskb_may_pull_reason()
2810 if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb)))) in pskb_may_pull_reason()
2816 static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) in pskb_may_pull() argument
2818 return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; in pskb_may_pull()
2821 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) in pskb_pull() argument
2823 if (!pskb_may_pull(skb, len)) in pskb_pull()
2826 skb->len -= len; in pskb_pull()
2827 return skb->data += len; in pskb_pull()
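
A hypothetical parsing sketch: pskb_may_pull() guarantees the first sizeof(struct iphdr) bytes are linear (possibly reallocating skb->head) before they are dereferenced. It assumes skb->data already points at the IPv4 header:

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Safely read the protocol field of an IPv4 header at skb->data. */
static int example_parse_ip(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;
	iph = (const struct iphdr *)skb->data;
	return iph->protocol;
}
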
2830 void skb_condense(struct sk_buff *skb);
2834 * @skb: buffer to check
2838 static inline unsigned int skb_headroom(const struct sk_buff *skb) in skb_headroom() argument
2840 return skb->data - skb->head; in skb_headroom()
2845 * @skb: buffer to check
2849 static inline int skb_tailroom(const struct sk_buff *skb) in skb_tailroom() argument
2851 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; in skb_tailroom()
2856 * @skb: buffer to check
2861 static inline int skb_availroom(const struct sk_buff *skb) in skb_availroom() argument
2863 if (skb_is_nonlinear(skb)) in skb_availroom()
2866 return skb->end - skb->tail - skb->reserved_tailroom; in skb_availroom()
2871 * @skb: buffer to alter
2877 static inline void skb_reserve(struct sk_buff *skb, int len) in skb_reserve() argument
2879 skb->data += len; in skb_reserve()
2880 skb->tail += len; in skb_reserve()
2885 * @skb: buffer to alter
2895 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, in skb_tailroom_reserve() argument
2898 SKB_LINEAR_ASSERT(skb); in skb_tailroom_reserve()
2899 if (mtu < skb_tailroom(skb) - needed_tailroom) in skb_tailroom_reserve()
2901 skb->reserved_tailroom = skb_tailroom(skb) - mtu; in skb_tailroom_reserve()
2904 skb->reserved_tailroom = needed_tailroom; in skb_tailroom_reserve()
2910 static inline void skb_set_inner_protocol(struct sk_buff *skb, in skb_set_inner_protocol() argument
2913 skb->inner_protocol = protocol; in skb_set_inner_protocol()
2914 skb->inner_protocol_type = ENCAP_TYPE_ETHER; in skb_set_inner_protocol()
2917 static inline void skb_set_inner_ipproto(struct sk_buff *skb, in skb_set_inner_ipproto() argument
2920 skb->inner_ipproto = ipproto; in skb_set_inner_ipproto()
2921 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; in skb_set_inner_ipproto()
2924 static inline void skb_reset_inner_headers(struct sk_buff *skb) in skb_reset_inner_headers() argument
2926 skb->inner_mac_header = skb->mac_header; in skb_reset_inner_headers()
2927 skb->inner_network_header = skb->network_header; in skb_reset_inner_headers()
2928 skb->inner_transport_header = skb->transport_header; in skb_reset_inner_headers()
2931 static inline int skb_mac_header_was_set(const struct sk_buff *skb) in skb_mac_header_was_set() argument
2933 return skb->mac_header != (typeof(skb->mac_header))~0U; in skb_mac_header_was_set()
2936 static inline void skb_reset_mac_len(struct sk_buff *skb) in skb_reset_mac_len() argument
2938 if (!skb_mac_header_was_set(skb)) { in skb_reset_mac_len()
2940 skb->mac_len = 0; in skb_reset_mac_len()
2942 skb->mac_len = skb->network_header - skb->mac_header; in skb_reset_mac_len()
2947 static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb) in skb_inner_transport_header() argument
2949 return skb->head + skb->inner_transport_header; in skb_inner_transport_header()
2952 static inline int skb_inner_transport_offset(const struct sk_buff *skb) in skb_inner_transport_offset() argument
2954 return skb_inner_transport_header(skb) - skb->data; in skb_inner_transport_offset()
2957 static inline void skb_reset_inner_transport_header(struct sk_buff *skb) in skb_reset_inner_transport_header() argument
2959 long offset = skb->data - skb->head; in skb_reset_inner_transport_header()
2961 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset); in skb_reset_inner_transport_header()
2962 skb->inner_transport_header = offset; in skb_reset_inner_transport_header()
2965 static inline void skb_set_inner_transport_header(struct sk_buff *skb, in skb_set_inner_transport_header() argument
2968 skb_reset_inner_transport_header(skb); in skb_set_inner_transport_header()
2969 skb->inner_transport_header += offset; in skb_set_inner_transport_header()
2972 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) in skb_inner_network_header() argument
2974 return skb->head + skb->inner_network_header; in skb_inner_network_header()
2977 static inline void skb_reset_inner_network_header(struct sk_buff *skb) in skb_reset_inner_network_header() argument
2979 long offset = skb->data - skb->head; in skb_reset_inner_network_header()
2981 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset); in skb_reset_inner_network_header()
2982 skb->inner_network_header = offset; in skb_reset_inner_network_header()
2985 static inline void skb_set_inner_network_header(struct sk_buff *skb, in skb_set_inner_network_header() argument
2988 skb_reset_inner_network_header(skb); in skb_set_inner_network_header()
2989 skb->inner_network_header += offset; in skb_set_inner_network_header()
2992 static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb) in skb_inner_network_header_was_set() argument
2994 return skb->inner_network_header > 0; in skb_inner_network_header_was_set()
2997 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) in skb_inner_mac_header() argument
2999 return skb->head + skb->inner_mac_header; in skb_inner_mac_header()
3002 static inline void skb_reset_inner_mac_header(struct sk_buff *skb) in skb_reset_inner_mac_header() argument
3004 long offset = skb->data - skb->head; in skb_reset_inner_mac_header()
3006 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset); in skb_reset_inner_mac_header()
3007 skb->inner_mac_header = offset; in skb_reset_inner_mac_header()
3010 static inline void skb_set_inner_mac_header(struct sk_buff *skb, in skb_set_inner_mac_header() argument
3013 skb_reset_inner_mac_header(skb); in skb_set_inner_mac_header()
3014 skb->inner_mac_header += offset; in skb_set_inner_mac_header()
3016 static inline bool skb_transport_header_was_set(const struct sk_buff *skb) in skb_transport_header_was_set() argument
3018 return skb->transport_header != (typeof(skb->transport_header))~0U; in skb_transport_header_was_set()
3021 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) in skb_transport_header() argument
3023 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); in skb_transport_header()
3024 return skb->head + skb->transport_header; in skb_transport_header()
3027 static inline void skb_reset_transport_header(struct sk_buff *skb) in skb_reset_transport_header() argument
3029 long offset = skb->data - skb->head; in skb_reset_transport_header()
3031 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset); in skb_reset_transport_header()
3032 skb->transport_header = offset; in skb_reset_transport_header()
3035 static inline void skb_set_transport_header(struct sk_buff *skb, in skb_set_transport_header() argument
3038 skb_reset_transport_header(skb); in skb_set_transport_header()
3039 skb->transport_header += offset; in skb_set_transport_header()
3042 static inline unsigned char *skb_network_header(const struct sk_buff *skb) in skb_network_header() argument
3044 return skb->head + skb->network_header; in skb_network_header()
3047 static inline void skb_reset_network_header(struct sk_buff *skb) in skb_reset_network_header() argument
3049 long offset = skb->data - skb->head; in skb_reset_network_header()
3051 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset); in skb_reset_network_header()
3052 skb->network_header = offset; in skb_reset_network_header()
3055 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) in skb_set_network_header() argument
3057 skb_reset_network_header(skb); in skb_set_network_header()
3058 skb->network_header += offset; in skb_set_network_header()
3061 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) in skb_mac_header() argument
3063 DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); in skb_mac_header()
3064 return skb->head + skb->mac_header; in skb_mac_header()
3067 static inline int skb_mac_offset(const struct sk_buff *skb) in skb_mac_offset() argument
3069 return skb_mac_header(skb) - skb->data; in skb_mac_offset()
3072 static inline u32 skb_mac_header_len(const struct sk_buff *skb) in skb_mac_header_len() argument
3074 DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); in skb_mac_header_len()
3075 return skb->network_header - skb->mac_header; in skb_mac_header_len()
3078 static inline void skb_unset_mac_header(struct sk_buff *skb) in skb_unset_mac_header() argument
3080 skb->mac_header = (typeof(skb->mac_header))~0U; in skb_unset_mac_header()
3083 static inline void skb_reset_mac_header(struct sk_buff *skb) in skb_reset_mac_header() argument
3085 long offset = skb->data - skb->head; in skb_reset_mac_header()
3087 DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset); in skb_reset_mac_header()
3088 skb->mac_header = offset; in skb_reset_mac_header()
3091 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) in skb_set_mac_header() argument
3093 skb_reset_mac_header(skb); in skb_set_mac_header()
3094 skb->mac_header += offset; in skb_set_mac_header()
3097 static inline void skb_pop_mac_header(struct sk_buff *skb) in skb_pop_mac_header() argument
3099 skb->mac_header = skb->network_header; in skb_pop_mac_header()
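
A hedged sketch of the receive-side convention these setters support (example_rx_headers() is hypothetical and assumes a linear, length-checked IPv4-over-Ethernet frame):

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>

/* Each layer records its header offset and pulls itself off. */
static void example_rx_headers(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);		/* skb->data is at L2 */
	skb_pull(skb, ETH_HLEN);
	skb_reset_network_header(skb);		/* skb->data is at L3 */
	skb_set_transport_header(skb, ip_hdr(skb)->ihl * 4);
}
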
3102 static inline void skb_probe_transport_header(struct sk_buff *skb) in skb_probe_transport_header() argument
3106 if (skb_transport_header_was_set(skb)) in skb_probe_transport_header()
3109 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, in skb_probe_transport_header()
3111 skb_set_transport_header(skb, keys.control.thoff); in skb_probe_transport_header()
3114 static inline void skb_mac_header_rebuild(struct sk_buff *skb) in skb_mac_header_rebuild() argument
3116 if (skb_mac_header_was_set(skb)) { in skb_mac_header_rebuild()
3117 const unsigned char *old_mac = skb_mac_header(skb); in skb_mac_header_rebuild()
3119 skb_set_mac_header(skb, -skb->mac_len); in skb_mac_header_rebuild()
3120 memmove(skb_mac_header(skb), old_mac, skb->mac_len); in skb_mac_header_rebuild()
3125 * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
3128 static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len) in skb_mac_header_rebuild_full() argument
3130 if (skb_mac_header_was_set(skb)) { in skb_mac_header_rebuild_full()
3131 const unsigned char *old_mac = skb_mac_header(skb); in skb_mac_header_rebuild_full()
3133 skb_set_mac_header(skb, -full_mac_len); in skb_mac_header_rebuild_full()
3134 memmove(skb_mac_header(skb), old_mac, full_mac_len); in skb_mac_header_rebuild_full()
3135 __skb_push(skb, full_mac_len - skb->mac_len); in skb_mac_header_rebuild_full()
3139 static inline int skb_checksum_start_offset(const struct sk_buff *skb) in skb_checksum_start_offset() argument
3141 return skb->csum_start - skb_headroom(skb); in skb_checksum_start_offset()
3144 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) in skb_checksum_start() argument
3146 return skb->head + skb->csum_start; in skb_checksum_start()
3149 static inline int skb_transport_offset(const struct sk_buff *skb) in skb_transport_offset() argument
3151 return skb_transport_header(skb) - skb->data; in skb_transport_offset()
3154 static inline u32 skb_network_header_len(const struct sk_buff *skb) in skb_network_header_len() argument
3156 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb)); in skb_network_header_len()
3157 return skb->transport_header - skb->network_header; in skb_network_header_len()
3160 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) in skb_inner_network_header_len() argument
3162 return skb->inner_transport_header - skb->inner_network_header; in skb_inner_network_header_len()
3165 static inline int skb_network_offset(const struct sk_buff *skb) in skb_network_offset() argument
3167 return skb_network_header(skb) - skb->data; in skb_network_offset()
3170 static inline int skb_inner_network_offset(const struct sk_buff *skb) in skb_inner_network_offset() argument
3172 return skb_inner_network_header(skb) - skb->data; in skb_inner_network_offset()
3176 pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len) in pskb_network_may_pull_reason() argument
3178 return pskb_may_pull_reason(skb, skb_network_offset(skb) + len); in pskb_network_may_pull_reason()
3181 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) in pskb_network_may_pull() argument
3183 return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; in pskb_network_may_pull()
3197 * skb_reserve(skb, NET_IP_ALIGN);
3211 * The networking layer reserves some headroom in skb data (via
3212 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
3234 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
3236 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) in __skb_set_length() argument
3238 if (WARN_ON(skb_is_nonlinear(skb))) in __skb_set_length()
3240 skb->len = len; in __skb_set_length()
3241 skb_set_tail_pointer(skb, len); in __skb_set_length()
3244 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) in __skb_trim() argument
3246 __skb_set_length(skb, len); in __skb_trim()
3249 void skb_trim(struct sk_buff *skb, unsigned int len);
3251 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) in __pskb_trim() argument
3253 if (skb->data_len) in __pskb_trim()
3254 return ___pskb_trim(skb, len); in __pskb_trim()
3255 __skb_trim(skb, len); in __pskb_trim()
3259 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) in pskb_trim() argument
3261 skb_might_realloc(skb); in pskb_trim()
3262 return (len < skb->len) ? __pskb_trim(skb, len) : 0; in pskb_trim()
3267 * @skb: buffer to alter
3271 * the skb is not cloned so we should never get an error due to out-
3274 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) in pskb_trim_unique() argument
3276 int err = pskb_trim(skb, len); in pskb_trim_unique()
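
A small hypothetical trimming sketch: pskb_trim() handles nonlinear skbs and can fail on clones, so its return value must be checked:

#include <linux/skbuff.h>

/* Drop padding beyond the length advertised by the L3 header. */
static int example_trim(struct sk_buff *skb, unsigned int l3_len)
{
	if (skb->len > l3_len)
		return pskb_trim(skb, l3_len);
	return 0;
}
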
3280 static inline int __skb_grow(struct sk_buff *skb, unsigned int len) in __skb_grow() argument
3282 unsigned int diff = len - skb->len; in __skb_grow()
3284 if (skb_tailroom(skb) < diff) { in __skb_grow()
3285 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), in __skb_grow()
3290 __skb_set_length(skb, len); in __skb_grow()
3296 * @skb: buffer to orphan
3299 * destructor function and make the @skb unowned. The buffer continues
3302 static inline void skb_orphan(struct sk_buff *skb) in skb_orphan() argument
3304 if (skb->destructor) { in skb_orphan()
3305 skb->destructor(skb); in skb_orphan()
3306 skb->destructor = NULL; in skb_orphan()
3307 skb->sk = NULL; in skb_orphan()
3309 BUG_ON(skb->sk); in skb_orphan()
3315 * @skb: buffer to orphan frags from
3318 * For each frag in the SKB which needs a destructor (i.e. has an
3322 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags() argument
3324 if (likely(!skb_zcopy(skb))) in skb_orphan_frags()
3326 if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) in skb_orphan_frags()
3328 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags()
3331 /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
3332 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags_rx() argument
3334 if (likely(!skb_zcopy(skb))) in skb_orphan_frags_rx()
3336 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags_rx()
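
A minimal sketch of where these orphaning helpers fit (example_prepare_loopback() is hypothetical): before a transmitted skb can be looped back to the receive path, user-owned zerocopy fragments must be copied into kernel memory:

#include <linux/skbuff.h>

/* Copy zerocopy frags before an skb re-enters the rx path. */
static int example_prepare_loopback(struct sk_buff *skb)
{
	if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
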
3351 struct sk_buff *skb; in __skb_queue_purge_reason() local
3353 while ((skb = __skb_dequeue(list)) != NULL) in __skb_queue_purge_reason()
3354 kfree_skb_reason(skb, reason); in __skb_queue_purge_reason()
3433 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); in __netdev_alloc_skb_ip_align() local
3435 if (NET_IP_ALIGN && skb) in __netdev_alloc_skb_ip_align()
3436 skb_reserve(skb, NET_IP_ALIGN); in __netdev_alloc_skb_ip_align()
3437 return skb; in __netdev_alloc_skb_ip_align()
3466 void napi_consume_skb(struct sk_buff *skb, int budget);
3468 void napi_skb_free_stolen_head(struct sk_buff *skb);
3469 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
3540 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
3542 * @skb: The skb that may need pfmemalloc set
3545 struct sk_buff *skb) in skb_propagate_pfmemalloc() argument
3548 skb->pfmemalloc = true; in skb_propagate_pfmemalloc()
3552 * skb_frag_off() - Returns the offset of a skb fragment
3561 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
3562 * @frag: skb fragment
3571 * skb_frag_off_set() - Sets the offset of a skb fragment
3572 * @frag: skb fragment
3581 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
3582 * @fragto: skb fragment where offset is set
3583 * @fragfrom: skb fragment offset is copied from
3676 * @fragto: skb fragment where page is set
3677 * @fragfrom: skb fragment page is copied from
3728 static inline struct sk_buff *pskb_copy(struct sk_buff *skb, in pskb_copy() argument
3731 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); in pskb_copy()
3735 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, in pskb_copy_for_clone() argument
3738 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); in pskb_copy_for_clone()
3744 * @skb: buffer to check
3750 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) in skb_clone_writable() argument
3752 return !skb_header_cloned(skb) && in skb_clone_writable()
3753 skb_headroom(skb) + len <= skb->hdr_len; in skb_clone_writable()
3756 static inline int skb_try_make_writable(struct sk_buff *skb, in skb_try_make_writable() argument
3759 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && in skb_try_make_writable()
3760 pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_try_make_writable()
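
A hypothetical sketch of skb_try_make_writable() guarding in-place header rewriting (as done for NAT-style mangling); it unclones only when actually needed and returns nonzero on allocation failure:

#include <linux/skbuff.h>

/* Ensure the first @len bytes may be modified in place. */
static int example_mangle_prepare(struct sk_buff *skb, unsigned int len)
{
	if (skb_try_make_writable(skb, len))
		return -ENOMEM;
	/* header bytes up to @len are now private to us */
	return 0;
}
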
3763 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, in __skb_cow() argument
3768 if (headroom > skb_headroom(skb)) in __skb_cow()
3769 delta = headroom - skb_headroom(skb); in __skb_cow()
3772 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, in __skb_cow()
3778 * skb_cow - copy header of skb when it is required
3779 * @skb: buffer to cow
3782 * If the skb passed lacks sufficient headroom or its data part
3784 * is returned and original skb is not changed.
3786 * The result is skb with writable area skb->head...skb->tail
3789 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) in skb_cow() argument
3791 return __skb_cow(skb, headroom, skb_cloned(skb)); in skb_cow()
3796 * @skb: buffer to cow
3804 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) in skb_cow_head() argument
3806 return __skb_cow(skb, headroom, skb_header_cloned(skb)); in skb_cow_head()
3811 * @skb: buffer to pad
3817 * success. The skb is freed on error.
3819 static inline int skb_padto(struct sk_buff *skb, unsigned int len) in skb_padto() argument
3821 unsigned int size = skb->len; in skb_padto()
3824 return skb_pad(skb, len - size); in skb_padto()
3829 * @skb: buffer to pad
3836 * success. The skb is freed on error if @free_on_error is true.
3838 static inline int __must_check __skb_put_padto(struct sk_buff *skb, in __skb_put_padto() argument
3842 unsigned int size = skb->len; in __skb_put_padto()
3846 if (__skb_pad(skb, len, free_on_error)) in __skb_put_padto()
3848 __skb_put(skb, len); in __skb_put_padto()
3855 * @skb: buffer to pad
3861 * success. The skb is freed on error.
3863 static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) in skb_put_padto() argument
3865 return __skb_put_padto(skb, len, true); in skb_put_padto()
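
The typical transmit-side use of skb_put_padto(), sketched hypothetically; note that on failure the skb has already been freed, so the caller must not touch it again:

#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Ethernet frames must be at least ETH_ZLEN bytes; pad with zeroes. */
static int example_tx_pad(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);
}
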
3871 static inline bool skb_can_coalesce(struct sk_buff *skb, int i, in skb_can_coalesce() argument
3874 if (skb_zcopy(skb)) in skb_can_coalesce()
3877 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in skb_can_coalesce()
3885 static inline int __skb_linearize(struct sk_buff *skb) in __skb_linearize() argument
3887 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; in __skb_linearize()
3891 * skb_linearize - convert paged skb to linear one
3892 * @skb: buffer to linearize
3895 * is returned and the old skb data released.
3897 static inline int skb_linearize(struct sk_buff *skb) in skb_linearize() argument
3899 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; in skb_linearize()
3904 * @skb: buffer to test
3906 * Return: true if the skb has at least one frag that might be modified
3909 static inline bool skb_has_shared_frag(const struct sk_buff *skb) in skb_has_shared_frag() argument
3911 return skb_is_nonlinear(skb) && in skb_has_shared_frag()
3912 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; in skb_has_shared_frag()
3916 * skb_linearize_cow - make sure skb is linear and writable
3917 * @skb: buffer to process
3920 * is returned and the old skb data released.
3922 static inline int skb_linearize_cow(struct sk_buff *skb) in skb_linearize_cow() argument
3924 return skb_is_nonlinear(skb) || skb_cloned(skb) ? in skb_linearize_cow()
3925 __skb_linearize(skb) : 0; in skb_linearize_cow()
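
A hypothetical caller of skb_linearize(), for code paths that need the whole packet contiguous in skb->head; it is a no-op for already-linear skbs:

#include <linux/skbuff.h>

/* Linearize before code that cannot handle paged data. */
static int example_need_linear(struct sk_buff *skb)
{
	if (skb_linearize(skb))
		return -ENOMEM;
	/* skb->data now covers all skb->len bytes */
	return 0;
}
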
3929 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, in __skb_postpull_rcsum() argument
3932 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpull_rcsum()
3933 skb->csum = csum_block_sub(skb->csum, in __skb_postpull_rcsum()
3935 else if (skb->ip_summed == CHECKSUM_PARTIAL && in __skb_postpull_rcsum()
3936 skb_checksum_start_offset(skb) < 0) in __skb_postpull_rcsum()
3937 skb->ip_summed = CHECKSUM_NONE; in __skb_postpull_rcsum()
3941 * skb_postpull_rcsum - update checksum for received skb after pull
3942 * @skb: buffer to update
3950 static inline void skb_postpull_rcsum(struct sk_buff *skb, in skb_postpull_rcsum() argument
3953 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_postpull_rcsum()
3954 skb->csum = wsum_negate(csum_partial(start, len, in skb_postpull_rcsum()
3955 wsum_negate(skb->csum))); in skb_postpull_rcsum()
3956 else if (skb->ip_summed == CHECKSUM_PARTIAL && in skb_postpull_rcsum()
3957 skb_checksum_start_offset(skb) < 0) in skb_postpull_rcsum()
3958 skb->ip_summed = CHECKSUM_NONE; in skb_postpull_rcsum()
3962 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, in __skb_postpush_rcsum() argument
3965 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpush_rcsum()
3966 skb->csum = csum_block_add(skb->csum, in __skb_postpush_rcsum()
3971 * skb_postpush_rcsum - update checksum for received skb after push
3972 * @skb: buffer to update
3979 static inline void skb_postpush_rcsum(struct sk_buff *skb, in skb_postpush_rcsum() argument
3982 __skb_postpush_rcsum(skb, start, len, 0); in skb_postpush_rcsum()
3985 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3988 * skb_push_rcsum - push skb and update receive checksum
3989 * @skb: buffer to update
3998 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) in skb_push_rcsum() argument
4000 skb_push(skb, len); in skb_push_rcsum()
4001 skb_postpush_rcsum(skb, skb->data, len); in skb_push_rcsum()
4002 return skb->data; in skb_push_rcsum()
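
A hedged decapsulation sketch around skb_pull_rcsum() (example_decap() is hypothetical): pulling @hlen header bytes must also remove their contribution from skb->csum when ip_summed is CHECKSUM_COMPLETE, and skb_pull_rcsum() does both steps. The caller is assumed to have validated @hlen with pskb_may_pull() first:

#include <linux/skbuff.h>

/* Pull an outer header and keep the receive checksum consistent. */
static void *example_decap(struct sk_buff *skb, unsigned int hlen)
{
	return skb_pull_rcsum(skb, hlen);
}
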
4005 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
4007 * pskb_trim_rcsum - trim received skb and update checksum
4008 * @skb: buffer to trim
4013 * It can change skb pointers.
4016 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum() argument
4018 skb_might_realloc(skb); in pskb_trim_rcsum()
4019 if (likely(len >= skb->len)) in pskb_trim_rcsum()
4021 return pskb_trim_rcsum_slow(skb, len); in pskb_trim_rcsum()
4024 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) in __skb_trim_rcsum() argument
4026 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_trim_rcsum()
4027 skb->ip_summed = CHECKSUM_NONE; in __skb_trim_rcsum()
4028 __skb_trim(skb, len); in __skb_trim_rcsum()
4032 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) in __skb_grow_rcsum() argument
4034 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_grow_rcsum()
4035 skb->ip_summed = CHECKSUM_NONE; in __skb_grow_rcsum()
4036 return __skb_grow(skb, len); in __skb_grow_rcsum()
4042 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) argument
4043 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) argument
4045 #define skb_queue_walk(queue, skb) \ argument
4046 for (skb = (queue)->next; \
4047 skb != (struct sk_buff *)(queue); \
4048 skb = skb->next)
4050 #define skb_queue_walk_safe(queue, skb, tmp) \ argument
4051 for (skb = (queue)->next, tmp = skb->next; \
4052 skb != (struct sk_buff *)(queue); \
4053 skb = tmp, tmp = skb->next)
4055 #define skb_queue_walk_from(queue, skb) \ argument
4056 for (; skb != (struct sk_buff *)(queue); \
4057 skb = skb->next)
4059 #define skb_rbtree_walk(skb, root) \ argument
4060 for (skb = skb_rb_first(root); skb != NULL; \
4061 skb = skb_rb_next(skb))
4063 #define skb_rbtree_walk_from(skb) \ argument
4064 for (; skb != NULL; \
4065 skb = skb_rb_next(skb))
4067 #define skb_rbtree_walk_from_safe(skb, tmp) \ argument
4068 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
4069 skb = tmp)
4071 #define skb_queue_walk_from_safe(queue, skb, tmp) \ argument
4072 for (tmp = skb->next; \
4073 skb != (struct sk_buff *)(queue); \
4074 skb = tmp, tmp = skb->next)
4076 #define skb_queue_reverse_walk(queue, skb) \ argument
4077 for (skb = (queue)->prev; \
4078 skb != (struct sk_buff *)(queue); \
4079 skb = skb->prev)
4081 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \ argument
4082 for (skb = (queue)->prev, tmp = skb->prev; \
4083 skb != (struct sk_buff *)(queue); \
4084 skb = tmp, tmp = skb->prev)
4086 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ argument
4087 for (tmp = skb->prev; \
4088 skb != (struct sk_buff *)(queue); \
4089 skb = tmp, tmp = skb->prev)
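
A hypothetical reaping loop built on the _safe walker above; since __skb_unlink() takes no lock, the caller is assumed to hold the queue lock or otherwise own the list:

#include <linux/skbuff.h>

/* Unlink and free every queued skb older than @cutoff. */
static void example_reap(struct sk_buff_head *list, ktime_t cutoff)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(list, skb, tmp) {
		if (ktime_before(skb->tstamp, cutoff)) {
			__skb_unlink(skb, list);
			kfree_skb(skb);
		}
	}
}
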
4091 static inline bool skb_has_frag_list(const struct sk_buff *skb) in skb_has_frag_list() argument
4093 return skb_shinfo(skb)->frag_list != NULL; in skb_has_frag_list()
4096 static inline void skb_frag_list_init(struct sk_buff *skb) in skb_frag_list_init() argument
4098 skb_shinfo(skb)->frag_list = NULL; in skb_frag_list_init()
4101 #define skb_walk_frags(skb, iter) \ argument
4102 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
4107 int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, int *err, long *timeo_p, const struct sk_buff *skb);
4130 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, struct msghdr *msg);
4132 int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, struct iov_iter *to, int len, struct ahash_request *hash);
4135 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len);
4137 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
4138 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
4139 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
4140 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
4141 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
4142 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len);
4144 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, struct pipe_inode_info *pipe, unsigned int len, unsigned int flags);
4147 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len);
4149 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
4150 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
4154 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
4155 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
4156 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
4157 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
4158 struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, unsigned int offset);
4160 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
4161 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
4162 int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
4163 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
4164 int skb_vlan_pop(struct sk_buff *skb);
4165 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
4166 int skb_eth_pop(struct sk_buff *skb);
4167 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, const unsigned char *src);
4169 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, int mac_len, bool ethernet);
4171 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, bool ethernet);
4173 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
4174 int skb_mpls_dec_ttl(struct sk_buff *skb);
4175 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp);
4195 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops);
4197 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum);
4201 __skb_header_pointer(const struct sk_buff *skb, int offset, int len, const void *data, int hlen, void *buffer) in __skb_header_pointer() argument
4207 if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) in __skb_header_pointer()
4214 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) in skb_header_pointer() argument
4216 return __skb_header_pointer(skb, offset, len, skb->data, in skb_header_pointer()
4217 skb_headlen(skb), buffer); in skb_header_pointer()
4221 skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) in skb_pointer_if_linear() argument
4223 if (likely(skb_headlen(skb) - offset >= len)) in skb_pointer_if_linear()
4224 return skb->data + offset; in skb_pointer_if_linear()
4225 return NULL; in skb_pointer_if_linear()
4229 * skb_needs_linearize - check if we need to linearize a given skb
4231 * @skb: socket buffer to check
4235 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
4236 * 2. skb is fragmented and the device does not support SG.
4238 static inline bool skb_needs_linearize(struct sk_buff *skb, netdev_features_t features) in skb_needs_linearize() argument
4241 return skb_is_nonlinear(skb) && in skb_needs_linearize()
4242 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || in skb_needs_linearize()
4243 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); in skb_needs_linearize()
4246 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len) in skb_copy_from_linear_data() argument
4250 memcpy(to, skb->data, len); in skb_copy_from_linear_data()
4253 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, const int offset, void *to, const unsigned int len) in skb_copy_from_linear_data_offset() argument
4257 memcpy(to, skb->data + offset, len); in skb_copy_from_linear_data_offset()
4260 static inline void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len) in skb_copy_to_linear_data() argument
4264 memcpy(skb->data, from, len); in skb_copy_to_linear_data()
4267 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, const int offset, const void *from, const unsigned int len) in skb_copy_to_linear_data_offset() argument
4272 memcpy(skb->data + offset, from, len); in skb_copy_to_linear_data_offset()
4277 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) in skb_get_ktime() argument
4279 return skb->tstamp; in skb_get_ktime()
4283 * skb_get_timestamp - get timestamp from a skb
4284 * @skb: skb to get stamp from
4287 * Timestamps are stored in the skb as offsets to a base timestamp.
4291 static inline void skb_get_timestamp(const struct sk_buff *skb, struct __kernel_old_timeval *stamp) in skb_get_timestamp() argument
4294 *stamp = ns_to_kernel_old_timeval(skb->tstamp); in skb_get_timestamp()
4297 static inline void skb_get_new_timestamp(const struct sk_buff *skb, struct __kernel_sock_timeval *stamp) in skb_get_new_timestamp() argument
4300 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_new_timestamp()
4306 static inline void skb_get_timestampns(const struct sk_buff *skb, struct __kernel_old_timespec *stamp) in skb_get_timestampns() argument
4309 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_timestampns()
4315 static inline void skb_get_new_timestampns(const struct sk_buff *skb, struct __kernel_timespec *stamp) in skb_get_new_timestampns() argument
4318 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_new_timestampns()
4324 static inline void __net_timestamp(struct sk_buff *skb) in __net_timestamp() argument
4326 skb->tstamp = ktime_get_real(); in __net_timestamp()
4327 skb->tstamp_type = SKB_CLOCK_REALTIME; in __net_timestamp()
4335 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, u8 tstamp_type) in skb_set_delivery_time() argument
4338 skb->tstamp = kt; in skb_set_delivery_time()
4341 skb->tstamp_type = tstamp_type; in skb_set_delivery_time()
4343 skb->tstamp_type = SKB_CLOCK_REALTIME; in skb_set_delivery_time()
4346 static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb, ktime_t kt, clockid_t clockid) in skb_set_delivery_type_by_clockid() argument
4365 skb_set_delivery_time(skb, kt, tstamp_type); in skb_set_delivery_type_by_clockid()
4371 * If needed, set the skb->tstamp to the (rcv) timestamp.
4373 static inline void skb_clear_delivery_time(struct sk_buff *skb) in skb_clear_delivery_time() argument
4375 if (skb->tstamp_type) { in skb_clear_delivery_time()
4376 skb->tstamp_type = SKB_CLOCK_REALTIME; in skb_clear_delivery_time()
4378 skb->tstamp = ktime_get_real(); in skb_clear_delivery_time()
4380 skb->tstamp = 0; in skb_clear_delivery_time()
4384 static inline void skb_clear_tstamp(struct sk_buff *skb) in skb_clear_tstamp() argument
4386 if (skb->tstamp_type) in skb_clear_tstamp()
4389 skb->tstamp = 0; in skb_clear_tstamp()
4392 static inline ktime_t skb_tstamp(const struct sk_buff *skb) in skb_tstamp() argument
4394 if (skb->tstamp_type) in skb_tstamp()
4397 return skb->tstamp; in skb_tstamp()
4400 static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) in skb_tstamp_cond() argument
4402 if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp) in skb_tstamp_cond()
4403 return skb->tstamp; in skb_tstamp_cond()
4411 static inline u8 skb_metadata_len(const struct sk_buff *skb) in skb_metadata_len() argument
4413 return skb_shinfo(skb)->meta_len; in skb_metadata_len()
4416 static inline void *skb_metadata_end(const struct sk_buff *skb) in skb_metadata_end() argument
4418 return skb_mac_header(skb); in skb_metadata_end()
4473 static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) in skb_metadata_set() argument
4475 skb_shinfo(skb)->meta_len = meta_len; in skb_metadata_set()
4478 static inline void skb_metadata_clear(struct sk_buff *skb) in skb_metadata_clear() argument
4480 skb_metadata_set(skb, 0); in skb_metadata_clear()
4483 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
4487 void skb_clone_tx_timestamp(struct sk_buff *skb);
4488 bool skb_defer_rx_timestamp(struct sk_buff *skb);
4492 static inline void skb_clone_tx_timestamp(struct sk_buff *skb) in skb_clone_tx_timestamp() argument
4496 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) in skb_defer_rx_timestamp() argument
4504 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
4508 * must call this function to return the skb back to the stack with a timestamp.
4511 * @skb: clone of the original outgoing packet
4515 void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps);
4523 * skb_tstamp_tx - queue clone of skb with send time stamps
4527 * If the skb has a socket associated, then this function clones the
4528 * skb (thus sharing the actual data and optional structures), stores the optional hardware time stamping information (if non-NULL) or generates a software time stamp (otherwise), then queues the clone to the error queue of the socket. Errors are silently ignored.
4546 * @skb: A socket buffer.
4548 static inline void skb_tx_timestamp(struct sk_buff *skb) in skb_tx_timestamp() argument
4550 skb_clone_tx_timestamp(skb); in skb_tx_timestamp()
4551 if (skb_shinfo(skb)->tx_flags & (SKBTX_SW_TSTAMP | SKBTX_BPF)) in skb_tx_timestamp()
4552 skb_tstamp_tx(skb, NULL); in skb_tx_timestamp()
4556 * skb_complete_wifi_ack - deliver skb with wifi status
4558 * @skb: the original outgoing packet
4562 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
4564 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
4565 __sum16 __skb_checksum_complete(struct sk_buff *skb);
4567 static inline int skb_csum_unnecessary(const struct sk_buff *skb) in skb_csum_unnecessary() argument
4569 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || in skb_csum_unnecessary()
4570 skb->csum_valid || in skb_csum_unnecessary()
4571 (skb->ip_summed == CHECKSUM_PARTIAL && in skb_csum_unnecessary()
4572 skb_checksum_start_offset(skb) >= 0)); in skb_csum_unnecessary()
4577 * @skb: packet to process
4580 * the value of skb->csum. The latter can be used to supply the checksum of a pseudo header as used by TCP/UDP.
4588 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the hardware has already verified the correctness of the checksum.
4591 static inline __sum16 skb_checksum_complete(struct sk_buff *skb) in skb_checksum_complete() argument
4593 return skb_csum_unnecessary(skb) ? in skb_checksum_complete()
4594 0 : __skb_checksum_complete(skb); in skb_checksum_complete()
4597 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) in __skb_decr_checksum_unnecessary() argument
4599 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_decr_checksum_unnecessary()
4600 if (skb->csum_level == 0) in __skb_decr_checksum_unnecessary()
4601 skb->ip_summed = CHECKSUM_NONE; in __skb_decr_checksum_unnecessary()
4603 skb->csum_level--; in __skb_decr_checksum_unnecessary()
4607 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) in __skb_incr_checksum_unnecessary() argument
4609 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_incr_checksum_unnecessary()
4610 if (skb->csum_level < SKB_MAX_CSUM_LEVEL) in __skb_incr_checksum_unnecessary()
4611 skb->csum_level++; in __skb_incr_checksum_unnecessary()
4612 } else if (skb->ip_summed == CHECKSUM_NONE) { in __skb_incr_checksum_unnecessary()
4613 skb->ip_summed = CHECKSUM_UNNECESSARY; in __skb_incr_checksum_unnecessary()
4614 skb->csum_level = 0; in __skb_incr_checksum_unnecessary()
4618 static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) in __skb_reset_checksum_unnecessary() argument
4620 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_reset_checksum_unnecessary()
4621 skb->ip_summed = CHECKSUM_NONE; in __skb_reset_checksum_unnecessary()
4622 skb->csum_level = 0; in __skb_reset_checksum_unnecessary()
4631 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, bool zero_okay, __sum16 check) in __skb_checksum_validate_needed() argument
4635 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { in __skb_checksum_validate_needed()
4636 skb->csum_valid = 1; in __skb_checksum_validate_needed()
4637 __skb_decr_checksum_unnecessary(skb); in __skb_checksum_validate_needed()
4655 static inline void skb_checksum_complete_unset(struct sk_buff *skb) in skb_checksum_complete_unset() argument
4657 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_checksum_complete_unset()
4658 skb->ip_summed = CHECKSUM_NONE; in skb_checksum_complete_unset()
4666 * checksum is stored in skb->csum for use in __skb_checksum_complete
4670 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, bool complete, __wsum psum) in __skb_checksum_validate_complete() argument
4674 if (skb->ip_summed == CHECKSUM_COMPLETE) { in __skb_checksum_validate_complete()
4675 if (!csum_fold(csum_add(psum, skb->csum))) { in __skb_checksum_validate_complete()
4676 skb->csum_valid = 1; in __skb_checksum_validate_complete()
4681 skb->csum = psum; in __skb_checksum_validate_complete()
4683 if (complete || skb->len <= CHECKSUM_BREAK) { in __skb_checksum_validate_complete()
4686 csum = __skb_checksum_complete(skb); in __skb_checksum_validate_complete()
4687 skb->csum_valid = !csum; in __skb_checksum_validate_complete()
4694 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) in null_compute_pseudo() argument
4709 #define __skb_checksum_validate(skb, proto, complete, zero_okay, check, compute_pseudo) \ argument
4713 skb->csum_valid = 0; \
4714 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
4715 __ret = __skb_checksum_validate_complete(skb, \
4716 complete, compute_pseudo(skb, proto)); \
4720 #define skb_checksum_init(skb, proto, compute_pseudo) \ argument
4721 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
4723 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ argument
4724 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
4726 #define skb_checksum_validate(skb, proto, compute_pseudo) \ argument
4727 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
4729 #define skb_checksum_validate_zero_check(skb, proto, check, compute_pseudo) \ argument
4731 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4733 #define skb_checksum_simple_validate(skb) \ argument
4734 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
4736 static inline bool __skb_checksum_convert_check(struct sk_buff *skb) in __skb_checksum_convert_check() argument
4738 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); in __skb_checksum_convert_check()
4741 static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) in __skb_checksum_convert() argument
4743 skb->csum = ~pseudo; in __skb_checksum_convert()
4744 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_convert()
4747 #define skb_checksum_try_convert(skb, proto, compute_pseudo) \ argument
4749 if (__skb_checksum_convert_check(skb)) \
4750 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4753 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, u16 start, u16 offset) in skb_remcsum_adjust_partial() argument
4756 skb->ip_summed = CHECKSUM_PARTIAL; in skb_remcsum_adjust_partial()
4757 skb->csum_start = ((unsigned char *)ptr + start) - skb->head; in skb_remcsum_adjust_partial()
4758 skb->csum_offset = offset - start; in skb_remcsum_adjust_partial()
4762 * When called, ptr indicates the starting point for skb->csum when
4763 * ip_summed is CHECKSUM_COMPLETE. If we need checksum complete done
4764 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
4766 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, int start, int offset, bool nopartial) in skb_remcsum_process() argument
4772 skb_remcsum_adjust_partial(skb, ptr, start, offset); in skb_remcsum_process()
4776 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { in skb_remcsum_process()
4777 __skb_checksum_complete(skb); in skb_remcsum_process()
4778 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); in skb_remcsum_process()
4781 delta = remcsum_adjust(ptr, skb->csum, start, offset); in skb_remcsum_process()
4783 /* Adjust skb->csum since we changed the packet */ in skb_remcsum_process()
4784 skb->csum = csum_add(skb->csum, delta); in skb_remcsum_process()
4787 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) in skb_nfct() argument
4790 return (void *)(skb->_nfct & NFCT_PTRMASK); in skb_nfct()
4796 static inline unsigned long skb_get_nfct(const struct sk_buff *skb) in skb_get_nfct() argument
4799 return skb->_nfct; in skb_get_nfct()
4805 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) in skb_set_nfct() argument
4808 skb->slow_gro |= !!nfct; in skb_set_nfct()
4809 skb->_nfct = nfct; in skb_set_nfct()
4851 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, struct skb_ext *ext);
4853 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4854 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4857 static inline void skb_ext_put(struct sk_buff *skb) in skb_ext_put() argument
4859 if (skb->active_extensions) in skb_ext_put()
4860 __skb_ext_put(skb->extensions); in skb_ext_put()
4887 static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) in skb_ext_exist() argument
4889 return skb->active_extensions & (1 << id); in skb_ext_exist()
4892 static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_del() argument
4894 if (skb_ext_exist(skb, id)) in skb_ext_del()
4895 __skb_ext_del(skb, id); in skb_ext_del()
4898 static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) in skb_ext_find() argument
4900 if (skb_ext_exist(skb, id)) { in skb_ext_find()
4901 struct skb_ext *ext = skb->extensions; in skb_ext_find()
4909 static inline void skb_ext_reset(struct sk_buff *skb) in skb_ext_reset() argument
4911 if (unlikely(skb->active_extensions)) { in skb_ext_reset()
4912 __skb_ext_put(skb->extensions); in skb_ext_reset()
4913 skb->active_extensions = 0; in skb_ext_reset()
4917 static inline bool skb_has_extensions(struct sk_buff *skb) in skb_has_extensions() argument
4919 return unlikely(skb->active_extensions); in skb_has_extensions()
4922 static inline void skb_ext_put(struct sk_buff *skb) {} in skb_ext_put() argument
4923 static inline void skb_ext_reset(struct sk_buff *skb) {} in skb_ext_reset() argument
4924 static inline void skb_ext_del(struct sk_buff *skb, int unused) {} in skb_ext_del() argument
4927 static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } in skb_has_extensions() argument
4930 static inline void nf_reset_ct(struct sk_buff *skb) in nf_reset_ct() argument
4933 nf_conntrack_put(skb_nfct(skb)); in nf_reset_ct()
4934 skb->_nfct = 0; in nf_reset_ct()
4938 static inline void nf_reset_trace(struct sk_buff *skb) in nf_reset_trace() argument
4941 skb->nf_trace = 0; in nf_reset_trace()
4945 static inline void ipvs_reset(struct sk_buff *skb) in ipvs_reset() argument
4948 skb->ipvs_property = 0; in ipvs_reset()
4981 static inline void skb_init_secmark(struct sk_buff *skb) in skb_init_secmark() argument
4983 skb->secmark = 0; in skb_init_secmark()
4989 static inline void skb_init_secmark(struct sk_buff *skb) in skb_init_secmark() argument
4993 static inline int secpath_exists(const struct sk_buff *skb) in secpath_exists() argument
4996 return skb_ext_exist(skb, SKB_EXT_SEC_PATH); in secpath_exists()
5002 static inline bool skb_irq_freeable(const struct sk_buff *skb) in skb_irq_freeable() argument
5004 return !skb->destructor && in skb_irq_freeable()
5005 !secpath_exists(skb) && in skb_irq_freeable()
5006 !skb_nfct(skb) && in skb_irq_freeable()
5007 !skb->_skb_refdst && in skb_irq_freeable()
5008 !skb_has_frag_list(skb); in skb_irq_freeable()
5011 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) in skb_set_queue_mapping() argument
5013 skb->queue_mapping = queue_mapping; in skb_set_queue_mapping()
5016 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) in skb_get_queue_mapping() argument
5018 return skb->queue_mapping; in skb_get_queue_mapping()
5026 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) in skb_record_rx_queue() argument
5028 skb->queue_mapping = rx_queue + 1; in skb_record_rx_queue()
5031 static inline u16 skb_get_rx_queue(const struct sk_buff *skb) in skb_get_rx_queue() argument
5033 return skb->queue_mapping - 1; in skb_get_rx_queue()
5036 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) in skb_rx_queue_recorded() argument
5038 return skb->queue_mapping != 0; in skb_rx_queue_recorded()
5041 static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) in skb_set_dst_pending_confirm() argument
5043 skb->dst_pending_confirm = val; in skb_set_dst_pending_confirm()
5046 static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) in skb_get_dst_pending_confirm() argument
5048 return skb->dst_pending_confirm != 0; in skb_get_dst_pending_confirm()
5051 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) in skb_sec_path() argument
5054 return skb_ext_find(skb, SKB_EXT_SEC_PATH); in skb_sec_path()
5060 static inline bool skb_is_gso(const struct sk_buff *skb) in skb_is_gso() argument
5062 return skb_shinfo(skb)->gso_size; in skb_is_gso()
5065 /* Note: Should be called only if skb_is_gso(skb) is true */
5066 static inline bool skb_is_gso_v6(const struct sk_buff *skb) in skb_is_gso_v6() argument
5068 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; in skb_is_gso_v6()
5071 /* Note: Should be called only if skb_is_gso(skb) is true */
5072 static inline bool skb_is_gso_sctp(const struct sk_buff *skb) in skb_is_gso_sctp() argument
5074 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; in skb_is_gso_sctp()
5077 /* Note: Should be called only if skb_is_gso(skb) is true */
5078 static inline bool skb_is_gso_tcp(const struct sk_buff *skb) in skb_is_gso_tcp() argument
5080 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); in skb_is_gso_tcp()
5083 static inline void skb_gso_reset(struct sk_buff *skb) in skb_gso_reset() argument
5085 skb_shinfo(skb)->gso_size = 0; in skb_gso_reset()
5086 skb_shinfo(skb)->gso_segs = 0; in skb_gso_reset()
5087 skb_shinfo(skb)->gso_type = 0; in skb_gso_reset()
5106 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
5108 static inline bool skb_warn_if_lro(const struct sk_buff *skb) in skb_warn_if_lro() argument
5112 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_warn_if_lro()
5114 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { in skb_warn_if_lro()
5116 __skb_warn_lro_forwarding(skb); in skb_warn_if_lro()
5122 static inline void skb_forward_csum(struct sk_buff *skb) in skb_forward_csum() argument
5125 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_forward_csum()
5126 skb->ip_summed = CHECKSUM_NONE; in skb_forward_csum()
5130 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
5131 * @skb: skb to check
5137 static inline void skb_checksum_none_assert(const struct sk_buff *skb) in skb_checksum_none_assert() argument
5139 DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE); in skb_checksum_none_assert()
5142 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
5144 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
5145 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5146 unsigned int transport_len,
5147 __sum16(*skb_chkf)(struct sk_buff *skb));
5150 * skb_head_is_locked - Determine if the skb->head is locked down
5151 * @skb: skb to check
5154 * not cloned. This function returns true if the skb head is locked down
5158 static inline bool skb_head_is_locked(const struct sk_buff *skb) in skb_head_is_locked() argument
5160 return !skb->head_frag || skb_cloned(skb); in skb_head_is_locked()
5172 static inline __wsum lco_csum(struct sk_buff *skb) in lco_csum() argument
5174 unsigned char *csum_start = skb_checksum_start(skb); in lco_csum()
5175 unsigned char *l4_hdr = skb_transport_header(skb); in lco_csum()
5180 partial = ~csum_unfold(*(__force __sum16 *)(csum_start + skb->csum_offset)); in lco_csum()
5188 static inline bool skb_is_redirected(const struct sk_buff *skb) in skb_is_redirected() argument
5190 return skb->redirected; in skb_is_redirected()
5193 static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) in skb_set_redirected() argument
5195 skb->redirected = 1; in skb_set_redirected()
5197 skb->from_ingress = from_ingress; in skb_set_redirected()
5198 if (skb->from_ingress) in skb_set_redirected()
5199 skb_clear_tstamp(skb); in skb_set_redirected()
5203 static inline void skb_reset_redirect(struct sk_buff *skb) in skb_reset_redirect() argument
5205 skb->redirected = 0; in skb_reset_redirect()
5208 static inline void skb_set_redirected_noclear(struct sk_buff *skb, bool from_ingress) in skb_set_redirected_noclear() argument
5211 skb->redirected = 1; in skb_set_redirected_noclear()
5213 skb->from_ingress = from_ingress; in skb_set_redirected_noclear()
5217 static inline bool skb_csum_is_sctp(struct sk_buff *skb) in skb_csum_is_sctp() argument
5220 return skb->csum_not_inet; in skb_csum_is_sctp()
5226 static inline void skb_reset_csum_not_inet(struct sk_buff *skb) in skb_reset_csum_not_inet() argument
5228 skb->ip_summed = CHECKSUM_NONE; in skb_reset_csum_not_inet()
5230 skb->csum_not_inet = 0; in skb_reset_csum_not_inet()
5234 static inline void skb_set_kcov_handle(struct sk_buff *skb, const u64 kcov_handle) in skb_set_kcov_handle() argument
5238 skb->kcov_handle = kcov_handle; in skb_set_kcov_handle()
5242 static inline u64 skb_get_kcov_handle(struct sk_buff *skb) in skb_get_kcov_handle() argument
5245 return skb->kcov_handle; in skb_get_kcov_handle()
5251 static inline void skb_mark_for_recycle(struct sk_buff *skb) in skb_mark_for_recycle() argument
5254 skb->pp_recycle = 1; in skb_mark_for_recycle()
5258 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, ssize_t maxsize, gfp_t gfp);