/linux/net/sched/
  sch_choke.c, in choke_match_flow():
    148  static bool choke_match_flow(struct sk_buff *skb1,
    153  if (skb1->protocol != skb2->protocol)
    156  if (!choke_skb_cb(skb1)->keys_valid) {
    157          choke_skb_cb(skb1)->keys_valid = 1;
    158          skb_flow_dissect_flow_keys(skb1, &temp, 0);
    159          make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
    168  return !memcmp(&choke_skb_cb(skb1)->keys,
    170                 sizeof(choke_skb_cb(skb1)->keys));
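The choke_match_flow() hits above show a common pattern: the flow digest for a packet is computed at most once, cached in the packet's control block behind a keys_valid flag, and later compared with a plain memcmp(). Below is a minimal userspace sketch of that pattern; the pkt/pkt_cb structs and the toy string hash are illustrative stand-ins for the kernel's skb control block and flow dissector, not its actual API.

/* Minimal sketch: lazily compute a per-packet flow digest, cache it, and
 * compare digests with memcmp().  Struct names and the hash are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow_digest { uint32_t hash; };

struct pkt_cb {
        bool keys_valid;                /* digest computed yet? */
        struct flow_digest keys;        /* cached flow digest */
};

struct pkt {
        uint16_t protocol;
        const char *flow;               /* e.g. "saddr:daddr:sport:dport" */
        struct pkt_cb cb;
};

static void make_digest(struct flow_digest *d, const char *flow)
{
        uint32_t h = 5381;              /* simple djb2-style hash, stand-in only */

        while (*flow)
                h = h * 33 + (unsigned char)*flow++;
        d->hash = h;
}

static bool match_flow(struct pkt *p1, struct pkt *p2)
{
        if (p1->protocol != p2->protocol)
                return false;
        if (!p1->cb.keys_valid) {       /* fill the cached digest on first use */
                p1->cb.keys_valid = true;
                make_digest(&p1->cb.keys, p1->flow);
        }
        if (!p2->cb.keys_valid) {
                p2->cb.keys_valid = true;
                make_digest(&p2->cb.keys, p2->flow);
        }
        return !memcmp(&p1->cb.keys, &p2->cb.keys, sizeof(p1->cb.keys));
}

int main(void)
{
        struct pkt a = { 0x0800, "10.0.0.1:10.0.0.2:1234:80", { false } };
        struct pkt b = { 0x0800, "10.0.0.1:10.0.0.2:1234:80", { false } };

        printf("same flow: %d\n", match_flow(&a, &b));
        return 0;
}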
/linux/net/llc/
  llc_sap.c, in llc_do_mcast():
    369  struct sk_buff *skb1;
    373          skb1 = skb_clone(skb, GFP_ATOMIC);
    374          if (!skb1) {
    379          llc_sap_rcv(sap, skb1, stack[i]);
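llc_do_mcast() clones the original skb once per destination; the same skb_clone()-per-target shape appears again in the batman-adv and vxlan hits below. The sketch that follows shows the pattern in plain C under the assumption that a failed clone is simply skipped for that destination; clone_pkt() and send_one() are illustrative names, and a real kernel path would typically also account the drop.

/* Clone-per-destination delivery: keep the original, give each destination
 * its own (possibly failing) copy.  Plain duplication stands in for skb_clone(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { size_t len; unsigned char data[64]; };

static struct pkt *clone_pkt(const struct pkt *p)
{
        struct pkt *c = malloc(sizeof(*c));     /* GFP_ATOMIC analogue: may fail */

        if (c)
                memcpy(c, p, sizeof(*c));
        return c;
}

static void send_one(struct pkt *p, int dest)
{
        printf("dest %d gets %zu bytes\n", dest, p->len);
        free(p);                                /* the send path consumes the clone */
}

static void send_to_all(const struct pkt *p, const int *dests, int n)
{
        for (int i = 0; i < n; i++) {
                struct pkt *c = clone_pkt(p);

                if (!c)
                        continue;               /* drop for this destination only */
                send_one(c, dests[i]);
        }
}

int main(void)
{
        struct pkt p = { .len = 5, .data = "hello" };
        int dests[] = { 1, 2, 3 };

        send_to_all(&p, dests, 3);
        return 0;
}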
/linux/net/batman-adv/
  send.c, in batadv_send_outstanding_bcast_packet():
    1056  struct sk_buff *skb1;
    1075          skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
    1076          if (!skb1)
    1079          batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing);
/linux/net/ipv4/
  tcp_input.c, in tcp_data_queue_ofo():
    4922  struct sk_buff *skb1;
    4986  skb1 = rb_to_skb(parent);
    4987  if (before(seq, TCP_SKB_CB(skb1)->seq)) {
    4991  if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
    4992  if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
    5002  if (after(seq, TCP_SKB_CB(skb1)->seq)) {
    5004  tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
    5006  /* skb's seq == skb1's seq and skb covers skb1.
    5007   * Replace skb1 with skb.
  tcp_input.c, in tcp_rbtree_insert():
    5305  struct sk_buff *skb1;
  [all ...]
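The tcp_data_queue_ofo() lines walk an rbtree of out-of-order segments and use the wrap-safe before()/after() comparisons to decide whether the new segment descends left, descends right, is already covered by skb1, overlaps its tail, or replaces it outright. A compact sketch of that per-node decision follows; before()/after() are reproduced in their usual signed-difference form, while classify() and the enum names are illustrative.

/* Per-node decision when inserting a new out-of-order segment [seq, end_seq)
 * against an existing node skb1 holding [skb1_seq, skb1_end_seq). */
#include <stdint.h>
#include <stdio.h>

static inline int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;      /* wrap-safe "seq1 < seq2" */
}
#define after(seq2, seq1) before(seq1, seq2)

enum ofo_pos { GO_LEFT, GO_RIGHT, ALREADY_COVERED, OVERLAPS_TAIL, REPLACES_SKB1 };

static enum ofo_pos classify(uint32_t seq, uint32_t end_seq,
                             uint32_t skb1_seq, uint32_t skb1_end_seq)
{
        if (before(seq, skb1_seq))
                return GO_LEFT;                 /* descend left in the tree */
        if (before(seq, skb1_end_seq)) {        /* new data starts inside skb1 */
                if (!after(end_seq, skb1_end_seq))
                        return ALREADY_COVERED; /* drop it, answer with a DSACK */
                if (after(seq, skb1_seq))
                        return OVERLAPS_TAIL;   /* DSACK seq..skb1_end_seq, keep going */
                return REPLACES_SKB1;           /* same start, new skb covers skb1 */
        }
        return GO_RIGHT;                        /* no overlap on this side, descend right */
}

int main(void)
{
        /* segment 100..200 against an skb1 holding 100..150 */
        printf("%d\n", classify(100, 200, 100, 150));   /* prints 4 (REPLACES_SKB1) */
        return 0;
}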
/linux/net/mptcp/
  protocol.c, in mptcp_data_queue_ofo():
    190  struct sk_buff *skb1;
    238  skb1 = rb_to_skb(parent);
    239  if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
    243  if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
    244  if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
    250  if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
    253   * | skb1 |
    257  /* skb's seq == skb1's seq and skb covers skb1.
    258   * Replace skb1 with skb.
  [all ...]
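mptcp_data_queue_ofo() performs the same out-of-order rbtree walk keyed on 64-bit data-sequence numbers, so the comparisons become before64()/after64(). A minimal sketch of the wrap-safe 64-bit comparison follows, with illustrative test values.

/* Wrap-safe 64-bit sequence comparison in the style of before64()/after64():
 * the unsigned difference is cast to a signed type so ordering survives wrap. */
#include <stdint.h>
#include <stdio.h>

static inline int before64(uint64_t seq1, uint64_t seq2)
{
        return (int64_t)(seq1 - seq2) < 0;
}
#define after64(seq2, seq1) before64(seq1, seq2)

int main(void)
{
        uint64_t near_wrap = UINT64_MAX - 5;

        /* 10 counts as "after" UINT64_MAX - 5 once the counter wraps */
        printf("%d %d\n", before64(near_wrap, 10), after64(10, near_wrap));
        return 0;
}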
/linux/drivers/atm/
  iphase.c, in ia_tx_poll():
    641  struct sk_buff *skb = NULL, *skb1 = NULL;
    666  skb1 = skb_dequeue(&iavcc->txing_skb);
    667  while (skb1 && (skb1 != skb)) {
    668          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
    672          if ((vcc->pop) && (skb1->len != 0))
    674                  vcc->pop(vcc, skb1);
    676                  (long)skb1);)
    679          dev_kfree_skb_any(skb1);
    680          skb1
  [all ...]
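ia_tx_poll() drains the queue of in-flight transmit buffers up to the one the hardware just completed, handing each to the owner's release callback (vcc->pop) or freeing it. The sketch below reproduces that drain loop in plain C under simplifying assumptions: a singly linked queue, a done flag instead of IA_SKB_STATE, and a single on_done callback standing in for vcc->pop/dev_kfree_skb_any.

/* Reap completed transmit buffers, in submission order, up to 'completed'. */
#include <stdio.h>

struct buf {
        int id;
        int done;                       /* set once the hardware finished it */
        struct buf *next;
};

struct txq { struct buf *head; };

static struct buf *dequeue(struct txq *q)
{
        struct buf *b = q->head;

        if (b)
                q->head = b->next;
        return b;
}

static void reap_until(struct txq *q, struct buf *completed,
                       void (*on_done)(struct buf *))
{
        struct buf *b = dequeue(q);

        while (b && b != completed) {
                if (!b->done)           /* should not happen: completion is in order */
                        fprintf(stderr, "buf %d reaped before completion\n", b->id);
                on_done(b);             /* owner's release hook, like vcc->pop */
                b = dequeue(q);
        }
        if (b)
                on_done(b);
}

static void release(struct buf *b)
{
        printf("released buf %d\n", b->id);     /* buffers are stack-owned here, no free */
}

int main(void)
{
        struct buf b3 = { 3, 1, NULL }, b2 = { 2, 1, &b3 }, b1 = { 1, 1, &b2 };
        struct txq q = { &b1 };

        reap_until(&q, &b3, release);   /* reaps 1, 2, then 3 itself */
        return 0;
}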
/linux/drivers/net/wireless/ath/ath6kl/
  txrx.c, in ath6kl_rx():
    1315  struct sk_buff *skb1 = NULL;
    1572          skb1 = skb_copy(skb, GFP_ATOMIC);
    1583          skb1 = skb;
    1590  if (skb1)
    1591          ath6kl_data_tx(skb1, vif->ndev);
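In the ath6kl_rx() hits, skb1 is either a full skb_copy() of the received frame or the original skb itself, and whatever ends up in skb1 is retransmitted with ath6kl_data_tx(). The sketch below illustrates the underlying decision, assuming (an assumption for the example, not taken from the driver) that a copy is made when the frame must also be delivered locally and that the original is reused when it only needs forwarding; all names are illustrative.

/* Duplicate when both the TX path and the local stack need the frame,
 * hand over the original when only the TX path does. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame { size_t len; unsigned char data[64]; };

static struct frame *frame_copy(const struct frame *f)
{
        struct frame *c = malloc(sizeof(*c));

        if (c)
                memcpy(c, f, sizeof(*c));
        return c;
}

static void tx_to_station(struct frame *f) { printf("tx %zu bytes\n", f->len); free(f); }
static void deliver_up(struct frame *f)    { printf("rx %zu bytes\n", f->len); free(f); }

static void rx_forward(struct frame *f, bool also_deliver_locally)
{
        struct frame *fwd;

        if (also_deliver_locally) {
                fwd = frame_copy(f);    /* both paths need the data: duplicate it */
        } else {
                fwd = f;                /* TX path takes ownership of the original */
                f = NULL;
        }

        if (fwd)
                tx_to_station(fwd);
        if (f)
                deliver_up(f);
}

int main(void)
{
        struct frame *f = calloc(1, sizeof(*f));

        if (!f)
                return 1;
        f->len = 42;
        rx_forward(f, true);            /* copy goes out, original goes up */
        return 0;
}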
/linux/drivers/net/vxlan/
  vxlan_mdb.c, in vxlan_mdb_xmit():
    1693  struct sk_buff *skb1;
    1705          skb1 = skb_clone(skb, GFP_ATOMIC);
    1706          if (skb1)
    1707                  vxlan_xmit_one(skb1, vxlan->dev, src_vni,
  vxlan_core.c, in vxlan_xmit():
    2795  struct sk_buff *skb1;
    2801          skb1 = skb_clone(skb, GFP_ATOMIC);
    2802          if (skb1)
    2803                  vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
/linux/include/linux/
  skbuff.h:
    1346  /* Layout of fast clones : [skb1][skb2][fclone_ref] */
    1348  struct sk_buff skb1;                                    (member of struct sk_buff_fclones)
    1369  fclones = container_of(skb, struct sk_buff_fclones, skb1);   (in skb_fclone_busy())
    1647  static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
    1651  return skb2->decrypted - skb1->decrypted;               (in skb_cmp_decrypted())
    1760  static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
    1763  return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);    (in skb_pure_zcopy_same())
    4194  void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
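The skbuff.h hits document the fast-clone layout, where skb1, skb2 and a shared reference count sit in one allocation and container_of() recovers the enclosing struct from a pointer to the first buffer. A small userspace sketch of that layout and recovery follows, with container_of() spelled out via offsetof(); the pkt/pkt_fclones names are illustrative, only the layout idea is taken from skbuff.h.

/* Two buffers plus a shared refcount in one allocation; recover the owner
 * from a pointer to its first member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pkt { int len; };

struct pkt_fclones {
        struct pkt skb1;        /* handed out first */
        struct pkt skb2;        /* the "fast clone", handed out on clone */
        int fclone_ref;         /* shared refcount for the pair */
};

int main(void)
{
        struct pkt_fclones fc = { { 100 }, { 0 }, 1 };
        struct pkt *skb = &fc.skb1;

        /* Given only 'skb', get back to the enclosing allocation and ask
         * whether its companion clone slot is still in use. */
        struct pkt_fclones *owner = container_of(skb, struct pkt_fclones, skb1);

        printf("companion busy: %d\n", owner->fclone_ref > 1);
        return 0;
}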
/linux/drivers/net/ethernet/qlogic/
  qla3xxx.c, in ql_process_macip_rx_intr():
    2042  struct sk_buff *skb1 = NULL, *skb2;
    2056  skb1 = lrg_buf_cb1->skb;
    2058  if (*((u16 *) skb1->data) != 0xFFFF)
    2078  skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
/linux/Documentation/networking/
  snmp_counter.rst:
    725  10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
    726  15 in skb2 would be moved to skb1. This operation is 'shift'. If a
    727  SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
    728  seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
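The snmp_counter.rst excerpt describes TCP's SACK shift and merge operations: a SACK block that covers only part of skb2 shifts the covered bytes into skb1, while one that covers all of skb2 moves everything and discards skb2. The sketch below works through exactly the two examples from the text using inclusive sequence ranges; it shows only the range arithmetic, whereas the kernel's real shift logic (tcp_shift_skb_data()) also checks segment counts and GSO constraints, and it assumes the SACK block already covers skb1, so only its end matters.

/* Worked example of the shift vs merge cases from the excerpt above. */
#include <stdio.h>

struct seg { unsigned start, end; };    /* [start, end], inclusive sequence range */

static void sack_collapse(struct seg *skb1, struct seg *skb2, struct seg sack)
{
        unsigned covered_end = sack.end < skb2->end ? sack.end : skb2->end;

        if (covered_end < skb2->start)
                return;                         /* SACK block does not reach skb2 */

        if (covered_end >= skb2->end) {
                /* merge: all of skb2 moves into skb1, skb2 is discarded */
                skb1->end = skb2->end;
                skb2->start = skb2->end = 0;
                puts("merged");
        } else {
                /* shift: only the SACKed head of skb2 moves into skb1 */
                skb1->end = covered_end;
                skb2->start = covered_end + 1;
                puts("shifted");
        }
}

int main(void)
{
        struct seg skb1 = { 10, 13 }, skb2 = { 14, 20 };

        sack_collapse(&skb1, &skb2, (struct seg){ 10, 15 });    /* shift: 14..15 move */
        printf("skb1 %u..%u, skb2 %u..%u\n", skb1.start, skb1.end, skb2.start, skb2.end);

        struct seg a = { 10, 13 }, b = { 14, 20 };

        sack_collapse(&a, &b, (struct seg){ 10, 20 });          /* merge: all of skb2 moves */
        printf("skb1 %u..%u, skb2 empty\n", a.start, a.end);
        return 0;
}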