Searched refs:process_queue (Results 1 – 4 of 4) sorted by relevance
231 struct sk_buff_head process_queue; in tun_napi_receive() local
235 __skb_queue_head_init(&process_queue); in tun_napi_receive()
238 skb_queue_splice_tail_init(queue, &process_queue); in tun_napi_receive()
241 while (received < budget && (skb = __skb_dequeue(&process_queue))) { in tun_napi_receive()
246 if (!skb_queue_empty(&process_queue)) { in tun_napi_receive()
248 skb_queue_splice(&process_queue, queue); in tun_napi_receive()
1478 struct sk_buff_head process_queue; in tun_rx_batched() local
1492 __skb_queue_head_init(&process_queue); in tun_rx_batched()
1493 skb_queue_splice_tail_init(queue, &process_queue); in tun_rx_batched()
1504 while ((nskb = __skb_dequeue(&process_queue))) { in tun_rx_batched()
92 return skb_queue_len_lockless(&sd->process_queue); in softnet_process_queue_len()
6493 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { in flush_backlog()
6495 __skb_unlink(skb, &sd->process_queue); in flush_backlog()
6514 /* as insertion into process_queue happens with the rps lock held, in flush_required()
6515 * process_queue access may race only with dequeue in flush_required()
6518 !skb_queue_empty_lockless(&sd->process_queue); in flush_required()
6525 * input_pkt_queue and process_queue even if the latter could end-up
6642 while ((skb = __skb_dequeue(&sd->process_queue))) { in process_backlog()
6671 &sd->process_queue); in process_backlog()
12729 * We properly handle process_queue & input_pkt_queue later. in dev_cpu_dead()
12756 while ((skb = __skb_dequeue(&oldsd->process_queue))) { in dev_cpu_dead()
[all...]
3518 struct sk_buff_head process_queue; member