Lines matching "charge", "ctrl" and "value" in drivers/net/xen-netback/netback.c
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
8 * Copyright (c) 2002-2005, K A Fraser
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
100 * for xen-netfront with the XDP_PACKET_HEADROOM offset
121 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
131 (vif->pending_tx_info[pending_idx].callback_struct)
137 u16 pending_idx = ubuf->desc; in ubuf_to_queue()
140 return container_of(temp - pending_idx, in ubuf_to_queue()
157 return i & (MAX_PENDING_REQS-1); in pending_index()
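
pending_index() above reduces a free-running counter to a slot in the pending ring by masking with MAX_PENDING_REQS-1, which is only equivalent to a modulo because the ring size is a power of two. A minimal, self-contained sketch of the same idiom (the ring size and counter value here are made up for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 256u                      /* must be a power of two */

    static unsigned int ring_index(unsigned int i)
    {
        /* Same result as i % RING_SIZE, but a single AND. */
        return i & (RING_SIZE - 1);
    }

    int main(void)
    {
        /* The mask trick silently breaks for non-power-of-two sizes. */
        assert((RING_SIZE & (RING_SIZE - 1)) == 0);

        unsigned int prod = 1000;               /* free-running producer counter */
        printf("slot %u\n", ring_index(prod));  /* prints "slot 232" */
        return 0;
    }
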
162 wake_up(&queue->wq); in xenvif_kick_thread()
169 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
172 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
174 &queue->eoi_pending) & in xenvif_napi_schedule_or_enable_events()
176 xen_irq_lateeoi(queue->tx_irq, 0); in xenvif_napi_schedule_or_enable_events()
187 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
190 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
191 if (max_credit < queue->remaining_credit) in tx_add_credit()
194 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
195 queue->rate_limited = false; in tx_add_credit()
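
tx_add_credit() above tops up the queue's transmit credit: it adds one credit_bytes quantum, guards against the unsigned sum wrapping, and clamps the result to a burst allowance of at least 131072 bytes. A simplified userspace sketch of that saturate-and-clamp; the saturation to ULONG_MAX models what the elided lines presumably do, and the function name is invented:

    #include <limits.h>
    #include <stdio.h>

    static unsigned long add_credit(unsigned long remaining,
                                    unsigned long credit_bytes)
    {
        /* Allow a burst big enough for a maximum-size packet. */
        unsigned long max_burst = credit_bytes > 131072UL ? credit_bytes
                                                          : 131072UL;
        unsigned long max_credit = remaining + credit_bytes;

        if (max_credit < remaining)             /* unsigned overflow: saturate */
            max_credit = ULONG_MAX;

        return max_credit < max_burst ? max_credit : max_burst;
    }

    int main(void)
    {
        printf("%lu\n", add_credit(1000UL, 50000UL));   /* 51000 */
        printf("%lu\n", add_credit(ULONG_MAX, 1UL));    /* clamped to 131072 */
        return 0;
    }
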
209 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
215 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
218 queue->tx.req_cons = cons; in xenvif_tx_err()
223 netdev_err(vif->dev, "fatal error; disabling device\n"); in xenvif_fatal_tx_err()
224 vif->disabled = true; in xenvif_fatal_tx_err()
226 if (vif->num_queues) in xenvif_fatal_tx_err()
227 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
236 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
241 if (!(first->flags & XEN_NETTXF_more_data)) in xenvif_count_requests()
248 netdev_err(queue->vif->dev, in xenvif_count_requests()
251 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
252 return -ENODATA; in xenvif_count_requests()
259 netdev_err(queue->vif->dev, in xenvif_count_requests()
262 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
263 return -E2BIG; in xenvif_count_requests()
268 * the historical MAX_SKB_FRAGS value 18 to honor the in xenvif_count_requests()
275 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
278 drop_err = -E2BIG; in xenvif_count_requests()
284 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
287 * first->size overflowed and following slots will in xenvif_count_requests()
295 if (!drop_err && txp->size > first->size) { in xenvif_count_requests()
297 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
299 txp->size, first->size); in xenvif_count_requests()
300 drop_err = -EIO; in xenvif_count_requests()
303 first->size -= txp->size; in xenvif_count_requests()
306 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) { in xenvif_count_requests()
307 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
308 txp->offset, txp->size); in xenvif_count_requests()
309 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
310 return -EINVAL; in xenvif_count_requests()
313 more_data = txp->flags & XEN_NETTXF_more_data; in xenvif_count_requests()
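
xenvif_count_requests() walks the chain of tx slots linked by XEN_NETTXF_more_data, bounding the slot count and rejecting any slot whose offset plus size crosses a page. A schematic walker over an array of fake descriptors; the struct, limits and return convention are illustrative, not the Xen ABI:

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define MAX_SLOTS 18                /* mirrors the historical frontend limit */

    struct fake_txreq {
        unsigned int offset;
        unsigned int size;
        int more_data;                  /* stands in for XEN_NETTXF_more_data */
    };

    /* Count the extra slots of one packet, or return -1 on a bad chain. */
    static int count_extra_slots(const struct fake_txreq *req, int avail)
    {
        int slots = 0;

        while (req[slots].more_data) {
            const struct fake_txreq *txp = &req[++slots];

            if (slots >= avail || slots > MAX_SLOTS)
                return -1;                              /* too many slots */
            if (txp->offset + txp->size > PAGE_SIZE)
                return -1;                              /* crosses a page */
        }
        return slots;
    }

    int main(void)
    {
        struct fake_txreq chain[] = {
            { 0,  1000, 1 },
            { 0,  2000, 1 },
            { 64, 500,  0 },
        };
        printf("%d extra slots\n", count_extra_slots(chain, 3));   /* 2 */
        return 0;
    }
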
335 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
336 #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
337 #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
345 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
348 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
350 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
352 queue->pending_tx_info[pending_idx].extra_count = extra_count; in xenvif_tx_create_map_op()
361 BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb)); in xenvif_alloc_skb()
369 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
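
XENVIF_TX_CB() overlays a driver-private control block on the skb's fixed-size cb[] scratch area, and the BUILD_BUG_ON above makes the build fail if that struct ever outgrows the buffer. The same pattern in portable C11, with invented field names and a static_assert standing in for BUILD_BUG_ON:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    struct packet {
        _Alignas(8) char cb[48];        /* opaque per-packet scratch space */
        /* ... payload pointers, lengths, etc. ... */
    };

    struct tx_cb {                      /* illustrative private control block */
        unsigned short pending_idx[4];
        unsigned char  copy_count;
        unsigned int   split_mask;
    };

    #define TX_CB(p) ((struct tx_cb *)(p)->cb)

    /* Compile-time guarantee that the overlay fits the scratch area. */
    static_assert(sizeof(struct tx_cb) <= sizeof(((struct packet *)0)->cb),
                  "tx_cb does not fit in packet->cb");

    int main(void)
    {
        struct packet p;

        memset(&p, 0, sizeof(p));
        TX_CB(&p)->copy_count = 3;
        printf("%u\n", TX_CB(&p)->copy_count);
        return 0;
    }
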
386 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
390 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; in xenvif_get_requests()
391 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; in xenvif_get_requests()
394 nr_slots = shinfo->nr_frags + frag_overflow + 1; in xenvif_get_requests()
397 XENVIF_TX_CB(skb)->split_mask = 0; in xenvif_get_requests()
402 int amount = data_len > txp->size ? txp->size : data_len; in xenvif_get_requests()
405 cop->source.u.ref = txp->gref; in xenvif_get_requests()
406 cop->source.domid = queue->vif->domid; in xenvif_get_requests()
407 cop->source.offset = txp->offset; in xenvif_get_requests()
409 cop->dest.domid = DOMID_SELF; in xenvif_get_requests()
410 cop->dest.offset = (offset_in_page(skb->data + in xenvif_get_requests()
411 skb_headlen(skb) - in xenvif_get_requests()
413 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) in xenvif_get_requests()
414 - data_len); in xenvif_get_requests()
417 if (cop->dest.offset + amount > XEN_PAGE_SIZE) { in xenvif_get_requests()
418 amount = XEN_PAGE_SIZE - cop->dest.offset; in xenvif_get_requests()
419 XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb); in xenvif_get_requests()
423 cop->len = amount; in xenvif_get_requests()
424 cop->flags = GNTCOPY_source_gref; in xenvif_get_requests()
426 index = pending_index(queue->pending_cons); in xenvif_get_requests()
427 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
434 data_len -= amount; in xenvif_get_requests()
436 if (amount == txp->size) { in xenvif_get_requests()
439 memcpy(&queue->pending_tx_info[pending_idx].req, in xenvif_get_requests()
441 queue->pending_tx_info[pending_idx].extra_count = in xenvif_get_requests()
448 queue->pending_cons++; in xenvif_get_requests()
449 nr_slots--; in xenvif_get_requests()
455 txp->offset += amount; in xenvif_get_requests()
456 txp->size -= amount; in xenvif_get_requests()
460 for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; in xenvif_get_requests()
461 nr_slots--) { in xenvif_get_requests()
462 if (unlikely(!txp->size)) { in xenvif_get_requests()
468 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
469 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
472 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
473 ++shinfo->nr_frags; in xenvif_get_requests()
485 frags = shinfo->frags; in xenvif_get_requests()
487 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { in xenvif_get_requests()
488 if (unlikely(!txp->size)) { in xenvif_get_requests()
494 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
495 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
498 frag_set_pending_idx(&frags[shinfo->nr_frags], in xenvif_get_requests()
500 ++shinfo->nr_frags; in xenvif_get_requests()
504 if (shinfo->nr_frags) { in xenvif_get_requests()
505 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
518 (*copy_ops) = cop - queue->tx_copy_ops; in xenvif_get_requests()
519 (*map_ops) = gop - queue->tx_map_ops; in xenvif_get_requests()
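
Inside xenvif_get_requests(), a copy whose destination would cross a XEN_PAGE_SIZE boundary is truncated at the boundary and the remainder is handled by an extra copy operation, recorded in split_mask. A small sketch of chopping a copy into page-bounded chunks; printf stands in for the actual grant-copy operation and the function name is invented:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /*
     * Emit one chunk per destination page so that no single copy
     * operation crosses a page boundary.
     */
    static void copy_in_page_chunks(unsigned int dst_offset, unsigned int len)
    {
        while (len) {
            unsigned int room  = PAGE_SIZE - (dst_offset % PAGE_SIZE);
            unsigned int chunk = len < room ? len : room;

            printf("copy %u bytes at offset %u\n", chunk, dst_offset);

            dst_offset += chunk;
            len        -= chunk;
        }
    }

    int main(void)
    {
        /* 6000 bytes starting 3000 bytes into a page: 1096 + 4096 + 808. */
        copy_in_page_chunks(3000, 6000);
        return 0;
    }
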
526 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
528 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
533 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
539 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
541 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
546 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
560 /* If this is non-NULL, we are currently checking the frag_list skb, and in xenvif_tx_check_gop()
564 int nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
566 frag_get_pending_idx(&shinfo->frags[0]) == in xenvif_tx_check_gop()
567 copy_pending_idx(skb, copy_count(skb) - 1); in xenvif_tx_check_gop()
576 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
579 if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) { in xenvif_tx_check_gop()
582 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
586 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
592 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
594 (*gopp_copy)->status, in xenvif_tx_check_gop()
596 (*gopp_copy)->source.u.ref); in xenvif_tx_check_gop()
598 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
609 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); in xenvif_tx_check_gop()
612 newerr = gop_map->status; in xenvif_tx_check_gop()
617 gop_map->handle); in xenvif_tx_check_gop()
637 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
640 gop_map->status, in xenvif_tx_check_gop()
642 gop_map->ref); in xenvif_tx_check_gop()
652 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); in xenvif_tx_check_gop()
662 for (j = 0; j < first_shinfo->nr_frags; j++) { in xenvif_tx_check_gop()
663 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); in xenvif_tx_check_gop()
676 shinfo = skb_shinfo(shinfo->frag_list); in xenvif_tx_check_gop()
677 nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
689 int nr_frags = shinfo->nr_frags; in xenvif_fill_frags()
694 skb_frag_t *frag = shinfo->frags + i; in xenvif_fill_frags()
703 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
712 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
714 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); in xenvif_fill_frags()
715 skb->len += txp->size; in xenvif_fill_frags()
716 skb->data_len += txp->size; in xenvif_fill_frags()
717 skb->truesize += txp->size; in xenvif_fill_frags()
720 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
730 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
733 if (unlikely(work_to_do-- <= 0)) { in xenvif_get_extras()
734 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
735 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
736 return -EBADR; in xenvif_get_extras()
739 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
741 queue->tx.req_cons = ++cons; in xenvif_get_extras()
746 netdev_err(queue->vif->dev, in xenvif_get_extras()
748 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
749 return -EINVAL; in xenvif_get_extras()
752 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); in xenvif_get_extras()
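
xenvif_get_extras() stores each extra-info segment into a small array indexed by type - 1, so the callers below can test extras[TYPE - 1].type to see whether a given extra (GSO, hash, mcast add/del) was supplied. A trimmed-down illustration with invented type constants:

    #include <stdio.h>
    #include <string.h>

    enum { EXTRA_NONE, EXTRA_GSO, EXTRA_HASH, EXTRA_MAX };

    struct extra_info {
        unsigned char type;             /* 0 (EXTRA_NONE) means "not present" */
        unsigned int  value;
    };

    int main(void)
    {
        struct extra_info extras[EXTRA_MAX - 1];
        struct extra_info incoming = { EXTRA_GSO, 1448 };

        memset(extras, 0, sizeof(extras));

        /* File it by type so presence can be tested by indexing later. */
        memcpy(&extras[incoming.type - 1], &incoming, sizeof(incoming));

        if (extras[EXTRA_GSO - 1].type)
            printf("gso size %u\n", extras[EXTRA_GSO - 1].value);
        if (!extras[EXTRA_HASH - 1].type)
            printf("no hash extra\n");
        return 0;
    }
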
762 if (!gso->u.gso.size) { in xenvif_set_skb_gso()
763 netdev_err(vif->dev, "GSO size must not be zero.\n"); in xenvif_set_skb_gso()
765 return -EINVAL; in xenvif_set_skb_gso()
768 switch (gso->u.gso.type) { in xenvif_set_skb_gso()
770 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
773 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
776 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); in xenvif_set_skb_gso()
778 return -EINVAL; in xenvif_set_skb_gso()
781 skb_shinfo(skb)->gso_size = gso->u.gso.size; in xenvif_set_skb_gso()
796 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { in checksum_setup()
797 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
798 skb->ip_summed = CHECKSUM_PARTIAL; in checksum_setup()
802 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ in checksum_setup()
803 if (skb->ip_summed != CHECKSUM_PARTIAL) in checksum_setup()
812 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
813 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
816 if (timer_pending(&queue->credit_timeout)) { in tx_credit_exceeded()
817 queue->rate_limited = true; in tx_credit_exceeded()
823 queue->credit_window_start = now; in tx_credit_exceeded()
828 if (size > queue->remaining_credit) { in tx_credit_exceeded()
829 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
831 queue->credit_window_start = next_credit; in tx_credit_exceeded()
832 queue->rate_limited = true; in tx_credit_exceeded()
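
tx_credit_exceeded() enforces the per-queue rate limit: once a full credit window (credit_usec) has elapsed the allowance is replenished, and a packet larger than the remaining credit arms a timer for the end of the window and marks the queue rate_limited. A simplified token-bucket version of the decision, driven by a caller-supplied millisecond clock (no timer, names invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct bucket {
        unsigned long window_start_ms;  /* start of the current window   */
        unsigned long window_len_ms;    /* length of one credit window   */
        unsigned long credit_bytes;     /* allowance granted per window  */
        unsigned long remaining;        /* allowance left in this window */
    };

    /* Return true if a packet of 'size' bytes must wait for the next window. */
    static bool credit_exceeded(struct bucket *b, unsigned long size,
                                unsigned long now_ms)
    {
        if (now_ms - b->window_start_ms >= b->window_len_ms) {
            /* Window elapsed: start a new one with a fresh allowance. */
            b->window_start_ms = now_ms;
            b->remaining = b->credit_bytes;
        }

        if (size > b->remaining)
            return true;                /* the driver would arm a timer here */

        b->remaining -= size;
        return false;
    }

    int main(void)
    {
        struct bucket b = { 0, 100, 10000, 10000 };

        printf("%d\n", credit_exceeded(&b, 8000, 10));    /* 0: fits        */
        printf("%d\n", credit_exceeded(&b, 8000, 20));    /* 1: over budget */
        printf("%d\n", credit_exceeded(&b, 8000, 150));   /* 0: new window  */
        return 0;
    }
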
849 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { in xenvif_mcast_add()
851 netdev_err(vif->dev, in xenvif_mcast_add()
853 return -ENOSPC; in xenvif_mcast_add()
858 return -ENOMEM; in xenvif_mcast_add()
860 ether_addr_copy(mcast->addr, addr); in xenvif_mcast_add()
861 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); in xenvif_mcast_add()
862 vif->fe_mcast_count++; in xenvif_mcast_add()
871 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_del()
872 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_del()
873 --vif->fe_mcast_count; in xenvif_mcast_del()
874 list_del_rcu(&mcast->entry); in xenvif_mcast_del()
886 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_match()
887 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_match()
902 while (!list_empty(&vif->fe_mcast_addr)) { in xenvif_mcast_addr_list_free()
905 mcast = list_first_entry(&vif->fe_mcast_addr, in xenvif_mcast_addr_list_free()
908 --vif->fe_mcast_count; in xenvif_mcast_addr_list_free()
909 list_del(&mcast->entry); in xenvif_mcast_addr_list_free()
923 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
926 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; in xenvif_tx_build_gops()
932 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
934 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
937 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
939 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
943 work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
947 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
949 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
951 /* Credit-based scheduling. */ in xenvif_tx_build_gops()
952 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
956 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
958 work_to_do--; in xenvif_tx_build_gops()
959 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
967 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
972 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) { in xenvif_tx_build_gops()
975 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; in xenvif_tx_build_gops()
976 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
985 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) { in xenvif_tx_build_gops()
988 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; in xenvif_tx_build_gops()
989 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1008 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1016 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n", in xenvif_tx_build_gops()
1018 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1022 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) in xenvif_tx_build_gops()
1027 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1033 skb_shinfo(skb)->nr_frags = ret; in xenvif_tx_build_gops()
1034 /* At this point shinfo->nr_frags is in fact the number of in xenvif_tx_build_gops()
1039 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { in xenvif_tx_build_gops()
1040 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1042 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1045 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1049 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1055 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { in xenvif_tx_build_gops()
1057 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; in xenvif_tx_build_gops()
1059 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1061 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1068 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) { in xenvif_tx_build_gops()
1072 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; in xenvif_tx_build_gops()
1074 switch (extra->u.hash.type) { in xenvif_tx_build_gops()
1091 *(u32 *)extra->u.hash.value, in xenvif_tx_build_gops()
1099 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1101 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1108 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1116 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_handle_frag_list()
1118 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1119 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1123 skb->truesize -= skb->data_len; in xenvif_handle_frag_list()
1124 skb->len += nskb->len; in xenvif_handle_frag_list()
1125 skb->data_len += nskb->len; in xenvif_handle_frag_list()
1128 for (i = 0; offset < skb->len; i++) { in xenvif_handle_frag_list()
1136 skb->truesize += skb->data_len; in xenvif_handle_frag_list()
1139 return -ENOMEM; in xenvif_handle_frag_list()
1142 if (offset + PAGE_SIZE < skb->len) in xenvif_handle_frag_list()
1145 len = skb->len - offset; in xenvif_handle_frag_list()
1154 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in xenvif_handle_frag_list()
1156 uarg = skb_shinfo(skb)->destructor_arg; in xenvif_handle_frag_list()
1158 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1159 uarg->callback(NULL, uarg, true); in xenvif_handle_frag_list()
1160 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_handle_frag_list()
1163 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); in xenvif_handle_frag_list()
1164 skb_shinfo(skb)->nr_frags = i; in xenvif_handle_frag_list()
1165 skb->truesize += i * PAGE_SIZE; in xenvif_handle_frag_list()
1172 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1173 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1177 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1182 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1190 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_submit()
1193 skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1194 skb_shinfo(nskb)->nr_frags = 0; in xenvif_tx_submit()
1200 if (txp->flags & XEN_NETTXF_csum_blank) in xenvif_tx_submit()
1201 skb->ip_summed = CHECKSUM_PARTIAL; in xenvif_tx_submit()
1202 else if (txp->flags & XEN_NETTXF_data_validated) in xenvif_tx_submit()
1203 skb->ip_summed = CHECKSUM_UNNECESSARY; in xenvif_tx_submit()
1208 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1212 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1218 /* Copied all the bits from the frag list -- free it. */ in xenvif_tx_submit()
1223 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1224 skb->protocol = eth_type_trans(skb, skb->dev); in xenvif_tx_submit()
1228 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1231 if (skb_shinfo(skb)->destructor_arg) in xenvif_tx_submit()
1253 mss = skb_shinfo(skb)->gso_size; in xenvif_tx_submit()
1256 skb_shinfo(skb)->gso_segs = in xenvif_tx_submit()
1257 DIV_ROUND_UP(skb->len - hdrlen, mss); in xenvif_tx_submit()
1260 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1261 queue->stats.rx_packets++; in xenvif_tx_submit()
1270 if (skb_shinfo(skb)->destructor_arg) { in xenvif_tx_submit()
1272 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
1292 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1294 u16 pending_idx = ubuf->desc; in xenvif_zerocopy_callback()
1295 ubuf = (struct ubuf_info_msgzc *) ubuf->ctx; in xenvif_zerocopy_callback()
1296 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1298 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1299 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1304 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1306 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1309 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1311 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
1322 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1323 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1327 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1335 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1337 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1339 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1341 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1342 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1346 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1351 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1353 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1355 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1357 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1359 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1360 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1362 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1363 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1364 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1366 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1376 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
1396 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1398 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1400 queue->pages_to_map, in xenvif_tx_action()
1405 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", in xenvif_tx_action()
1408 WARN_ON_ONCE(queue->tx_map_ops[i].status == in xenvif_tx_action()
1423 RING_IDX i = queue->tx.rsp_prod_pvt; in _make_tx_response()
1426 resp = RING_GET_RESPONSE(&queue->tx, i); in _make_tx_response()
1427 resp->id = txp->id; in _make_tx_response()
1428 resp->status = status; in _make_tx_response()
1430 while (extra_count-- != 0) in _make_tx_response()
1431 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in _make_tx_response()
1433 queue->tx.rsp_prod_pvt = ++i; in _make_tx_response()
1440 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1442 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
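
push_tx_responses() publishes the new responses and raises the event-channel interrupt only when RING_PUSH_RESPONSES_AND_CHECK_NOTIFY says the frontend's rsp_event index falls inside the batch just pushed, suppressing redundant interrupts. A sketch of that check-notify test with plain integers (no memory barriers, single-threaded, purely illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * new_prod:  response producer after pushing this batch
     * old_prod:  response producer before the batch
     * rsp_event: index after which the consumer asked to be woken
     *
     * Notify only if rsp_event lies in (old_prod, new_prod]; unsigned
     * subtraction keeps the comparison correct across index wraparound.
     */
    static bool check_notify(unsigned int new_prod, unsigned int old_prod,
                             unsigned int rsp_event)
    {
        return (new_prod - rsp_event) < (new_prod - old_prod);
    }

    int main(void)
    {
        /* Consumer wants an event at 11; we pushed 10 -> 13: notify. */
        printf("%d\n", check_notify(13, 10, 11));   /* 1 */
        /* Consumer already asked for 21; nothing new for it yet: skip. */
        printf("%d\n", check_notify(13, 10, 21));   /* 0 */
        return 0;
    }
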
1452 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1454 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1456 _make_tx_response(queue, &pending_tx_info->req, in xenvif_idx_release()
1457 pending_tx_info->extra_count, status); in xenvif_idx_release()
1463 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1464 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1468 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1478 spin_lock_irqsave(&queue->response_lock, flags); in make_tx_response()
1483 spin_unlock_irqrestore(&queue->response_lock, flags); in make_tx_response()
1494 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1498 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1500 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1513 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1521 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1526 if (queue->tx.sring) in xenvif_unmap_frontend_data_rings()
1527 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1528 queue->tx.sring); in xenvif_unmap_frontend_data_rings()
1529 if (queue->rx.sring) in xenvif_unmap_frontend_data_rings()
1530 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1531 queue->rx.sring); in xenvif_unmap_frontend_data_rings()
1544 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1550 rsp_prod = READ_ONCE(txs->rsp_prod); in xenvif_map_frontend_data_rings()
1551 req_prod = READ_ONCE(txs->req_prod); in xenvif_map_frontend_data_rings()
1553 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1555 err = -EIO; in xenvif_map_frontend_data_rings()
1556 if (req_prod - rsp_prod > RING_SIZE(&queue->tx)) in xenvif_map_frontend_data_rings()
1559 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1565 rsp_prod = READ_ONCE(rxs->rsp_prod); in xenvif_map_frontend_data_rings()
1566 req_prod = READ_ONCE(rxs->req_prod); in xenvif_map_frontend_data_rings()
1568 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1570 err = -EIO; in xenvif_map_frontend_data_rings()
1571 if (req_prod - rsp_prod > RING_SIZE(&queue->rx)) in xenvif_map_frontend_data_rings()
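
Before attaching to the shared tx and rx rings, xenvif_map_frontend_data_rings() sanity-checks the producer index published by the (untrusted) frontend: a producer more than a full ring ahead of rsp_prod can only mean corruption, so the backend fails with -EIO instead of consuming bogus requests. A minimal version of that check:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 256u

    /*
     * Ring indices are free-running counters; the slot is idx % RING_SIZE.
     * A well-behaved producer is never more than RING_SIZE ahead of the
     * response producer it has already seen.
     */
    static bool producer_index_sane(unsigned int req_prod, unsigned int rsp_prod)
    {
        return req_prod - rsp_prod <= RING_SIZE;
    }

    int main(void)
    {
        printf("%d\n", producer_index_sane(300, 100));         /* 1: 200 in flight */
        printf("%d\n", producer_index_sane(1000, 100));        /* 0: bogus         */
        printf("%d\n", producer_index_sane(5, 0xfffffff0u));   /* 1: wrapped index */
        return 0;
    }
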
1587 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
1595 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
1616 RING_IDX idx = vif->ctrl.rsp_prod_pvt; in make_ctrl_response()
1618 .id = req->id, in make_ctrl_response()
1619 .type = req->type, in make_ctrl_response()
1624 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp; in make_ctrl_response()
1625 vif->ctrl.rsp_prod_pvt = ++idx; in make_ctrl_response()
1632 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify); in push_ctrl_response()
1634 notify_remote_via_irq(vif->ctrl_irq); in push_ctrl_response()
1643 switch (req->type) { in process_ctrl_request()
1645 status = xenvif_set_hash_alg(vif, req->data[0]); in process_ctrl_request()
1653 status = xenvif_set_hash_flags(vif, req->data[0]); in process_ctrl_request()
1657 status = xenvif_set_hash_key(vif, req->data[0], in process_ctrl_request()
1658 req->data[1]); in process_ctrl_request()
1668 req->data[0]); in process_ctrl_request()
1672 status = xenvif_set_hash_mapping(vif, req->data[0], in process_ctrl_request()
1673 req->data[1], in process_ctrl_request()
1674 req->data[2]); in process_ctrl_request()
1690 req_prod = vif->ctrl.sring->req_prod; in xenvif_ctrl_action()
1691 req_cons = vif->ctrl.req_cons; in xenvif_ctrl_action()
1702 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req); in xenvif_ctrl_action()
1708 vif->ctrl.req_cons = req_cons; in xenvif_ctrl_action()
1709 vif->ctrl.sring->req_event = req_cons + 1; in xenvif_ctrl_action()
1715 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) in xenvif_ctrl_work_todo()
1741 return -ENODEV; in netback_init()
1744 * specified a value. in netback_init()
1761 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); in netback_init()
1783 MODULE_ALIAS("xen-backend:vif");