Lines matching "mixed", "-", and "burst" (full-text search) in drivers/net/hyperv/netvsc.c of the Linux kernel. The matched lines are grouped below by the function they occur in; each excerpt line keeps its line number in the source file.

   1  // SPDX-License-Identifier: GPL-2.0-only

In netvsc_switch_datapath():
  38  struct hv_device *dev = net_device_ctx->device_ctx;
  39  struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
  40  struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
  45  net_device_ctx->data_path_is_vf = vf;
  48  init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
  50  init_pkt->msg.v4_msg.active_dp.active_datapath =
  53  init_pkt->msg.v4_msg.active_dp.active_datapath =
  59  ret = vmbus_sendpacket(dev->channel, init_pkt,
  68  if (ret != -EAGAIN) {
  87  wait_for_completion(&nv_dev->channel_init_wait);
  88  net_device_ctx->data_path_is_vf = vf;

In netvsc_subchan_work():
 110  rdev = nvdev->extension;
 112  ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
 114  netif_device_attach(rdev->ndev);
 117  for (i = 1; i < nvdev->num_chn; i++)
 118  netif_napi_del(&nvdev->chan_table[i].napi);
 120  nvdev->max_chn = 1;
 121  nvdev->num_chn = 1;

In alloc_net_device():
 136  init_waitqueue_head(&net_device->wait_drain);
 137  net_device->destroy = false;
 138  net_device->tx_disable = true;
 140  net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 141  net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 143  init_completion(&net_device->channel_init_wait);
 144  init_waitqueue_head(&net_device->subchan_open);
 145  INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

In free_netvsc_device():
 156  kfree(nvdev->extension);
 157  vfree(nvdev->recv_buf);
 158  vfree(nvdev->send_buf);
 159  bitmap_free(nvdev->send_section_map);
 162  xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
 163  kfree(nvdev->chan_table[i].recv_buf);
 164  vfree(nvdev->chan_table[i].mrc.slots);

In free_netvsc_device_rcu():
 172  call_rcu(&nvdev->rcu, free_netvsc_device);

In netvsc_revoke_recv_buf():
 188  if (net_device->recv_section_cnt) {
 190  revoke_packet = &net_device->revoke_packet;
 193  revoke_packet->hdr.msg_type =
 195  revoke_packet->msg.v1_msg.
 200  ret = vmbus_sendpacket(device->channel,
 210  if (device->channel->rescind)
 221  net_device->recv_section_cnt = 0;

In netvsc_revoke_send_buf():
 238  if (net_device->send_section_cnt) {
 240  revoke_packet = &net_device->revoke_packet;
 243  revoke_packet->hdr.msg_type =
 245  revoke_packet->msg.v1_msg.revoke_send_buf.id =
 250  ret = vmbus_sendpacket(device->channel,
 261  if (device->channel->rescind)
 272  net_device->send_section_cnt = 0;

In netvsc_teardown_recv_gpadl():
 282  if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
 283  ret = vmbus_teardown_gpadl(device->channel,
 284  &net_device->recv_buf_gpadl_handle);

In netvsc_teardown_send_gpadl():
 303  if (net_device->send_buf_gpadl_handle.gpadl_handle) {
 304  ret = vmbus_teardown_gpadl(device->channel,
 305  &net_device->send_buf_gpadl_handle);

In netvsc_alloc_recv_comp_ring():
 320  struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 321  int node = cpu_to_node(nvchan->channel->target_cpu);
 324  size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
 325  nvchan->mrc.slots = vzalloc_node(size, node);
 326  if (!nvchan->mrc.slots)
 327  nvchan->mrc.slots = vzalloc(size);
 329  return nvchan->mrc.slots ? 0 : -ENOMEM;
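
netvsc_alloc_recv_comp_ring() above prefers memory on the channel's home NUMA node (vzalloc_node() keyed off the channel's target CPU) and falls back to an ordinary vzalloc() if that fails. A minimal userspace sketch of the same prefer-then-fallback shape; zalloc_node() and zalloc_any() are hypothetical stand-ins for the kernel allocators:

#include <stdlib.h>

/* Stand-in for vzalloc_node(); pretend node-local memory is exhausted. */
static void *zalloc_node(size_t size, int node)
{
        (void)size; (void)node;
        return NULL;
}

/* Stand-in for plain vzalloc(). */
static void *zalloc_any(size_t size)
{
        return calloc(1, size);
}

int main(void)
{
        void *slots = zalloc_node(4096, 1);

        if (!slots)
                slots = zalloc_any(4096);  /* any node beats failing outright */
        if (!slots)
                return 1;                  /* -ENOMEM in the driver */
        free(slots);
        return 0;
}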

In netvsc_init_buf():
 343  buf_size = device_info->recv_sections * device_info->recv_section_size;
 347  if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
 351  net_device->recv_buf = vzalloc(buf_size);
 352  if (!net_device->recv_buf) {
 356  ret = -ENOMEM;
 360  net_device->recv_buf_size = buf_size;
 367  ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
 369  &net_device->recv_buf_gpadl_handle);
 377  init_packet = &net_device->channel_init_pkt;
 379  init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
 380  init_packet->msg.v1_msg.send_recv_buf.
 381  gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
 382  init_packet->msg.v1_msg.
 388  ret = vmbus_sendpacket(device->channel, init_packet,
 399  wait_for_completion(&net_device->channel_init_wait);
 402  resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
 403  if (resp->status != NVSP_STAT_SUCCESS) {
 405  "Unable to complete receive buffer initialization with NetVsp - status %d\n",
 406  resp->status);
 407  ret = -EINVAL;
 413  resp->num_sections, resp->sections[0].sub_alloc_size,
 414  resp->sections[0].num_sub_allocs);
 417  if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
 418  ret = -EINVAL;
 422  net_device->recv_section_size = resp->sections[0].sub_alloc_size;
 423  net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
 426  if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
 427  (u64)net_device->recv_section_cnt > (u64)buf_size) {
 429  net_device->recv_section_size);
 430  ret = -EINVAL;
 435  struct netvsc_channel *nvchan = &net_device->chan_table[i];
 437  nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
 438  if (nvchan->recv_buf == NULL) {
 439  ret = -ENOMEM;
 448  net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
 454  buf_size = device_info->send_sections * device_info->send_section_size;
 457  net_device->send_buf = vzalloc(buf_size);
 458  if (!net_device->send_buf) {
 461  ret = -ENOMEM;
 464  net_device->send_buf_size = buf_size;
 470  ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
 472  &net_device->send_buf_gpadl_handle);
 480  init_packet = &net_device->channel_init_pkt;
 482  init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
 483  init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
 484  net_device->send_buf_gpadl_handle.gpadl_handle;
 485  init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
 490  ret = vmbus_sendpacket(device->channel, init_packet,
 501  wait_for_completion(&net_device->channel_init_wait);
 504  if (init_packet->msg.v1_msg.
 507  "initialization with NetVsp - status %d\n",
 508  init_packet->msg.v1_msg.
 510  ret = -EINVAL;
 515  net_device->send_section_size = init_packet->msg.
 517  if (net_device->send_section_size < NETVSC_MTU_MIN) {
 519  net_device->send_section_size);
 520  ret = -EINVAL;
 525  net_device->send_section_cnt = buf_size / net_device->send_section_size;
 528  net_device->send_section_size, net_device->send_section_cnt);
 531  net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
 533  if (!net_device->send_section_map) {
 534  ret = -ENOMEM;
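
One detail worth pausing on in netvsc_init_buf(): the sanity check at lines 426-427 widens both factors to u64 before multiplying. With plain 32-bit arithmetic, a hostile or buggy host could report a section size and count whose product wraps around and slips past the buffer-size bound. A standalone demonstration with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t size = 0x10000, cnt = 0x10000;   /* product is 2^32 */
        uint32_t buf_size = 0x4000000;            /* 64 MiB */

        uint32_t wrapped = size * cnt;            /* wraps to 0: check would pass */
        uint64_t widened = (uint64_t)size * cnt;  /* 2^32: check correctly fails */

        printf("32-bit product: %u (<= %u? %s)\n", wrapped, buf_size,
               wrapped <= buf_size ? "yes, bug" : "no");
        printf("64-bit product: %llu (<= %u? %s)\n",
               (unsigned long long)widened, buf_size,
               widened <= buf_size ? "yes" : "no, rejected");
        return 0;
}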

In negotiate_nvsp_ver():
 560  init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
 561  init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
 562  init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
 566  ret = vmbus_sendpacket(device->channel, init_packet,
 575  wait_for_completion(&net_device->channel_init_wait);
 577  if (init_packet->msg.init_msg.init_complete.status !=
 579  return -EINVAL;
 586  init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
 587  init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
 588  init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 592  netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
 594  init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
 597  init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
 601  init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
 605  ret = vmbus_sendpacket(device->channel, init_packet,

In netvsc_connect_vsp():
 626  init_packet = &net_device->channel_init_pkt;
 629  for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
 632  net_device->nvsp_version = ver_list[i];
 637  ret = -EPROTO;
 641  if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
 643  net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
 644  ret = -EPROTO;
 648  pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
 653  if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
 658  init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
 659  init_packet->msg.v1_msg.
 662  init_packet->msg.v1_msg.
 669  ret = vmbus_sendpacket(device->channel, init_packet,
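
negotiate_nvsp_ver() offers a single candidate per round trip (min_protocol_ver == max_protocol_ver, lines 561-562), and netvsc_connect_vsp() walks ver_list from the newest entry downward (line 629) until the host accepts one. A compact model of that walk; the version numbers and the host_accepts() stub are illustrative, not the real NVSP constants:

#include <stdio.h>

static const unsigned int ver_list[] = { 1, 2, 4, 5, 6 };  /* oldest..newest */

/* Stand-in for the NVSP_MSG_TYPE_INIT round trip: pretend the host
 * understands nothing newer than version 5. */
static int host_accepts(unsigned int ver)
{
        return ver <= 5;
}

int main(void)
{
        int i;

        for (i = (int)(sizeof(ver_list) / sizeof(ver_list[0])) - 1; i >= 0; i--) {
                if (host_accepts(ver_list[i])) {  /* min == max == ver_list[i] */
                        printf("negotiated version %u\n", ver_list[i]);
                        return 0;
                }
        }
        return 1;  /* no common version: -EPROTO in the driver */
}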

In netvsc_device_remove():
 684  * netvsc_device_remove - Callback when the root bus device is removed
 691  = rtnl_dereference(net_device_ctx->nvdev);
 695  * Revoke receive buffer. If host is pre-Win2016 then tear down
 706  RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 709  for (i = 0; i < net_device->num_chn; i++) {
 712  if (i < ndev->real_num_rx_queues)
 713  napi_disable(&net_device->chan_table[i].napi);
 715  netif_napi_del(&net_device->chan_table[i].napi);
 725  vmbus_close(device->channel);

In netvsc_free_send_slot():
 746  sync_change_bit(index, net_device->send_section_map);

In netvsc_send_tx_complete():
 761  cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
 763  netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
 772  = (struct hv_netvsc_packet *)skb->cb;
 773  u32 send_index = packet->send_buf_index;
 778  q_idx = packet->q_idx;
 780  tx_stats = &net_device->chan_table[q_idx].tx_stats;
 782  u64_stats_update_begin(&tx_stats->syncp);
 783  tx_stats->packets += packet->total_packets;
 784  tx_stats->bytes += packet->total_bytes;
 785  u64_stats_update_end(&tx_stats->syncp);
 787  netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
 792  atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 794  if (unlikely(net_device->destroy)) {
 796  wake_up(&net_device->wait_drain);
 800  if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
 801  (hv_get_avail_to_write_percent(&channel->outbound) >
 804  ndev_ctx->eth_stats.wake_queue++;

In netvsc_send_completion():
 823  cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
 824  desc->trans_id);
 826  netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
 831  switch (pkt_rqst->hdr.msg_type) {
 833  complete(&net_device->channel_init_wait);
 849  switch (nvsp_packet->hdr.msg_type) {
 899  status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
 911  nvsp_packet->hdr.msg_type);
 916  memcpy(&net_device->channel_init_pkt, nvsp_packet,
 918  complete(&net_device->channel_init_wait);

In netvsc_get_next_send_section():
 923  unsigned long *map_addr = net_device->send_section_map;
 926  for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
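
netvsc_get_next_send_section() scans send_section_map for a clear bit and claims it with an atomic test-and-set; the matching release is the sync_change_bit() in netvsc_free_send_slot() (line 746). A userspace model of that lock-free slot allocator using the GCC/Clang __atomic builtins; all names here are mine:

#include <limits.h>
#include <stdio.h>

#define NSECTIONS     64u
#define INVALID_INDEX UINT_MAX

static unsigned long section_map;  /* bit i set => section i in use */

static unsigned int get_next_section(void)
{
        unsigned int i;

        for (i = 0; i < NSECTIONS; i++) {
                if (section_map & (1UL << i))  /* like for_each_clear_bit() */
                        continue;
                /* Atomically set the bit; if it was still clear, we own it. */
                if (!(__atomic_fetch_or(&section_map, 1UL << i,
                                        __ATOMIC_ACQ_REL) & (1UL << i)))
                        return i;
        }
        return INVALID_INDEX;  /* NETVSC_INVALID_INDEX in the driver */
}

static void free_section(unsigned int i)
{
        __atomic_fetch_and(&section_map, ~(1UL << i), __ATOMIC_ACQ_REL);
}

int main(void)
{
        unsigned int a = get_next_section();
        unsigned int b = get_next_section();

        printf("claimed %u then %u\n", a, b);          /* 0 then 1 */
        free_section(a);
        printf("reclaimed %u\n", get_next_section());  /* 0 again */
        return 0;
}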

In netvsc_copy_to_send_buf():
 942  char *start = net_device->send_buf;
 943  char *dest = start + (section_index * net_device->send_section_size)
 947  u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
 948  packet->page_buf_cnt;
 952  remain = packet->total_data_buflen & (net_device->pkt_align - 1);
 954  padding = net_device->pkt_align - remain;
 955  rndis_msg->msg_len += padding;
 956  packet->total_data_buflen += padding;
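
Lines 952-956 pad each staged packet out to pkt_align with the power-of-two remainder trick len & (align - 1), then grow the RNDIS message length and the packet length by the same amount so both stay consistent. A worked standalone check; the align value is illustrative:

#include <assert.h>
#include <stdio.h>

/* Padding needed to round len up to a power-of-two alignment. */
static unsigned int pad_to_align(unsigned int len, unsigned int align)
{
        unsigned int remain = len & (align - 1);  /* len % align for align = 2^n */

        return remain ? align - remain : 0;
}

int main(void)
{
        assert(pad_to_align(970, 8) == 6);  /* 970 % 8 == 2, pad to 976 */
        assert(pad_to_align(976, 8) == 0);  /* already aligned */
        printf("ok\n");
        return 0;
}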

In netvsc_dma_unmap():
 980  if (!packet->dma_range)
 983  for (i = 0; i < packet->page_buf_cnt; i++)
 984  dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
 985  packet->dma_range[i].mapping_size,
 988  kfree(packet->dma_range);

In netvsc_dma_map():
 991  /* netvsc_dma_map - Map swiotlb bounce buffer with data page of
1004  * len that may be non-zero, even for entries in the middle of the
1013  u32 page_count = packet->page_buf_cnt;
1020  packet->dma_range = kcalloc(page_count,
1021  sizeof(*packet->dma_range),
1023  if (!packet->dma_range)
1024  return -ENOMEM;
1031  dma = dma_map_single(&hv_dev->device, src, len,
1033  if (dma_mapping_error(&hv_dev->device, dma)) {
1034  kfree(packet->dma_range);
1035  return -ENOMEM;
1041  packet->dma_range[i].dma = dma;
1042  packet->dma_range[i].mapping_size = len;
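
netvsc_dma_map() allocates one bookkeeping entry per page range, maps each range in turn, and bails out with -ENOMEM on failure; netvsc_dma_unmap() walks the same array to undo the mappings. A self-contained sketch of the pattern; map_one()/unmap_one() stand in for dma_map_single()/dma_unmap_single(), and the unwind loop on failure is my addition, not shown in the excerpt:

#include <stdlib.h>

struct dma_range { unsigned long dma; size_t mapping_size; };

/* Stand-in for dma_map_single(); 0 models a mapping error. */
static unsigned long map_one(void *addr, size_t len)
{
        return len ? (unsigned long)addr : 0;
}

/* Stand-in for dma_unmap_single(). */
static void unmap_one(unsigned long dma, size_t len)
{
        (void)dma; (void)len;
}

static int map_ranges(struct dma_range **out, void **addrs, size_t *lens, int n)
{
        struct dma_range *r = calloc(n, sizeof(*r));  /* kcalloc() in the driver */
        int i;

        if (!r)
                return -1;
        for (i = 0; i < n; i++) {
                r[i].dma = map_one(addrs[i], lens[i]);
                if (!r[i].dma) {
                        while (i-- > 0)  /* unwind what was already mapped */
                                unmap_one(r[i].dma, r[i].mapping_size);
                        free(r);
                        return -1;       /* -ENOMEM in the driver */
                }
                r[i].mapping_size = lens[i];
        }
        *out = r;
        return 0;
}

int main(void)
{
        char a[64], b[64];
        void *addrs[] = { a, b };
        size_t lens[] = { sizeof(a), sizeof(b) };
        struct dma_range *r = NULL;

        if (map_ranges(&r, addrs, lens, 2))
                return 1;
        free(r);
        return 0;
}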

In netvsc_send_pkt():
1060  &net_device->chan_table[packet->q_idx];
1061  struct vmbus_channel *out_channel = nvchan->channel;
1064  struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
1067  u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
1072  rpkt->channel_type = 0; /* 0 is RMC_DATA */
1074  rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
1076  rpkt->send_buf_section_index = packet->send_buf_index;
1077  if (packet->send_buf_index == NETVSC_INVALID_INDEX)
1078  rpkt->send_buf_section_size = 0;
1080  rpkt->send_buf_section_size = packet->total_data_buflen;
1084  if (out_channel->rescind)
1085  return -ENODEV;
1089  packet->dma_range = NULL;
1090  if (packet->page_buf_cnt) {
1091  if (packet->cp_partial)
1092  pb += packet->rmsg_pgcnt;
1094  ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
1096  ret = -EAGAIN;
1101  pb, packet->page_buf_cnt,
1106  netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
1116  atomic_inc_return(&nvchan->queue_sends);
1120  ndev_ctx->eth_stats.stop_queue++;
1122  } else if (ret == -EAGAIN) {
1124  ndev_ctx->eth_stats.stop_queue++;
1128  packet->page_buf_cnt, packet->total_data_buflen,
1133  atomic_read(&nvchan->queue_sends) < 1 &&
1134  !net_device->tx_disable) {
1136  ndev_ctx->eth_stats.wake_queue++;
1137  if (ret == -EAGAIN)
1138  ret = -ENOSPC;

In move_pkt_msd():
1149  *msd_skb = msdp->skb;
1150  *msd_send = msdp->pkt;
1151  msdp->skb = NULL;
1152  msdp->pkt = NULL;
1153  msdp->count = 0;

In netvsc_send():
1160  * For small, non-LSO packets we copy the packet to a send buffer
1161  * which is pre-registered with the Hyper-V side. This enables the
1166  * a burst of packets, keep on copying into the buffer until it is
1167  * full or we are done collecting a burst. If there is an existing
1186  = rcu_dereference_bh(ndev_ctx->nvdev);
1187  struct hv_device *device = ndev_ctx->device_ctx;
1190  u32 pktlen = packet->total_data_buflen, msd_len = 0;
1198  if (unlikely(!net_device || net_device->destroy))
1199  return -ENODEV;
1201  nvchan = &net_device->chan_table[packet->q_idx];
1202  packet->send_buf_index = NETVSC_INVALID_INDEX;
1203  packet->cp_partial = false;
1206  * msd (Multi-Send Data) field which may be changed during data packet
1213  msdp = &nvchan->msd;
1214  if (msdp->pkt)
1215  msd_len = msdp->pkt->total_data_buflen;
1217  try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1218  if (try_batch && msd_len + pktlen + net_device->pkt_align <
1219  net_device->send_section_size) {
1220  section_index = msdp->pkt->send_buf_index;
1222  } else if (try_batch && msd_len + packet->rmsg_size <
1223  net_device->send_section_size) {
1224  section_index = msdp->pkt->send_buf_index;
1225  packet->cp_partial = true;
1227  } else if (pktlen + net_device->pkt_align <
1228  net_device->send_section_size) {
1231  ++ndev_ctx->eth_stats.tx_send_full;
1239  * and not doing mixed modes send and not flow blocked
1242  !packet->cp_partial &&
1243  !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1250  packet->send_buf_index = section_index;
1252  if (packet->cp_partial) {
1253  packet->page_buf_cnt -= packet->rmsg_pgcnt;
1254  packet->total_data_buflen = msd_len + packet->rmsg_size;
1256  packet->page_buf_cnt = 0;
1257  packet->total_data_buflen += msd_len;
1260  if (msdp->pkt) {
1261  packet->total_packets += msdp->pkt->total_packets;
1262  packet->total_bytes += msdp->pkt->total_bytes;
1265  if (msdp->skb)
1266  dev_consume_skb_any(msdp->skb);
1269  msdp->skb = skb;
1270  msdp->pkt = packet;
1271  msdp->count++;
1274  msdp->skb = NULL;
1275  msdp->pkt = NULL;
1276  msdp->count = 0;
1289  msd_send->send_buf_index);
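
The comment at lines 1160-1167 and the branch ladder at lines 1217-1228 are the core of multi-send batching: while a burst is in flight, keep copying small packets behind the data already staged in the current pre-registered send-buffer section; start a fresh section when the packet no longer fits there; hand large packets to the host directly with no copy. A simplified decision function capturing that ladder (the driver's third, partial-copy cp_partial branch is omitted here; field names follow the excerpt, thresholds are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct batch_state {
        unsigned int msd_len;  /* bytes already staged in the section */
        unsigned int count;    /* packets staged so far */
};

enum action { APPEND, NEW_SECTION, SEND_DIRECT };

static enum action classify(const struct batch_state *s, unsigned int pktlen,
                            unsigned int pkt_align, unsigned int section_size,
                            unsigned int max_pkt)
{
        bool try_batch = s->msd_len > 0 && s->count < max_pkt;

        if (try_batch && s->msd_len + pktlen + pkt_align < section_size)
                return APPEND;       /* fits behind the staged burst */
        if (pktlen + pkt_align < section_size)
                return NEW_SECTION;  /* small packet, fresh section */
        return SEND_DIRECT;          /* large (e.g. LSO): no copy */
}

int main(void)
{
        struct batch_state s = { .msd_len = 1200, .count = 3 };

        printf("%d\n", classify(&s, 400, 8, 6144, 8));   /* 0: APPEND */
        printf("%d\n", classify(&s, 9000, 8, 6144, 8));  /* 2: SEND_DIRECT */
        return 0;
}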

In send_recv_completions():
1308  struct multi_recv_comp *mrc = &nvchan->mrc;
1318  while (mrc->first != mrc->next) {
1320  = mrc->slots + mrc->first;
1322  msg.status = rcd->status;
1323  ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1324  rcd->tid, VM_PKT_COMP, 0);
1328  ++ndev_ctx->eth_stats.rx_comp_busy;
1332  if (++mrc->first == nvdev->recv_completion_cnt)
1333  mrc->first = 0;
1337  if (unlikely(nvdev->destroy))
1338  wake_up(&nvdev->wait_drain);

In recv_comp_slot_avail():
1348  u32 count = nvdev->recv_completion_cnt;
1350  if (mrc->next >= mrc->first)
1351  *filled = mrc->next - mrc->first;
1353  *filled = (count - mrc->first) + mrc->next;
1355  *avail = count - *filled - 1;

In enq_receive_complete():
1363  struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1364  struct multi_recv_comp *mrc = &nvchan->mrc;
1381  rcd = mrc->slots + mrc->next;
1382  rcd->tid = tid;
1383  rcd->status = status;
1385  if (++mrc->next == nvdev->recv_completion_cnt)
1386  mrc->next = 0;
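
recv_comp_slot_avail() and enq_receive_complete() together implement a one-slot-open ring buffer: first is the consumer index, next the producer, and usable capacity is count - 1 so that first == next can only ever mean empty. That is also why netvsc_init_buf() sizes the ring at recv_section_cnt + 1 (line 448). A userspace model of the index arithmetic:

#include <stdbool.h>
#include <stdio.h>

#define RING_CNT 8u  /* recv_completion_cnt in the driver */

struct mrc { unsigned int first, next; };  /* consumer, producer */

static unsigned int filled(const struct mrc *m)
{
        return m->next >= m->first ? m->next - m->first
                                   : (RING_CNT - m->first) + m->next;
}

static bool enq(struct mrc *m)
{
        if (RING_CNT - filled(m) - 1 == 0)
                return false;  /* would close the empty/full gap slot */
        if (++m->next == RING_CNT)
                m->next = 0;   /* wrap the producer index */
        return true;
}

int main(void)
{
        struct mrc m = { 0, 0 };
        unsigned int n = 0;

        while (enq(&m))
                n++;
        printf("capacity: %u of %u slots\n", n, RING_CNT);  /* 7 of 8 */
        return 0;
}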

In netvsc_receive():
1395  struct vmbus_channel *channel = nvchan->channel;
1400  u16 q_idx = channel->offermsg.offer.sub_channel_index;
1401  char *recv_buf = net_device->recv_buf;
1415  if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1418  nvsp->hdr.msg_type);
1423  if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1426  desc->offset8 << 3);
1430  if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1432  "Invalid xfer page set id - expecting %x got %x\n",
1434  vmxferpage_packet->xfer_pageset_id);
1438  count = vmxferpage_packet->range_cnt;
1441  if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1450  u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1451  u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1455  if (unlikely(offset > net_device->recv_buf_size ||
1456  buflen > net_device->recv_buf_size - offset)) {
1457  nvchan->rsc.cnt = 0;
1466  /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1467  * make sure that nvchan->recv_buf is large enough to hold the packet.
1469  if (unlikely(buflen > net_device->recv_section_size)) {
1470  nvchan->rsc.cnt = 0;
1474  buflen, net_device->recv_section_size);
1481  nvchan->rsc.is_last = (i == count - 1);
1491  nvchan->rsc.cnt = 0;
1497  vmxferpage_packet->d.trans_id, status);
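
The range validation at lines 1455-1456 (netvsc_send_table() uses the same shape at line 1536) is deliberately split into two comparisons, offset > size followed by buflen > size - offset: once the first has passed, size - offset cannot underflow, and no overflow-prone addition is ever evaluated. A small demonstration with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Overflow-safe: never computes offset + buflen. */
static int range_ok(uint32_t offset, uint32_t buflen, uint32_t size)
{
        return !(offset > size || buflen > size - offset);
}

int main(void)
{
        uint32_t size = 1u << 20;

        printf("%d\n", range_ok(4096, 1500, size));         /* 1: fits */
        printf("%d\n", range_ok(4096, 0xFFFFF000u, size));  /* 0: rejected */

        /* The naive check would wrap: 4096 + 0xFFFFF000 overflows to 0,
         * and 0 <= size would let the bogus range through. */
        printf("naive sum wraps to %u\n", 4096u + 0xFFFFF000u);
        return 0;
}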

In netvsc_send_table():
1518  count = nvmsg->msg.v5_msg.send_table.count;
1519  offset = nvmsg->msg.v5_msg.send_table.offset;
1522  netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1529  if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1536  if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1537  netdev_err(ndev, "Received send-table offset too big:%u\n",
1545  net_device_ctx->tx_table[i] = tab[i];

In netvsc_send_vf():
1561  net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1562  net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1564  if (net_device_ctx->vf_alloc)
1565  complete(&net_device_ctx->vf_add);
1568  net_device_ctx->vf_serial,
1569  net_device_ctx->vf_alloc ? "added" : "removed");

In netvsc_receive_inband():
1585  switch (nvmsg->hdr.msg_type) {

In netvsc_process_raw_pkt():
1606  struct vmbus_channel *channel = nvchan->channel;
1611  switch (desc->type) {
1625  desc->type, desc->trans_id);

In netvsc_channel_to_device():
1634  struct vmbus_channel *primary = channel->primary_channel;
1636  return primary ? primary->device_obj : channel->device_obj;

In netvsc_poll():
1647  struct netvsc_device *net_device = nvchan->net_device;
1648  struct vmbus_channel *channel = nvchan->channel;
1655  if (!nvchan->desc)
1656  nvchan->desc = hv_pkt_iter_first(channel);
1658  nvchan->xdp_flush = false;
1660  while (nvchan->desc && work_done < budget) {
1662  ndev, nvchan->desc, budget);
1663  nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1666  if (nvchan->xdp_flush)
1674  * then re-enable host interrupts
1680  (ret || hv_end_read(&channel->inbound)) &&
1682  hv_begin_read(&channel->inbound);
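
The tail of netvsc_poll() (lines 1674-1682) deals with the classic lost-wakeup race: host interrupts are re-enabled only after NAPI completes, and the ring is then re-checked (hv_end_read()); if data slipped in during that window, hv_begin_read() re-disables and the channel stays in polling mode. A rough stub of that control flow, with a fake ring in place of the VMBus helpers:

#include <stdio.h>

static int ring_pending = 3;  /* packets sitting in the fake ring */

static int process_one(void)
{
        if (!ring_pending)
                return 0;
        ring_pending--;
        return 1;
}

static int poll_once(int budget)
{
        int work = 0;

        while (work < budget && process_one())
                work++;

        if (work == budget)
                return budget;  /* budget exhausted: stay scheduled */

        /* Completed under budget: re-enable "interrupts", then re-check
         * the ring, as hv_end_read() does after napi_complete_done(). */
        if (ring_pending)
                return budget;  /* data raced in: keep polling instead */

        return work;            /* truly idle: wait for the next interrupt */
}

int main(void)
{
        printf("%d\n", poll_once(8));  /* 3: drained under budget */
        return 0;
}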

In netvsc_channel_cb():
1696  struct vmbus_channel *channel = nvchan->channel;
1697  struct hv_ring_buffer_info *rbi = &channel->inbound;
1700  prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1702  if (napi_schedule_prep(&nvchan->napi)) {
1706  __napi_schedule_irqoff(&nvchan->napi);

In netvsc_device_add():
1711  * netvsc_device_add - Callback when the device belonging to this
1724  return ERR_PTR(-ENOMEM);
1727  net_device_ctx->tx_table[i] = 0;
1732  set_channel_read_mode(device->channel, HV_CALL_ISR);
1742  struct netvsc_channel *nvchan = &net_device->chan_table[i];
1744  nvchan->channel = device->channel;
1745  nvchan->net_device = net_device;
1746  u64_stats_init(&nvchan->tx_stats.syncp);
1747  u64_stats_init(&nvchan->rx_stats.syncp);
1749  ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1756  ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1766  netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1769  device->channel->next_request_id_callback = vmbus_next_request_id;
1770  device->channel->request_addr_callback = vmbus_request_addr;
1771  device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1772  device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1774  ret = vmbus_open(device->channel, netvsc_ring_bytes,
1776  netvsc_channel_cb, net_device->chan_table);
1786  napi_enable(&net_device->chan_table[0].napi);
1792  "unable to connect to NetVSP - %d\n", ret);
1799  rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1804  RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1805  napi_disable(&net_device->chan_table[0].napi);
1808  vmbus_close(device->channel);
1811  netif_napi_del(&net_device->chan_table[0].napi);
1814  free_netvsc_device(&net_device->rcu);