Lines matching defs:priv in the gve driver (each entry is the source file line number followed by the matching line).

43 static int gve_verify_driver_compatibility(struct gve_priv *priv)
49 driver_info = dma_alloc_coherent(&priv->pdev->dev,
72 err = gve_adminq_verify_driver_compatibility(priv,
80 dma_free_coherent(&priv->pdev->dev,
90 struct gve_priv *priv = netdev_priv(dev);
92 if (!gve_is_gqi(priv))
100 struct gve_priv *priv = netdev_priv(dev);
102 if (gve_is_gqi(priv))
110 struct gve_priv *priv = netdev_priv(dev);
116 num_tx_queues = gve_num_tx_queues(priv);
117 if (priv->rx) {
118 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
121 u64_stats_fetch_begin(&priv->rx[ring].statss);
122 packets = priv->rx[ring].rpackets;
123 bytes = priv->rx[ring].rbytes;
124 } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
130 if (priv->tx) {
134 u64_stats_fetch_begin(&priv->tx[ring].statss);
135 packets = priv->tx[ring].pkt_done;
136 bytes = priv->tx[ring].bytes_done;
137 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
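
The rx/tx loops above read 64-bit per-ring counters through the kernel's u64_stats_sync seqcount API, which keeps such reads consistent on 32-bit machines where a 64-bit load is not atomic. A minimal sketch of the writer and reader sides of that pattern, assuming an illustrative struct demo_ring rather than the gve ring layout:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Illustrative ring: one seqcount guards a pair of 64-bit counters. */
struct demo_ring {
	struct u64_stats_sync statss;	/* must go through u64_stats_init() once */
	u64 packets;
	u64 bytes;
};

/* Writer (datapath): bump counters inside an update section. */
static void demo_ring_count(struct demo_ring *ring, unsigned int len)
{
	u64_stats_update_begin(&ring->statss);
	ring->packets++;
	ring->bytes += len;
	u64_stats_update_end(&ring->statss);
}

/* Reader (e.g. ndo_get_stats64): retry until a consistent snapshot is taken. */
static void demo_ring_read(struct demo_ring *ring, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->statss);
		*packets = ring->packets;
		*bytes = ring->bytes;
	} while (u64_stats_fetch_retry(&ring->statss, start));
}

The gve_init_sync_stats()/init_xdp_sync_stats() entries further down are the matching initialization step: each ring's statss is passed through u64_stats_init() before its first update.
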
145 static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
147 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
150 if (!priv->max_flow_rules)
157 dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
165 dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
178 static void gve_free_flow_rule_caches(struct gve_priv *priv)
180 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
188 static int gve_alloc_rss_config_cache(struct gve_priv *priv)
190 struct gve_rss_config *rss_config = &priv->rss_config;
192 if (!priv->cache_rss_config)
195 rss_config->hash_key = kcalloc(priv->rss_key_size,
201 rss_config->hash_lut = kcalloc(priv->rss_lut_size,
215 static void gve_free_rss_config_cache(struct gve_priv *priv)
217 struct gve_rss_config *rss_config = &priv->rss_config;
225 static int gve_alloc_counter_array(struct gve_priv *priv)
227 priv->counter_array =
228 dma_alloc_coherent(&priv->pdev->dev,
229 priv->num_event_counters *
230 sizeof(*priv->counter_array),
231 &priv->counter_array_bus, GFP_KERNEL);
232 if (!priv->counter_array)
238 static void gve_free_counter_array(struct gve_priv *priv)
240 if (!priv->counter_array)
243 dma_free_coherent(&priv->pdev->dev,
244 priv->num_event_counters *
245 sizeof(*priv->counter_array),
246 priv->counter_array, priv->counter_array_bus);
247 priv->counter_array = NULL;
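
gve_alloc_counter_array() and gve_free_counter_array() above are a standard dma_alloc_coherent()/dma_free_coherent() pairing: the same size expression, CPU pointer and DMA handle appear in both calls, the free path returns early when nothing was allocated, and the pointer is cleared afterwards so a repeated free is harmless. A hedged sketch of that pairing with invented demo_* names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Invented context; only the fields the two helpers need. */
struct demo_dma_priv {
	struct device *dev;		/* e.g. &pdev->dev */
	__be32 *counters;		/* CPU view of the shared array */
	dma_addr_t counters_bus;	/* device (bus) address of the array */
	unsigned int num_counters;
};

static int demo_alloc_counters(struct demo_dma_priv *priv)
{
	priv->counters = dma_alloc_coherent(priv->dev,
					    priv->num_counters *
					    sizeof(*priv->counters),
					    &priv->counters_bus, GFP_KERNEL);
	return priv->counters ? 0 : -ENOMEM;
}

static void demo_free_counters(struct demo_dma_priv *priv)
{
	if (!priv->counters)
		return;

	dma_free_coherent(priv->dev,
			  priv->num_counters * sizeof(*priv->counters),
			  priv->counters, priv->counters_bus);
	priv->counters = NULL;		/* makes a second call harmless */
}
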
253 struct gve_priv *priv = container_of(work, struct gve_priv,
255 if (gve_get_do_report_stats(priv)) {
256 gve_handle_report_stats(priv);
257 gve_clear_do_report_stats(priv);
261 static void gve_stats_report_schedule(struct gve_priv *priv)
263 if (!gve_get_probe_in_progress(priv) &&
264 !gve_get_reset_in_progress(priv)) {
265 gve_set_do_report_stats(priv);
266 queue_work(priv->gve_wq, &priv->stats_report_task);
272 struct gve_priv *priv = timer_container_of(priv, t,
275 mod_timer(&priv->stats_report_timer,
277 msecs_to_jiffies(priv->stats_report_timer_period)));
278 gve_stats_report_schedule(priv);
281 static int gve_alloc_stats_report(struct gve_priv *priv)
286 gve_num_tx_queues(priv);
288 priv->rx_cfg.num_queues;
289 priv->stats_report_len = struct_size(priv->stats_report, stats,
291 priv->stats_report =
292 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
293 &priv->stats_report_bus, GFP_KERNEL);
294 if (!priv->stats_report)
297 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
298 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
302 static void gve_free_stats_report(struct gve_priv *priv)
304 if (!priv->stats_report)
307 timer_delete_sync(&priv->stats_report_timer);
308 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
309 priv->stats_report, priv->stats_report_bus);
310 priv->stats_report = NULL;
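
The stats-report helpers above combine a struct_size()-sized coherent DMA buffer with a periodic timer: timer_setup() registers the callback, the callback re-arms itself with mod_timer(), and timer_delete_sync() stops it before the buffer is freed. Below is a minimal self-rearming timer sketch; the demo_* names and the 20-second period are assumptions for illustration, not values taken from the driver.

#include <linux/jiffies.h>
#include <linux/timer.h>

#define DEMO_REPORT_PERIOD_MS	20000	/* assumed period, illustration only */

struct demo_timer_priv {
	struct timer_list report_timer;
	unsigned long report_period_ms;
};

/* Timer callback: kick the real work, then re-arm for the next period. */
static void demo_report_timer(struct timer_list *t)
{
	struct demo_timer_priv *priv =
		timer_container_of(priv, t, report_timer);

	/* ...schedule the actual reporting work here... */
	mod_timer(&priv->report_timer,
		  round_jiffies(jiffies +
				msecs_to_jiffies(priv->report_period_ms)));
}

static void demo_start_report_timer(struct demo_timer_priv *priv)
{
	priv->report_period_ms = DEMO_REPORT_PERIOD_MS;
	timer_setup(&priv->report_timer, demo_report_timer, 0);
	mod_timer(&priv->report_timer,
		  round_jiffies(jiffies +
				msecs_to_jiffies(priv->report_period_ms)));
}

static void demo_stop_report_timer(struct demo_timer_priv *priv)
{
	timer_delete_sync(&priv->report_timer);	/* waits for a running callback */
}

Note that in the listing the arming mod_timer() call does not live in gve_alloc_stats_report(); it appears later, in gve_queues_start(), once stats reporting is actually enabled.
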
315 struct gve_priv *priv = arg;
317 queue_work(priv->gve_wq, &priv->service_task);
324 struct gve_priv *priv = block->priv;
326 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
340 static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
357 struct gve_priv *priv;
361 priv = block->priv;
364 if (block->tx->q_num < priv->tx_cfg.num_queues)
379 if (priv->xdp_prog)
391 irq_doorbell = gve_irq_doorbell(priv, block);
400 reschedule |= gve_tx_clean_pending(priv, block->tx);
414 struct gve_priv *priv = block->priv;
419 if (block->tx->q_num < priv->tx_cfg.num_queues)
434 if (priv->xdp_prog)
443 if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
464 gve_write_irq_doorbell_dqo(priv, block,
471 static const struct cpumask *gve_get_node_mask(struct gve_priv *priv)
473 if (priv->numa_node == NUMA_NO_NODE)
476 return cpumask_of_node(priv->numa_node);
479 static int gve_alloc_notify_blocks(struct gve_priv *priv)
481 int num_vecs_requested = priv->num_ntfy_blks + 1;
488 priv->msix_vectors = kvcalloc(num_vecs_requested,
489 sizeof(*priv->msix_vectors), GFP_KERNEL);
490 if (!priv->msix_vectors)
493 priv->msix_vectors[i].entry = i;
494 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
497 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
507 priv->num_ntfy_blks = new_num_ntfy_blks;
508 priv->mgmt_msix_idx = priv->num_ntfy_blks;
509 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
511 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
513 dev_err(&priv->pdev->dev,
515 vecs_enabled, priv->tx_cfg.max_queues,
516 priv->rx_cfg.max_queues);
517 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
518 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
519 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
520 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
524 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
525 pci_name(priv->pdev));
526 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
527 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
529 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
532 priv->irq_db_indices =
533 dma_alloc_coherent(&priv->pdev->dev,
534 priv->num_ntfy_blks *
535 sizeof(*priv->irq_db_indices),
536 &priv->irq_db_indices_bus, GFP_KERNEL);
537 if (!priv->irq_db_indices) {
542 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
543 sizeof(*priv->ntfy_blocks), GFP_KERNEL);
544 if (!priv->ntfy_blocks) {
550 node_mask = gve_get_node_mask(priv);
552 for (i = 0; i < priv->num_ntfy_blks; i++) {
553 struct gve_notify_block *block = &priv->ntfy_blocks[i];
557 i, pci_name(priv->pdev));
558 block->priv = priv;
559 err = request_irq(priv->msix_vectors[msix_idx].vector,
560 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
563 dev_err(&priv->pdev->dev,
567 block->irq = priv->msix_vectors[msix_idx].vector;
570 block->irq_db_index = &priv->irq_db_indices[i].index;
577 if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues)
583 struct gve_notify_block *block = &priv->ntfy_blocks[j];
586 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
588 free_irq(priv->msix_vectors[msix_idx].vector, block);
591 kvfree(priv->ntfy_blocks);
592 priv->ntfy_blocks = NULL;
594 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
595 sizeof(*priv->irq_db_indices),
596 priv->irq_db_indices, priv->irq_db_indices_bus);
597 priv->irq_db_indices = NULL;
599 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
601 pci_disable_msix(priv->pdev);
603 kvfree(priv->msix_vectors);
604 priv->msix_vectors = NULL;
608 static void gve_free_notify_blocks(struct gve_priv *priv)
612 if (!priv->msix_vectors)
616 for (i = 0; i < priv->num_ntfy_blks; i++) {
617 struct gve_notify_block *block = &priv->ntfy_blocks[i];
620 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
622 free_irq(priv->msix_vectors[msix_idx].vector, block);
625 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
626 kvfree(priv->ntfy_blocks);
627 priv->ntfy_blocks = NULL;
628 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
629 sizeof(*priv->irq_db_indices),
630 priv->irq_db_indices, priv->irq_db_indices_bus);
631 priv->irq_db_indices = NULL;
632 pci_disable_msix(priv->pdev);
633 kvfree(priv->msix_vectors);
634 priv->msix_vectors = NULL;
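
gve_alloc_notify_blocks() and gve_free_notify_blocks() above follow the usual MSI-X lifecycle: allocate a struct msix_entry array, enable a vector range with pci_enable_msix_range() (which may grant fewer vectors than requested, forcing the queue maxima to shrink), request one IRQ per notification block plus a management vector, and release everything in reverse order on failure or teardown. A condensed, hedged sketch with invented demo_* names; affinity hints, doorbell indices and the management vector are left out:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct demo_irq_priv {
	struct pci_dev *pdev;
	struct msix_entry *msix_vectors;
	int num_vecs;
};

static irqreturn_t demo_intr(int irq, void *arg)
{
	/* arg is whatever context was passed to request_irq() below */
	return IRQ_HANDLED;
}

static int demo_alloc_vectors(struct demo_irq_priv *priv, int want)
{
	int enabled, i, err;

	priv->msix_vectors = kvcalloc(want, sizeof(*priv->msix_vectors),
				      GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;

	for (i = 0; i < want; i++)
		priv->msix_vectors[i].entry = i;

	/* May return fewer than 'want'; a real driver rescales its queues. */
	enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					1, want);
	if (enabled < 0) {
		err = enabled;
		goto free_array;
	}
	priv->num_vecs = enabled;

	for (i = 0; i < enabled; i++) {
		err = request_irq(priv->msix_vectors[i].vector, demo_intr,
				  0, "demo-ntfy", priv);
		if (err)
			goto free_irqs;
	}
	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(priv->msix_vectors[i].vector, priv);
	pci_disable_msix(priv->pdev);
free_array:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}

Newer drivers often use pci_alloc_irq_vectors()/pci_irq_vector() instead of managing struct msix_entry by hand; the range-based calls above simply mirror the ones visible in the listing.
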
637 static int gve_setup_device_resources(struct gve_priv *priv)
641 err = gve_alloc_flow_rule_caches(priv);
644 err = gve_alloc_rss_config_cache(priv);
647 err = gve_alloc_counter_array(priv);
650 err = gve_init_clock(priv);
653 err = gve_alloc_notify_blocks(priv);
656 err = gve_alloc_stats_report(priv);
659 err = gve_adminq_configure_device_resources(priv,
660 priv->counter_array_bus,
661 priv->num_event_counters,
662 priv->irq_db_indices_bus,
663 priv->num_ntfy_blks);
665 dev_err(&priv->pdev->dev,
671 if (!gve_is_gqi(priv)) {
672 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
674 if (!priv->ptype_lut_dqo) {
678 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
680 dev_err(&priv->pdev->dev,
686 err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
688 dev_err(&priv->pdev->dev, "Failed to init RSS config");
692 err = gve_adminq_report_stats(priv, priv->stats_report_len,
693 priv->stats_report_bus,
696 dev_err(&priv->pdev->dev,
698 gve_set_device_resources_ok(priv);
702 kvfree(priv->ptype_lut_dqo);
703 priv->ptype_lut_dqo = NULL;
705 gve_free_stats_report(priv);
707 gve_free_notify_blocks(priv);
709 gve_teardown_clock(priv);
711 gve_free_counter_array(priv);
713 gve_free_rss_config_cache(priv);
715 gve_free_flow_rule_caches(priv);
720 static void gve_trigger_reset(struct gve_priv *priv);
722 static void gve_teardown_device_resources(struct gve_priv *priv)
727 if (gve_get_device_resources_ok(priv)) {
728 err = gve_flow_rules_reset(priv);
730 dev_err(&priv->pdev->dev,
732 gve_trigger_reset(priv);
735 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
737 dev_err(&priv->pdev->dev,
739 gve_trigger_reset(priv);
741 err = gve_adminq_deconfigure_device_resources(priv);
743 dev_err(&priv->pdev->dev,
746 gve_trigger_reset(priv);
750 kvfree(priv->ptype_lut_dqo);
751 priv->ptype_lut_dqo = NULL;
753 gve_free_flow_rule_caches(priv);
754 gve_free_rss_config_cache(priv);
755 gve_free_counter_array(priv);
756 gve_free_notify_blocks(priv);
757 gve_free_stats_report(priv);
758 gve_teardown_clock(priv);
759 gve_clear_device_resources_ok(priv);
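
gve_setup_device_resources() above acquires its resources in a fixed order and, on error, falls through a ladder of labels that releases, in reverse order, only what was already acquired, while gve_teardown_device_resources() releases the full set after quiescing the device. Here is that idiom reduced to a self-contained skeleton; the demo_* names and the three steps are placeholders, not gve code:

#include <linux/types.h>

struct demo_ctx {
	bool a, b, c;
};

static int  demo_alloc_a(struct demo_ctx *ctx) { ctx->a = true; return 0; }
static int  demo_alloc_b(struct demo_ctx *ctx) { ctx->b = true; return 0; }
static int  demo_alloc_c(struct demo_ctx *ctx) { ctx->c = true; return 0; }
static void demo_free_a(struct demo_ctx *ctx)  { ctx->a = false; }
static void demo_free_b(struct demo_ctx *ctx)  { ctx->b = false; }
static void demo_free_c(struct demo_ctx *ctx)  { ctx->c = false; }

static int demo_setup(struct demo_ctx *ctx)
{
	int err;

	err = demo_alloc_a(ctx);
	if (err)
		return err;
	err = demo_alloc_b(ctx);
	if (err)
		goto free_a;
	err = demo_alloc_c(ctx);
	if (err)
		goto free_b;
	return 0;

free_b:
	demo_free_b(ctx);	/* unwind only what succeeded, newest first */
free_a:
	demo_free_a(ctx);
	return err;
}

static void demo_teardown(struct demo_ctx *ctx)
{
	demo_free_c(ctx);
	demo_free_b(ctx);
	demo_free_a(ctx);
}
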
762 static int gve_unregister_qpl(struct gve_priv *priv,
770 err = gve_adminq_unregister_page_list(priv, qpl->id);
772 netif_err(priv, drv, priv->dev,
778 priv->num_registered_pages -= qpl->num_entries;
782 static int gve_register_qpl(struct gve_priv *priv,
793 if (pages + priv->num_registered_pages > priv->max_registered_pages) {
794 netif_err(priv, drv, priv->dev,
796 pages + priv->num_registered_pages,
797 priv->max_registered_pages);
801 err = gve_adminq_register_page_list(priv, qpl);
803 netif_err(priv, drv, priv->dev,
809 priv->num_registered_pages += pages;
813 static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
815 struct gve_tx_ring *tx = &priv->tx[idx];
817 if (gve_is_gqi(priv))
823 static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
825 struct gve_rx_ring *rx = &priv->rx[idx];
827 if (gve_is_gqi(priv))
833 static int gve_register_qpls(struct gve_priv *priv)
839 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
840 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
843 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
849 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
857 static int gve_unregister_qpls(struct gve_priv *priv)
863 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
864 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
867 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
874 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
882 static int gve_create_rings(struct gve_priv *priv)
884 int num_tx_queues = gve_num_tx_queues(priv);
888 err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
890 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
897 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
900 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
902 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
903 priv->rx_cfg.num_queues);
909 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
910 priv->rx_cfg.num_queues);
912 if (gve_is_gqi(priv)) {
919 for (i = 0; i < priv->rx_cfg.num_queues; i++)
920 gve_rx_write_doorbell(priv, &priv->rx[i]);
922 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
924 gve_rx_post_buffers_dqo(&priv->rx[i]);
931 static void init_xdp_sync_stats(struct gve_priv *priv)
933 int start_id = gve_xdp_tx_start_queue_id(priv);
937 for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
938 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
940 u64_stats_init(&priv->tx[i].statss);
941 priv->tx[i].ntfy_id = ntfy_idx;
945 static void gve_init_sync_stats(struct gve_priv *priv)
949 for (i = 0; i < priv->tx_cfg.num_queues; i++)
950 u64_stats_init(&priv->tx[i].statss);
953 init_xdp_sync_stats(priv);
955 for (i = 0; i < priv->rx_cfg.num_queues; i++)
956 u64_stats_init(&priv->rx[i].statss);
959 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
962 cfg->qcfg = &priv->tx_cfg;
963 cfg->raw_addressing = !gve_is_qpl(priv);
964 cfg->ring_size = priv->tx_desc_cnt;
966 cfg->tx = priv->tx;
969 static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
973 if (!priv->tx)
977 if (gve_is_gqi(priv))
978 gve_tx_stop_ring_gqi(priv, i);
980 gve_tx_stop_ring_dqo(priv, i);
984 static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
989 if (gve_is_gqi(priv))
990 gve_tx_start_ring_gqi(priv, i);
992 gve_tx_start_ring_dqo(priv, i);
996 static int gve_queues_mem_alloc(struct gve_priv *priv,
1002 if (gve_is_gqi(priv))
1003 err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
1005 err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
1009 if (gve_is_gqi(priv))
1010 err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
1012 err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
1019 if (gve_is_gqi(priv))
1020 gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
1022 gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
1026 static int gve_destroy_rings(struct gve_priv *priv)
1028 int num_tx_queues = gve_num_tx_queues(priv);
1031 err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
1033 netif_err(priv, drv, priv->dev,
1038 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
1039 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
1041 netif_err(priv, drv, priv->dev,
1046 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
1050 static void gve_queues_mem_free(struct gve_priv *priv,
1054 if (gve_is_gqi(priv)) {
1055 gve_tx_free_rings_gqi(priv, tx_cfg);
1056 gve_rx_free_rings_gqi(priv, rx_cfg);
1058 gve_tx_free_rings_dqo(priv, tx_cfg);
1059 gve_rx_free_rings_dqo(priv, rx_cfg);
1063 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1067 *page = alloc_pages_node(priv->numa_node, gfp_flags, 0);
1069 priv->page_alloc_fail++;
1074 priv->dma_mapping_error++;
1081 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1103 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
1105 gve_qpl_dma_dir(priv, id), GFP_KERNEL);
1114 gve_free_queue_page_list(priv, qpl, id);
1127 void gve_free_queue_page_list(struct gve_priv *priv,
1141 gve_free_page(&priv->pdev->dev, qpl->pages[i],
1142 qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
1157 void gve_schedule_reset(struct gve_priv *priv)
1159 gve_set_do_reset(priv);
1160 queue_work(priv->gve_wq, &priv->service_task);
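
gve_schedule_reset() above, like gve_stats_report_schedule() earlier, follows one pattern: set a "work to do" flag bit, then queue a work item on the driver's ordered workqueue so the heavy lifting happens in process context; the work handler checks and clears the flag. A hedged sketch of that pattern with invented demo_* names:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define DEMO_FLAG_DO_RESET	0	/* bit index in flags */

struct demo_wq_priv {
	struct workqueue_struct *wq;	/* from alloc_ordered_workqueue() */
	struct work_struct service_task;
	unsigned long flags;
};

/* Callable from any context, including IRQ: mark and kick the task. */
static void demo_schedule_reset(struct demo_wq_priv *priv)
{
	set_bit(DEMO_FLAG_DO_RESET, &priv->flags);
	queue_work(priv->wq, &priv->service_task);
}

/* Work handler: consume the flag in process context. */
static void demo_service_task(struct work_struct *work)
{
	struct demo_wq_priv *priv = container_of(work, struct demo_wq_priv,
						 service_task);

	if (test_and_clear_bit(DEMO_FLAG_DO_RESET, &priv->flags)) {
		/* ...perform the reset under the appropriate locks... */
	}
}

static int demo_wq_init(struct demo_wq_priv *priv)
{
	priv->wq = alloc_ordered_workqueue("demo", 0);
	if (!priv->wq)
		return -ENOMEM;
	INIT_WORK(&priv->service_task, demo_service_task);
	return 0;
}
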
1163 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
1164 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1165 static void gve_turndown(struct gve_priv *priv);
1166 static void gve_turnup(struct gve_priv *priv);
1168 static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
1172 if (!priv->rx)
1175 rx = &priv->rx[qid];
1180 if (!priv->tx)
1182 priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
1185 static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
1192 rx = &priv->rx[qid];
1196 gve_unreg_xsk_pool(priv, qid);
1202 tx_qid = gve_xdp_tx_queue_id(priv, qid);
1203 priv->tx[tx_qid].xsk_pool = pool;
1208 static void gve_unreg_xdp_info(struct gve_priv *priv)
1212 if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
1215 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1216 struct gve_rx_ring *rx = &priv->rx[i];
1221 gve_unreg_xsk_pool(priv, i);
1225 static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
1227 if (!test_bit(qid, priv->xsk_pools))
1230 return xsk_get_pool_from_qid(priv->dev, qid);
1233 static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
1240 if (!priv->tx_cfg.num_xdp_queues)
1243 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1246 rx = &priv->rx[i];
1247 napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1254 xsk_pool = gve_get_xsk_pool(priv, i);
1256 err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
1257 else if (gve_is_qpl(priv))
1271 gve_unreg_xdp_info(priv);
1276 static void gve_drain_page_cache(struct gve_priv *priv)
1280 for (i = 0; i < priv->rx_cfg.num_queues; i++)
1281 page_frag_cache_drain(&priv->rx[i].page_cache);
1284 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
1287 cfg->qcfg_rx = &priv->rx_cfg;
1288 cfg->qcfg_tx = &priv->tx_cfg;
1289 cfg->raw_addressing = !gve_is_qpl(priv);
1290 cfg->enable_header_split = priv->header_split_enabled;
1291 cfg->ring_size = priv->rx_desc_cnt;
1292 cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
1293 cfg->rx = priv->rx;
1297 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1301 gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
1302 gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
1305 static void gve_rx_start_ring(struct gve_priv *priv, int i)
1307 if (gve_is_gqi(priv))
1308 gve_rx_start_ring_gqi(priv, i);
1310 gve_rx_start_ring_dqo(priv, i);
1313 static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
1318 gve_rx_start_ring(priv, i);
1321 static void gve_rx_stop_ring(struct gve_priv *priv, int i)
1323 if (gve_is_gqi(priv))
1324 gve_rx_stop_ring_gqi(priv, i);
1326 gve_rx_stop_ring_dqo(priv, i);
1329 static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
1333 if (!priv->rx)
1337 gve_rx_stop_ring(priv, i);
1340 static void gve_queues_mem_remove(struct gve_priv *priv)
1345 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1346 gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1347 priv->tx = NULL;
1348 priv->rx = NULL;
1351 /* The passed-in queue memory is stored into priv and the queues are made live.
1354 static int gve_queues_start(struct gve_priv *priv,
1358 struct net_device *dev = priv->dev;
1361 /* Record new resources into priv */
1362 priv->tx = tx_alloc_cfg->tx;
1363 priv->rx = rx_alloc_cfg->rx;
1365 /* Record new configs into priv */
1366 priv->tx_cfg = *tx_alloc_cfg->qcfg;
1367 priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
1368 priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
1369 priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
1370 priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
1372 gve_tx_start_rings(priv, gve_num_tx_queues(priv));
1373 gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
1374 gve_init_sync_stats(priv);
1376 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1379 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1383 err = gve_reg_xdp_info(priv, dev);
1388 err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
1393 err = gve_register_qpls(priv);
1397 priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
1398 priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
1400 err = gve_create_rings(priv);
1404 gve_set_device_rings_ok(priv);
1406 if (gve_get_report_stats(priv))
1407 mod_timer(&priv->stats_report_timer,
1409 msecs_to_jiffies(priv->stats_report_timer_period)));
1411 gve_turnup(priv);
1412 queue_work(priv->gve_wq, &priv->service_task);
1413 priv->interface_up_cnt++;
1417 if (gve_get_reset_in_progress(priv))
1419 gve_reset_and_teardown(priv, true);
1421 gve_reset_recovery(priv, false);
1425 gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
1426 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1427 gve_queues_mem_remove(priv);
1435 struct gve_priv *priv = netdev_priv(dev);
1438 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1440 err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1447 err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1454 static int gve_queues_stop(struct gve_priv *priv)
1458 netif_carrier_off(priv->dev);
1459 if (gve_get_device_rings_ok(priv)) {
1460 gve_turndown(priv);
1461 gve_drain_page_cache(priv);
1462 err = gve_destroy_rings(priv);
1465 err = gve_unregister_qpls(priv);
1468 gve_clear_device_rings_ok(priv);
1470 timer_delete_sync(&priv->stats_report_timer);
1472 gve_unreg_xdp_info(priv);
1474 gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
1475 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1477 priv->interface_down_cnt++;
1484 if (gve_get_reset_in_progress(priv))
1487 gve_reset_and_teardown(priv, true);
1488 return gve_reset_recovery(priv, false);
1493 struct gve_priv *priv = netdev_priv(dev);
1496 err = gve_queues_stop(priv);
1500 gve_queues_mem_remove(priv);
1504 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1506 if (!gve_get_napi_enabled(priv))
1509 if (link_status == netif_carrier_ok(priv->dev))
1513 netdev_info(priv->dev, "Device link is up.\n");
1514 netif_carrier_on(priv->dev);
1516 netdev_info(priv->dev, "Device link is down.\n");
1517 netif_carrier_off(priv->dev);
1521 static int gve_configure_rings_xdp(struct gve_priv *priv,
1527 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1531 return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1534 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
1541 old_prog = READ_ONCE(priv->xdp_prog);
1542 if (!netif_running(priv->dev)) {
1543 WRITE_ONCE(priv->xdp_prog, prog);
1547 /* Update priv XDP queue configuration */
1548 priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
1549 priv->rx_cfg.num_queues : 0;
1554 err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
1556 err = gve_configure_rings_xdp(priv, 0);
1561 WRITE_ONCE(priv->xdp_prog, prog);
1566 status = ioread32be(&priv->reg_bar0->device_status);
1567 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1574 struct gve_priv *priv = netdev_priv(dev);
1576 if (priv->queue_format == GVE_GQI_QPL_FORMAT)
1578 else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
1588 struct gve_priv *priv = netdev_priv(dev);
1591 if (qid >= priv->rx_cfg.num_queues) {
1592 dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
1596 priv->dev->max_mtu + sizeof(struct ethhdr)) {
1597 dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
1601 err = xsk_pool_dma_map(pool, &priv->pdev->dev,
1606 set_bit(qid, priv->xsk_pools);
1609 if (!priv->xdp_prog || !netif_running(dev))
1612 err = gve_reg_xsk_pool(priv, dev, pool, qid);
1617 if (!gve_is_qpl(priv)) {
1618 err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
1625 gve_unreg_xsk_pool(priv, qid);
1627 clear_bit(qid, priv->xsk_pools);
1637 struct gve_priv *priv = netdev_priv(dev);
1644 if (qid >= priv->rx_cfg.num_queues)
1647 clear_bit(qid, priv->xsk_pools);
1655 if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
1659 if (!gve_is_qpl(priv) && priv->xdp_prog) {
1660 err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
1665 napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
1668 tx_qid = gve_xdp_tx_queue_id(priv, qid);
1669 napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
1672 gve_unreg_xsk_pool(priv, qid);
1677 if (gve_is_gqi(priv)) {
1678 if (gve_rx_work_pending(&priv->rx[qid]))
1681 if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
1690 struct gve_priv *priv = netdev_priv(dev);
1693 if (!gve_get_napi_enabled(priv))
1696 if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
1699 napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
1712 struct gve_priv *priv = netdev_priv(dev);
1720 if (priv->header_split_enabled) {
1725 max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
1726 if (priv->queue_format == GVE_GQI_QPL_FORMAT)
1735 if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
1736 (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
1738 priv->rx_cfg.num_queues,
1739 priv->tx_cfg.num_queues,
1740 priv->tx_cfg.max_queues);
1748 struct gve_priv *priv = netdev_priv(dev);
1756 return gve_set_xdp(priv, xdp->prog, xdp->extack);
1767 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
1769 struct gve_rss_config *rss_config = &priv->rss_config;
1773 if (!priv->cache_rss_config)
1776 for (i = 0; i < priv->rss_lut_size; i++)
1780 netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
1784 return gve_adminq_configure_rss(priv, &rxfh);
1787 int gve_flow_rules_reset(struct gve_priv *priv)
1789 if (!priv->max_flow_rules)
1792 return gve_adminq_reset_flow_rules(priv);
1795 int gve_adjust_config(struct gve_priv *priv,
1802 err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
1804 netif_err(priv, drv, priv->dev,
1810 err = gve_close(priv->dev);
1812 netif_err(priv, drv, priv->dev,
1814 gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
1819 err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
1821 netif_err(priv, drv, priv->dev,
1826 gve_turndown(priv);
1833 int gve_adjust_queues(struct gve_priv *priv,
1842 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1850 if (netif_running(priv->dev)) {
1851 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1856 err = gve_init_rss_config(priv, new_rx_config.num_queues);
1860 priv->tx_cfg = new_tx_config;
1861 priv->rx_cfg = new_rx_config;
1866 static void gve_turndown(struct gve_priv *priv)
1870 if (netif_carrier_ok(priv->dev))
1871 netif_carrier_off(priv->dev);
1873 if (!gve_get_napi_enabled(priv))
1877 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1878 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1879 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1881 if (!gve_tx_was_added_to_block(priv, idx))
1884 if (idx < priv->tx_cfg.num_queues)
1885 netif_queue_set_napi(priv->dev, idx,
1890 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1891 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1892 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1894 if (!gve_rx_was_added_to_block(priv, idx))
1897 netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
1903 netif_tx_disable(priv->dev);
1905 xdp_features_clear_redirect_target_locked(priv->dev);
1907 gve_clear_napi_enabled(priv);
1908 gve_clear_report_stats(priv);
1914 static void gve_turnup(struct gve_priv *priv)
1919 netif_tx_start_all_queues(priv->dev);
1922 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1923 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1924 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1926 if (!gve_tx_was_added_to_block(priv, idx))
1931 if (idx < priv->tx_cfg.num_queues)
1932 netif_queue_set_napi(priv->dev, idx,
1936 if (gve_is_gqi(priv)) {
1937 iowrite32be(0, gve_irq_doorbell(priv, block));
1939 gve_set_itr_coalesce_usecs_dqo(priv, block,
1940 priv->tx_coalesce_usecs);
1950 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1951 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1952 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1954 if (!gve_rx_was_added_to_block(priv, idx))
1958 netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
1961 if (gve_is_gqi(priv)) {
1962 iowrite32be(0, gve_irq_doorbell(priv, block));
1964 gve_set_itr_coalesce_usecs_dqo(priv, block,
1965 priv->rx_coalesce_usecs);
1976 if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
1977 xdp_features_set_redirect_target_locked(priv->dev, false);
1979 gve_set_napi_enabled(priv);
1982 static void gve_turnup_and_check_status(struct gve_priv *priv)
1986 gve_turnup(priv);
1987 status = ioread32be(&priv->reg_bar0->device_status);
1988 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1991 static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
1996 if (txqueue > priv->tx_cfg.num_queues)
1999 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
2000 if (ntfy_idx >= priv->num_ntfy_blks)
2003 return &priv->ntfy_blocks[ntfy_idx];
2006 static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
2012 block = gve_get_tx_notify_block(priv, txqueue);
2021 netdev_info(priv->dev, "Kicking queue %d", txqueue);
2030 struct gve_priv *priv;
2033 priv = netdev_priv(dev);
2035 if (!gve_tx_timeout_try_q_kick(priv, txqueue))
2036 gve_schedule_reset(priv);
2038 block = gve_get_tx_notify_block(priv, txqueue);
2041 priv->tx_timeo_cnt++;
2044 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
2046 if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
2055 bool gve_header_split_supported(const struct gve_priv *priv)
2057 return priv->header_buf_size &&
2058 priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
2061 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
2071 if (!gve_header_split_supported(priv)) {
2072 dev_err(&priv->pdev->dev, "Header-split not supported\n");
2081 if (enable_hdr_split == priv->header_split_enabled)
2084 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2087 rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
2089 if (netif_running(priv->dev))
2090 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2100 struct gve_priv *priv = netdev_priv(netdev);
2103 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2107 if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
2114 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2120 err = gve_flow_rules_reset(priv);
2135 struct gve_priv *priv = netdev_priv(dev);
2137 *kernel_config = priv->ts_config;
2145 struct gve_priv *priv = netdev_priv(dev);
2153 if (!priv->nic_ts_report) {
2161 gve_clock_nic_ts_read(priv);
2162 ptp_schedule_worker(priv->ptp->clock, 0);
2164 ptp_cancel_worker_sync(priv->ptp->clock);
2167 priv->ts_config.rx_filter = kernel_config->rx_filter;
2187 static void gve_handle_status(struct gve_priv *priv, u32 status)
2190 dev_info(&priv->pdev->dev, "Device requested reset.\n");
2191 gve_set_do_reset(priv);
2194 priv->stats_report_trigger_cnt++;
2195 gve_set_do_report_stats(priv);
2199 static void gve_handle_reset(struct gve_priv *priv)
2205 if (gve_get_probe_in_progress(priv))
2208 if (gve_get_do_reset(priv)) {
2210 netdev_lock(priv->dev);
2211 gve_reset(priv, false);
2212 netdev_unlock(priv->dev);
2217 void gve_handle_report_stats(struct gve_priv *priv)
2219 struct stats *stats = priv->stats_report->stats;
2224 if (!gve_get_report_stats(priv))
2227 be64_add_cpu(&priv->stats_report->written_count, 1);
2229 if (priv->tx) {
2230 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
2235 if (gve_is_gqi(priv)) {
2236 last_completion = priv->tx[idx].done;
2237 tx_frames = priv->tx[idx].req;
2241 start = u64_stats_fetch_begin(&priv->tx[idx].statss);
2242 tx_bytes = priv->tx[idx].bytes_done;
2243 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
2246 .value = cpu_to_be64(priv->tx[idx].wake_queue),
2251 .value = cpu_to_be64(priv->tx[idx].stop_queue),
2271 .value = cpu_to_be64(priv->tx[idx].queue_timeout),
2277 if (priv->rx) {
2278 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
2281 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
2286 .value = cpu_to_be64(priv->rx[idx].fill_cnt),
2296 struct gve_priv *priv = container_of(work, struct gve_priv,
2298 u32 status = ioread32be(&priv->reg_bar0->device_status);
2300 gve_handle_status(priv, status);
2302 gve_handle_reset(priv);
2303 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
2306 static void gve_set_netdev_xdp_features(struct gve_priv *priv)
2310 if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
2314 } else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
2322 xdp_set_features_flag_locked(priv->dev, xdp_features);
2325 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
2331 err = gve_adminq_alloc(&priv->pdev->dev, priv);
2333 dev_err(&priv->pdev->dev,
2338 err = gve_verify_driver_compatibility(priv);
2340 dev_err(&priv->pdev->dev,
2345 priv->num_registered_pages = 0;
2350 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
2352 err = gve_adminq_describe_device(priv);
2354 dev_err(&priv->pdev->dev,
2358 priv->dev->mtu = priv->dev->max_mtu;
2359 num_ntfy = pci_msix_vec_count(priv->pdev);
2361 dev_err(&priv->pdev->dev,
2366 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
2373 if (!gve_is_gqi(priv))
2374 netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
2376 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
2380 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
2381 priv->mgmt_msix_idx = priv->num_ntfy_blks;
2382 priv->numa_node = dev_to_node(&priv->pdev->dev);
2384 priv->tx_cfg.max_queues =
2385 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
2386 priv->rx_cfg.max_queues =
2387 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
2389 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
2390 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
2391 if (priv->default_num_queues > 0) {
2392 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
2393 priv->tx_cfg.num_queues);
2394 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
2395 priv->rx_cfg.num_queues);
2397 priv->tx_cfg.num_xdp_queues = 0;
2399 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
2400 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
2401 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
2402 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
2404 if (!gve_is_gqi(priv)) {
2405 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
2406 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
2409 priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
2410 priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
2413 priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
2414 if (!priv->xsk_pools) {
2419 gve_set_netdev_xdp_features(priv);
2420 err = gve_setup_device_resources(priv);
2427 bitmap_free(priv->xsk_pools);
2428 priv->xsk_pools = NULL;
2430 gve_adminq_free(&priv->pdev->dev, priv);
2434 static void gve_teardown_priv_resources(struct gve_priv *priv)
2436 gve_teardown_device_resources(priv);
2437 gve_adminq_free(&priv->pdev->dev, priv);
2438 bitmap_free(priv->xsk_pools);
2439 priv->xsk_pools = NULL;
2442 static void gve_trigger_reset(struct gve_priv *priv)
2445 gve_adminq_release(priv);
2448 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
2450 gve_trigger_reset(priv);
2453 gve_close(priv->dev);
2454 gve_teardown_priv_resources(priv);
2457 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
2461 err = gve_init_priv(priv, true);
2465 err = gve_open(priv->dev);
2471 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
2472 gve_turndown(priv);
2476 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
2478 bool was_up = netif_running(priv->dev);
2481 dev_info(&priv->pdev->dev, "Performing reset\n");
2482 gve_clear_do_reset(priv);
2483 gve_set_reset_in_progress(priv);
2488 gve_turndown(priv);
2489 gve_reset_and_teardown(priv, was_up);
2493 err = gve_close(priv->dev);
2496 gve_reset_and_teardown(priv, was_up);
2499 gve_teardown_priv_resources(priv);
2503 err = gve_reset_recovery(priv, was_up);
2504 gve_clear_reset_in_progress(priv);
2505 priv->reset_cnt++;
2506 priv->interface_up_cnt = 0;
2507 priv->interface_down_cnt = 0;
2508 priv->stats_report_trigger_cnt = 0;
2531 struct gve_priv *priv = netdev_priv(dev);
2535 if (!priv->rx)
2539 if (!gve_is_gqi(priv) && idx == 0)
2543 gve_turndown(priv);
2546 err = gve_adminq_destroy_single_rx_queue(priv, idx);
2550 if (gve_is_qpl(priv)) {
2552 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
2557 gve_rx_stop_ring(priv, idx);
2560 gve_turnup_and_check_status(priv);
2563 *gve_per_q_mem = priv->rx[idx];
2564 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2570 struct gve_priv *priv = netdev_priv(dev);
2575 gve_rx_get_curr_alloc_cfg(priv, &cfg);
2577 if (gve_is_gqi(priv))
2578 gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
2580 gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
2586 struct gve_priv *priv = netdev_priv(dev);
2591 if (!priv->rx)
2595 gve_rx_get_curr_alloc_cfg(priv, &cfg);
2597 if (gve_is_gqi(priv))
2598 err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
2600 err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
2607 struct gve_priv *priv = netdev_priv(dev);
2611 if (!priv->rx)
2615 priv->rx[idx] = *gve_per_q_mem;
2618 gve_turndown(priv);
2620 gve_rx_start_ring(priv, idx);
2622 if (gve_is_qpl(priv)) {
2624 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
2630 err = gve_adminq_create_single_rx_queue(priv, idx);
2634 if (gve_is_gqi(priv))
2635 gve_rx_write_doorbell(priv, &priv->rx[idx]);
2637 gve_rx_post_buffers_dqo(&priv->rx[idx]);
2640 gve_turnup_and_check_status(priv);
2644 gve_rx_stop_ring(priv, idx);
2651 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2666 struct gve_priv *priv = netdev_priv(dev);
2667 struct gve_rx_ring *rx = &priv->rx[idx];
2682 struct gve_priv *priv = netdev_priv(dev);
2683 struct gve_tx_ring *tx = &priv->tx[idx];
2717 struct gve_priv *priv;
2754 /* Alloc and setup the netdev and priv */
2755 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2786 priv = netdev_priv(dev);
2787 priv->dev = dev;
2788 priv->pdev = pdev;
2789 priv->msg_enable = DEFAULT_MSG_LEVEL;
2790 priv->reg_bar0 = reg_bar;
2791 priv->db_bar2 = db_bar;
2792 priv->service_task_flags = 0x0;
2793 priv->state_flags = 0x0;
2794 priv->ethtool_flags = 0x0;
2795 priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
2796 priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
2798 gve_set_probe_in_progress(priv);
2799 priv->gve_wq = alloc_ordered_workqueue("gve", 0);
2800 if (!priv->gve_wq) {
2805 INIT_WORK(&priv->service_task, gve_service_task);
2806 INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2807 priv->tx_cfg.max_queues = max_tx_queues;
2808 priv->rx_cfg.max_queues = max_rx_queues;
2810 err = gve_init_priv(priv, false);
2814 if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
2822 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
2823 gve_clear_probe_in_progress(priv);
2824 queue_work(priv->gve_wq, &priv->service_task);
2828 gve_teardown_priv_resources(priv);
2831 destroy_workqueue(priv->gve_wq);
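
The probe fragments above show where priv comes from in the first place: alloc_etherdev_mqs(sizeof(*priv), ...) reserves the private area inside the net_device allocation, and netdev_priv() returns a pointer into that area from any callback that only has the net_device. A minimal sketch of the pairing; struct demo_net_priv, demo_probe_netdev() and the empty ops table are invented for illustration:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Invented private state; it lives in the tail of the net_device allocation. */
struct demo_net_priv {
	struct net_device *dev;		/* back-pointer, as in the listing */
	u32 msg_enable;
};

static const struct net_device_ops demo_netdev_ops = {
	/* a real driver fills in ndo_open, ndo_stop, ndo_start_xmit, ... */
};

static int demo_probe_netdev(unsigned int max_tx, unsigned int max_rx)
{
	struct net_device *dev;
	struct demo_net_priv *priv;
	int err;

	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx, max_rx);
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);	/* no separate allocation to manage */
	priv->dev = dev;
	priv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;

	dev->netdev_ops = &demo_netdev_ops;
	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* also frees the private area */
		return err;
	}

	/* A real probe would stash dev (e.g. pci_set_drvdata()) for remove(). */
	return 0;
}

free_netdev() releases the embedded private area together with the net_device, so priv itself never needs a separate kfree().
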
2853 struct gve_priv *priv = netdev_priv(netdev);
2854 __be32 __iomem *db_bar = priv->db_bar2;
2855 void __iomem *reg_bar = priv->reg_bar0;
2858 gve_teardown_priv_resources(priv);
2859 destroy_workqueue(priv->gve_wq);
2870 struct gve_priv *priv = netdev_priv(netdev);
2871 bool was_up = netif_running(priv->dev);
2875 if (was_up && gve_close(priv->dev)) {
2877 gve_reset_and_teardown(priv, was_up);
2880 gve_teardown_priv_resources(priv);
2890 struct gve_priv *priv = netdev_priv(netdev);
2891 bool was_up = netif_running(priv->dev);
2893 priv->suspend_cnt++;
2896 if (was_up && gve_close(priv->dev)) {
2898 gve_reset_and_teardown(priv, was_up);
2901 gve_teardown_priv_resources(priv);
2903 priv->up_before_suspend = was_up;
2912 struct gve_priv *priv = netdev_priv(netdev);
2915 priv->resume_cnt++;
2918 err = gve_reset_recovery(priv, priv->up_before_suspend);