xref: /src/sys/dev/gve/gve_main.c (revision 71702df6126226b31dc3ec66459388e32b993be1)
154dfc97bSShailend Chand /*-
254dfc97bSShailend Chand  * SPDX-License-Identifier: BSD-3-Clause
354dfc97bSShailend Chand  *
4d438b4efSShailend Chand  * Copyright (c) 2023-2024 Google LLC
554dfc97bSShailend Chand  *
654dfc97bSShailend Chand  * Redistribution and use in source and binary forms, with or without modification,
754dfc97bSShailend Chand  * are permitted provided that the following conditions are met:
854dfc97bSShailend Chand  *
954dfc97bSShailend Chand  * 1. Redistributions of source code must retain the above copyright notice, this
1054dfc97bSShailend Chand  *    list of conditions and the following disclaimer.
1154dfc97bSShailend Chand  *
1254dfc97bSShailend Chand  * 2. Redistributions in binary form must reproduce the above copyright notice,
1354dfc97bSShailend Chand  *    this list of conditions and the following disclaimer in the documentation
1454dfc97bSShailend Chand  *    and/or other materials provided with the distribution.
1554dfc97bSShailend Chand  *
1654dfc97bSShailend Chand  * 3. Neither the name of the copyright holder nor the names of its contributors
1754dfc97bSShailend Chand  *    may be used to endorse or promote products derived from this software without
1854dfc97bSShailend Chand  *    specific prior written permission.
1954dfc97bSShailend Chand  *
2054dfc97bSShailend Chand  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
2154dfc97bSShailend Chand  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2254dfc97bSShailend Chand  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2354dfc97bSShailend Chand  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
2454dfc97bSShailend Chand  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2554dfc97bSShailend Chand  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2654dfc97bSShailend Chand  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
2754dfc97bSShailend Chand  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2854dfc97bSShailend Chand  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2954dfc97bSShailend Chand  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3054dfc97bSShailend Chand  */
3154dfc97bSShailend Chand #include "gve.h"
3254dfc97bSShailend Chand #include "gve_adminq.h"
33d438b4efSShailend Chand #include "gve_dqo.h"
3454dfc97bSShailend Chand 
/*
 * Human-readable driver version reported to the device. Must be kept in
 * sync with the GVE_VERSION_{MAJOR,MINOR,SUB} macros below; it was left
 * at 1.3.4 when GVE_VERSION_SUB was bumped to 5.
 */
#define GVE_DRIVER_VERSION "GVE-FBSD-1.3.5\n"
#define GVE_VERSION_MAJOR 1
#define GVE_VERSION_MINOR 3
#define GVE_VERSION_SUB 5
3954dfc97bSShailend Chand 
4054dfc97bSShailend Chand #define GVE_DEFAULT_RX_COPYBREAK 256
4154dfc97bSShailend Chand 
421bbdfb0bSXin LI /* Devices supported by this driver. */
431bbdfb0bSXin LI static struct gve_dev {
441bbdfb0bSXin LI         uint16_t vendor_id;
451bbdfb0bSXin LI         uint16_t device_id;
461bbdfb0bSXin LI         const char *name;
471bbdfb0bSXin LI } gve_devs[] = {
481bbdfb0bSXin LI 	{ PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" }
491bbdfb0bSXin LI };
501bbdfb0bSXin LI 
5154dfc97bSShailend Chand struct sx gve_global_lock;
5254dfc97bSShailend Chand 
533d295733SJasper Tran O'Leary static void gve_start_tx_timeout_service(struct gve_priv *priv);
543d295733SJasper Tran O'Leary static void gve_stop_tx_timeout_service(struct gve_priv *priv);
553d295733SJasper Tran O'Leary 
5654dfc97bSShailend Chand static int
gve_verify_driver_compatibility(struct gve_priv * priv)5754dfc97bSShailend Chand gve_verify_driver_compatibility(struct gve_priv *priv)
5854dfc97bSShailend Chand {
5954dfc97bSShailend Chand 	int err;
6054dfc97bSShailend Chand 	struct gve_driver_info *driver_info;
6154dfc97bSShailend Chand 	struct gve_dma_handle driver_info_mem;
6254dfc97bSShailend Chand 
6354dfc97bSShailend Chand 	err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
6454dfc97bSShailend Chand 	    PAGE_SIZE, &driver_info_mem);
6554dfc97bSShailend Chand 
6654dfc97bSShailend Chand 	if (err != 0)
6754dfc97bSShailend Chand 		return (ENOMEM);
6854dfc97bSShailend Chand 
6954dfc97bSShailend Chand 	driver_info = driver_info_mem.cpu_addr;
7054dfc97bSShailend Chand 
7154dfc97bSShailend Chand 	*driver_info = (struct gve_driver_info) {
7254dfc97bSShailend Chand 		.os_type = 3, /* Freebsd */
7354dfc97bSShailend Chand 		.driver_major = GVE_VERSION_MAJOR,
7454dfc97bSShailend Chand 		.driver_minor = GVE_VERSION_MINOR,
7554dfc97bSShailend Chand 		.driver_sub = GVE_VERSION_SUB,
7654dfc97bSShailend Chand 		.os_version_major = htobe32(FBSD_VERSION_MAJOR),
7754dfc97bSShailend Chand 		.os_version_minor = htobe32(FBSD_VERSION_MINOR),
7854dfc97bSShailend Chand 		.os_version_sub = htobe32(FBSD_VERSION_PATCH),
7954dfc97bSShailend Chand 		.driver_capability_flags = {
8054dfc97bSShailend Chand 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS1),
8154dfc97bSShailend Chand 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS2),
8254dfc97bSShailend Chand 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS3),
8354dfc97bSShailend Chand 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS4),
8454dfc97bSShailend Chand 		},
8554dfc97bSShailend Chand 	};
8654dfc97bSShailend Chand 
8754dfc97bSShailend Chand 	snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1),
8854dfc97bSShailend Chand 	    "FreeBSD %u", __FreeBSD_version);
8954dfc97bSShailend Chand 
9054dfc97bSShailend Chand 	bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map,
9154dfc97bSShailend Chand 	    BUS_DMASYNC_PREREAD);
9254dfc97bSShailend Chand 
9354dfc97bSShailend Chand 	err = gve_adminq_verify_driver_compatibility(priv,
9454dfc97bSShailend Chand 	    sizeof(struct gve_driver_info), driver_info_mem.bus_addr);
9554dfc97bSShailend Chand 
9654dfc97bSShailend Chand 	/* It's ok if the device doesn't support this */
9754dfc97bSShailend Chand 	if (err == EOPNOTSUPP)
9854dfc97bSShailend Chand 		err = 0;
9954dfc97bSShailend Chand 
10054dfc97bSShailend Chand 	gve_dma_free_coherent(&driver_info_mem);
10154dfc97bSShailend Chand 
10254dfc97bSShailend Chand 	return (err);
10354dfc97bSShailend Chand }
10454dfc97bSShailend Chand 
1053d295733SJasper Tran O'Leary static void
gve_handle_tx_timeout(struct gve_priv * priv,struct gve_tx_ring * tx,int num_timeout_pkts)1063d295733SJasper Tran O'Leary gve_handle_tx_timeout(struct gve_priv *priv, struct gve_tx_ring *tx,
1073d295733SJasper Tran O'Leary     int num_timeout_pkts)
1083d295733SJasper Tran O'Leary {
1093d295733SJasper Tran O'Leary 	int64_t time_since_last_kick;
1103d295733SJasper Tran O'Leary 
1113d295733SJasper Tran O'Leary 	counter_u64_add_protected(tx->stats.tx_timeout, 1);
1123d295733SJasper Tran O'Leary 
1133d295733SJasper Tran O'Leary 	/* last_kicked is never GVE_TIMESTAMP_INVALID so we can skip checking */
1143d295733SJasper Tran O'Leary 	time_since_last_kick = gve_seconds_since(&tx->last_kicked);
1153d295733SJasper Tran O'Leary 
1163d295733SJasper Tran O'Leary 	/* Try kicking first in case the timeout is due to a missed interrupt */
1173d295733SJasper Tran O'Leary 	if (time_since_last_kick > GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC) {
1183d295733SJasper Tran O'Leary 		device_printf(priv->dev,
1193d295733SJasper Tran O'Leary 		    "Found %d timed out packet(s) on txq%d, kicking it for completions\n",
1203d295733SJasper Tran O'Leary 		    num_timeout_pkts, tx->com.id);
1213d295733SJasper Tran O'Leary 		gve_set_timestamp(&tx->last_kicked);
1223d295733SJasper Tran O'Leary 		taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task);
1233d295733SJasper Tran O'Leary 	} else {
1243d295733SJasper Tran O'Leary 		device_printf(priv->dev,
1253d295733SJasper Tran O'Leary 		    "Found %d timed out packet(s) on txq%d with its last kick %jd sec ago which is less than the cooldown period %d. Resetting device\n",
1263d295733SJasper Tran O'Leary 		    num_timeout_pkts, tx->com.id,
1273d295733SJasper Tran O'Leary 		    (intmax_t)time_since_last_kick,
1283d295733SJasper Tran O'Leary 		    GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC);
1293d295733SJasper Tran O'Leary 		gve_schedule_reset(priv);
1303d295733SJasper Tran O'Leary 	}
1313d295733SJasper Tran O'Leary }
1323d295733SJasper Tran O'Leary 
1333d295733SJasper Tran O'Leary static void
gve_tx_timeout_service_callback(void * data)1343d295733SJasper Tran O'Leary gve_tx_timeout_service_callback(void *data)
1353d295733SJasper Tran O'Leary {
1363d295733SJasper Tran O'Leary 	struct gve_priv *priv = (struct gve_priv *)data;
1373d295733SJasper Tran O'Leary 	struct gve_tx_ring *tx;
1383d295733SJasper Tran O'Leary 	uint16_t num_timeout_pkts;
1393d295733SJasper Tran O'Leary 
1403d295733SJasper Tran O'Leary 	tx = &priv->tx[priv->check_tx_queue_idx];
1413d295733SJasper Tran O'Leary 
1423d295733SJasper Tran O'Leary 	num_timeout_pkts = gve_is_gqi(priv) ?
1433d295733SJasper Tran O'Leary 	    gve_check_tx_timeout_gqi(priv, tx) :
1443d295733SJasper Tran O'Leary 	    gve_check_tx_timeout_dqo(priv, tx);
1453d295733SJasper Tran O'Leary 	if (num_timeout_pkts)
1463d295733SJasper Tran O'Leary 		gve_handle_tx_timeout(priv, tx, num_timeout_pkts);
1473d295733SJasper Tran O'Leary 
1483d295733SJasper Tran O'Leary 	priv->check_tx_queue_idx = (priv->check_tx_queue_idx + 1) %
1493d295733SJasper Tran O'Leary 	    priv->tx_cfg.num_queues;
1503d295733SJasper Tran O'Leary 	callout_reset_sbt(&priv->tx_timeout_service,
1513d295733SJasper Tran O'Leary 	    SBT_1S * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC, 0,
1523d295733SJasper Tran O'Leary 	    gve_tx_timeout_service_callback, (void *)priv, 0);
1533d295733SJasper Tran O'Leary }
1543d295733SJasper Tran O'Leary 
/*
 * Start the periodic tx-timeout scanner: begin at queue 0 and schedule
 * the first tick one check cadence from now. The 'true' argument makes
 * the callout MP-safe (no Giant).
 */
static void
gve_start_tx_timeout_service(struct gve_priv *priv)
{
	priv->check_tx_queue_idx = 0;
	callout_init(&priv->tx_timeout_service, true);
	callout_reset_sbt(&priv->tx_timeout_service,
	    SBT_1S * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC, 0,
	    gve_tx_timeout_service_callback, (void *)priv, 0);
}
1643d295733SJasper Tran O'Leary 
/*
 * Stop the tx-timeout scanner; callout_drain() blocks until any
 * in-flight callback has finished, so it must not be called with a
 * lock the callback takes.
 */
static void
gve_stop_tx_timeout_service(struct gve_priv *priv)
{
	callout_drain(&priv->tx_timeout_service);
}
1703d295733SJasper Tran O'Leary 
/*
 * Bring the interface up: program hw-assist bits from the enabled
 * capabilities, register QPLs (QPL mode only), create rx then tx rings
 * on the device, mark the ifnet running, report link up, unmask queue
 * interrupts and start the tx-timeout scanner.
 *
 * Called with the iface lock held. Returns 0 on success; on any
 * device-facing failure a reset is scheduled and the error returned.
 */
static int
gve_up(struct gve_priv *priv)
{
	if_t ifp = priv->ifp;
	int err;

	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	if (device_is_attached(priv->dev) == 0) {
		device_printf(priv->dev, "Cannot bring the iface up when detached\n");
		return (ENXIO);
	}

	/* Already up: nothing to do. */
	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return (0);

	/* Rebuild hw-assist flags from the currently enabled capabilities. */
	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	/* QPLs must be registered with the device before rings are created. */
	if (gve_is_qpl(priv)) {
		err = gve_register_qpls(priv);
		if (err != 0)
			goto reset;
	}

	err = gve_create_rx_rings(priv);
	if (err != 0)
		goto reset;

	err = gve_create_tx_rings(priv);
	if (err != 0)
		goto reset;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		if_link_state_change(ifp, LINK_STATE_UP);
		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}

	gve_unmask_all_queue_irqs(priv);
	gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
	priv->interface_up_cnt++;

	gve_start_tx_timeout_service(priv);

	return (0);

reset:
	gve_schedule_reset(priv);
	return (err);
}
23054dfc97bSShailend Chand 
/*
 * Bring the interface down: stop the tx-timeout scanner, report link
 * down, clear the running flag, destroy tx/rx rings on the device and
 * unregister QPLs (QPL mode only), then mask queue interrupts (GQI
 * only). Any device-facing failure escalates to a scheduled reset.
 *
 * Called with the iface lock held. No-op if the queues are not up.
 */
static void
gve_down(struct gve_priv *priv)
{
	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return;

	/* Drain the scanner first so it cannot touch rings being torn down. */
	gve_stop_tx_timeout_service(priv);

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}

	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	if (gve_destroy_rx_rings(priv) != 0)
		goto reset;

	if (gve_destroy_tx_rings(priv) != 0)
		goto reset;

	/* Rings must be destroyed before their QPLs can be unregistered. */
	if (gve_is_qpl(priv)) {
		if (gve_unregister_qpls(priv) != 0)
			goto reset;
	}

	if (gve_is_gqi(priv))
		gve_mask_all_queue_irqs(priv);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
	priv->interface_down_cnt++;
	return;

reset:
	gve_schedule_reset(priv);
}
26854dfc97bSShailend Chand 
269e0464f74SVee Agarwal int
gve_adjust_rx_queues(struct gve_priv * priv,uint16_t new_queue_cnt)270e0464f74SVee Agarwal gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
271e0464f74SVee Agarwal {
272e0464f74SVee Agarwal 	int err;
273e0464f74SVee Agarwal 
274e0464f74SVee Agarwal 	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
275e0464f74SVee Agarwal 
276e0464f74SVee Agarwal 	gve_down(priv);
277e0464f74SVee Agarwal 
278e0464f74SVee Agarwal 	if (new_queue_cnt < priv->rx_cfg.num_queues) {
279e0464f74SVee Agarwal 		/*
280e0464f74SVee Agarwal 		 * Freeing a ring still preserves its ntfy_id,
281e0464f74SVee Agarwal 		 * which is needed if we create the ring again.
282e0464f74SVee Agarwal 		 */
283e0464f74SVee Agarwal 		gve_free_rx_rings(priv, new_queue_cnt, priv->rx_cfg.num_queues);
284e0464f74SVee Agarwal 	} else {
285e0464f74SVee Agarwal 		err = gve_alloc_rx_rings(priv, priv->rx_cfg.num_queues, new_queue_cnt);
286e0464f74SVee Agarwal 		if (err != 0) {
287e0464f74SVee Agarwal 			device_printf(priv->dev, "Failed to allocate new queues");
288e0464f74SVee Agarwal 			/* Failed to allocate rings, start back up with old ones */
289e0464f74SVee Agarwal 			gve_up(priv);
290e0464f74SVee Agarwal 			return (err);
291e0464f74SVee Agarwal 
292e0464f74SVee Agarwal 		}
293e0464f74SVee Agarwal 	}
294e0464f74SVee Agarwal 	priv->rx_cfg.num_queues = new_queue_cnt;
295e0464f74SVee Agarwal 
296e0464f74SVee Agarwal 	err = gve_up(priv);
297e0464f74SVee Agarwal 	if (err != 0)
298e0464f74SVee Agarwal 		gve_schedule_reset(priv);
299e0464f74SVee Agarwal 
300e0464f74SVee Agarwal 	return (err);
301e0464f74SVee Agarwal }
302e0464f74SVee Agarwal 
303e0464f74SVee Agarwal int
gve_adjust_tx_queues(struct gve_priv * priv,uint16_t new_queue_cnt)304e0464f74SVee Agarwal gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
305e0464f74SVee Agarwal {
306e0464f74SVee Agarwal 	int err;
307e0464f74SVee Agarwal 
308e0464f74SVee Agarwal 	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
309e0464f74SVee Agarwal 
310e0464f74SVee Agarwal 	gve_down(priv);
311e0464f74SVee Agarwal 
312e0464f74SVee Agarwal 	if (new_queue_cnt < priv->tx_cfg.num_queues) {
313e0464f74SVee Agarwal 		/*
314e0464f74SVee Agarwal 		 * Freeing a ring still preserves its ntfy_id,
315e0464f74SVee Agarwal 		 * which is needed if we create the ring again.
316e0464f74SVee Agarwal 		 */
317e0464f74SVee Agarwal 		gve_free_tx_rings(priv, new_queue_cnt, priv->tx_cfg.num_queues);
318e0464f74SVee Agarwal 	} else {
319e0464f74SVee Agarwal 		err = gve_alloc_tx_rings(priv, priv->tx_cfg.num_queues, new_queue_cnt);
320e0464f74SVee Agarwal 		if (err != 0) {
321e0464f74SVee Agarwal 			device_printf(priv->dev, "Failed to allocate new queues");
322e0464f74SVee Agarwal 			/* Failed to allocate rings, start back up with old ones */
323e0464f74SVee Agarwal 			gve_up(priv);
324e0464f74SVee Agarwal 			return (err);
325e0464f74SVee Agarwal 
326e0464f74SVee Agarwal 		}
327e0464f74SVee Agarwal 	}
328e0464f74SVee Agarwal 	priv->tx_cfg.num_queues = new_queue_cnt;
329e0464f74SVee Agarwal 
330e0464f74SVee Agarwal 	err = gve_up(priv);
331e0464f74SVee Agarwal 	if (err != 0)
332e0464f74SVee Agarwal 		gve_schedule_reset(priv);
333e0464f74SVee Agarwal 
334e0464f74SVee Agarwal 	return (err);
335e0464f74SVee Agarwal }
336e0464f74SVee Agarwal 
/*
 * Change the descriptor count of all rx (is_rx true) or tx rings:
 * tear the interface down, free every ring, re-allocate at the new
 * size, and bring the interface back up.
 *
 * On allocation failure the previous size is restored and allocation
 * retried; if even that fails the device is left down and the error
 * returned. Called with the iface lock held.
 */
int
gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx)
{
	int err;
	uint16_t prev_desc_cnt;

	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	gve_down(priv);

	if (is_rx) {
		gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
		prev_desc_cnt = priv->rx_desc_cnt;
		priv->rx_desc_cnt = new_desc_cnt;
		err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
		if (err != 0) {
			/* Roll back to the old size and try again. */
			device_printf(priv->dev,
			    "Failed to allocate rings. Trying to start back up with previous ring size.");
			priv->rx_desc_cnt = prev_desc_cnt;
			err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
		}
	} else {
		gve_free_tx_rings(priv, 0, priv->tx_cfg.num_queues);
		prev_desc_cnt = priv->tx_desc_cnt;
		priv->tx_desc_cnt = new_desc_cnt;
		err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
		if (err != 0) {
			/* Roll back to the old size and try again. */
			device_printf(priv->dev,
			    "Failed to allocate rings. Trying to start back up with previous ring size.");
			priv->tx_desc_cnt = prev_desc_cnt;
			err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
		}
	}

	/* Neither the new nor the old size could be allocated. */
	if (err != 0) {
		device_printf(priv->dev, "Failed to allocate rings! Cannot start device back up!");
		return (err);
	}

	err = gve_up(priv);
	if (err != 0) {
		gve_schedule_reset(priv);
		return (err);
	}

	return (0);
}
38422fe926aSVee Agarwal 
38554dfc97bSShailend Chand static int
gve_get_dqo_rx_buf_size(struct gve_priv * priv,uint16_t mtu)38671702df6SVee Agarwal gve_get_dqo_rx_buf_size(struct gve_priv *priv, uint16_t mtu)
38771702df6SVee Agarwal {
38871702df6SVee Agarwal 	/*
38971702df6SVee Agarwal 	 * Use 4k buffers only if mode is DQ, 4k buffers flag is on,
39071702df6SVee Agarwal 	 * and either hw LRO is enabled or mtu is greater than 2048
39171702df6SVee Agarwal 	 */
39271702df6SVee Agarwal 	if (!gve_is_gqi(priv) && gve_allow_4k_rx_buffers &&
39371702df6SVee Agarwal 	    (!gve_disable_hw_lro || mtu > GVE_DEFAULT_RX_BUFFER_SIZE))
39471702df6SVee Agarwal 		return (GVE_4K_RX_BUFFER_SIZE_DQO);
39571702df6SVee Agarwal 
39671702df6SVee Agarwal 	return (GVE_DEFAULT_RX_BUFFER_SIZE);
39771702df6SVee Agarwal }
39871702df6SVee Agarwal 
39971702df6SVee Agarwal static int
gve_set_mtu(if_t ifp,uint32_t new_mtu)40054dfc97bSShailend Chand gve_set_mtu(if_t ifp, uint32_t new_mtu)
40154dfc97bSShailend Chand {
40254dfc97bSShailend Chand 	struct gve_priv *priv = if_getsoftc(ifp);
403909e2d7bSJasper Tran O'Leary 	const uint32_t max_problem_range = 8227;
404909e2d7bSJasper Tran O'Leary 	const uint32_t min_problem_range = 7822;
40571702df6SVee Agarwal 	uint16_t new_rx_buf_size = gve_get_dqo_rx_buf_size(priv, new_mtu);
40654dfc97bSShailend Chand 	int err;
40754dfc97bSShailend Chand 
40854dfc97bSShailend Chand 	if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
40954dfc97bSShailend Chand 		device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
41054dfc97bSShailend Chand 		    new_mtu, priv->max_mtu, ETHERMIN);
41154dfc97bSShailend Chand 		return (EINVAL);
41254dfc97bSShailend Chand 	}
41354dfc97bSShailend Chand 
414909e2d7bSJasper Tran O'Leary 	/*
415909e2d7bSJasper Tran O'Leary 	 * When hardware LRO is enabled in DQ mode, MTUs within the range
416909e2d7bSJasper Tran O'Leary 	 * [7822, 8227] trigger hardware issues which cause a drastic drop
417909e2d7bSJasper Tran O'Leary 	 * in throughput.
418909e2d7bSJasper Tran O'Leary 	 */
419909e2d7bSJasper Tran O'Leary 	if (!gve_is_gqi(priv) && !gve_disable_hw_lro &&
42071702df6SVee Agarwal 	    new_mtu >= min_problem_range && new_mtu <= max_problem_range &&
42171702df6SVee Agarwal 	    new_rx_buf_size != GVE_4K_RX_BUFFER_SIZE_DQO) {
422909e2d7bSJasper Tran O'Leary 		device_printf(priv->dev,
42371702df6SVee Agarwal 		    "Cannot set to MTU to %d within the range [%d, %d] while HW LRO is enabled and not using 4k RX Buffers\n",
424909e2d7bSJasper Tran O'Leary 		    new_mtu, min_problem_range, max_problem_range);
425909e2d7bSJasper Tran O'Leary 		return (EINVAL);
426909e2d7bSJasper Tran O'Leary 	}
427909e2d7bSJasper Tran O'Leary 
42854dfc97bSShailend Chand 	err = gve_adminq_set_mtu(priv, new_mtu);
42954dfc97bSShailend Chand 	if (err == 0) {
43054dfc97bSShailend Chand 		if (bootverbose)
43154dfc97bSShailend Chand 			device_printf(priv->dev, "MTU set to %d\n", new_mtu);
43254dfc97bSShailend Chand 		if_setmtu(ifp, new_mtu);
43371702df6SVee Agarwal 		/* Need to re-alloc RX queues if RX buffer size changed */
43471702df6SVee Agarwal 		if (!gve_is_gqi(priv) &&
43571702df6SVee Agarwal 		    new_rx_buf_size != priv->rx_buf_size_dqo) {
43671702df6SVee Agarwal 			gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
43771702df6SVee Agarwal 			priv->rx_buf_size_dqo = new_rx_buf_size;
43871702df6SVee Agarwal 			gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
43971702df6SVee Agarwal 		}
44054dfc97bSShailend Chand 	} else {
44154dfc97bSShailend Chand 		device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
44254dfc97bSShailend Chand 	}
44354dfc97bSShailend Chand 
44454dfc97bSShailend Chand 	return (err);
44554dfc97bSShailend Chand }
44654dfc97bSShailend Chand 
44754dfc97bSShailend Chand static void
gve_init(void * arg)44854dfc97bSShailend Chand gve_init(void *arg)
44954dfc97bSShailend Chand {
45054dfc97bSShailend Chand 	struct gve_priv *priv = (struct gve_priv *)arg;
45154dfc97bSShailend Chand 
45254dfc97bSShailend Chand 	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) {
45354dfc97bSShailend Chand 		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
45454dfc97bSShailend Chand 		gve_up(priv);
45554dfc97bSShailend Chand 		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
45654dfc97bSShailend Chand 	}
45754dfc97bSShailend Chand }
45854dfc97bSShailend Chand 
/*
 * ifnet ioctl hook. MTU and capability changes require a full
 * down/up cycle (rings must be re-created); IFF_UP transitions map
 * directly onto gve_up()/gve_down(). All state transitions are
 * serialized by the iface lock.
 */
static int
gve_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct gve_priv *priv;
	struct ifreq *ifr;
	int rc = 0;

	priv = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFMTU:
		if (if_getmtu(ifp) == ifr->ifr_mtu)
			break;
		/* MTU change needs the queues torn down and rebuilt. */
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_down(priv);
		gve_set_mtu(ifp, ifr->ifr_mtu);
		rc = gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
		break;

	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
				rc = gve_up(priv);
				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
				gve_down(priv);
				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
			}
		}
		break;

	case SIOCSIFCAP:
		if (ifr->ifr_reqcap == if_getcapenable(ifp))
			break;
		/* gve_up() re-derives hw-assist bits from capenable. */
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_down(priv);
		if_setcapenable(ifp, ifr->ifr_reqcap);
		rc = gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
		break;

	case SIOCSIFMEDIA:
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
		break;

	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}
51954dfc97bSShailend Chand 
52054dfc97bSShailend Chand static int
gve_media_change(if_t ifp)52154dfc97bSShailend Chand gve_media_change(if_t ifp)
52254dfc97bSShailend Chand {
52354dfc97bSShailend Chand 	struct gve_priv *priv = if_getsoftc(ifp);
52454dfc97bSShailend Chand 
52554dfc97bSShailend Chand 	device_printf(priv->dev, "Media change not supported\n");
52654dfc97bSShailend Chand 	return (0);
52754dfc97bSShailend Chand }
52854dfc97bSShailend Chand 
52954dfc97bSShailend Chand static void
gve_media_status(if_t ifp,struct ifmediareq * ifmr)53054dfc97bSShailend Chand gve_media_status(if_t ifp, struct ifmediareq *ifmr)
53154dfc97bSShailend Chand {
53254dfc97bSShailend Chand 	struct gve_priv *priv = if_getsoftc(ifp);
53354dfc97bSShailend Chand 
53454dfc97bSShailend Chand 	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
53554dfc97bSShailend Chand 
53654dfc97bSShailend Chand 	ifmr->ifm_status = IFM_AVALID;
53754dfc97bSShailend Chand 	ifmr->ifm_active = IFM_ETHER;
53854dfc97bSShailend Chand 
53954dfc97bSShailend Chand 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
54054dfc97bSShailend Chand 		ifmr->ifm_status |= IFM_ACTIVE;
54154dfc97bSShailend Chand 		ifmr->ifm_active |= IFM_AUTO;
54254dfc97bSShailend Chand 	} else {
54354dfc97bSShailend Chand 		ifmr->ifm_active |= IFM_NONE;
54454dfc97bSShailend Chand 	}
54554dfc97bSShailend Chand 
54654dfc97bSShailend Chand 	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
54754dfc97bSShailend Chand }
54854dfc97bSShailend Chand 
54954dfc97bSShailend Chand static uint64_t
gve_get_counter(if_t ifp,ift_counter cnt)55054dfc97bSShailend Chand gve_get_counter(if_t ifp, ift_counter cnt)
55154dfc97bSShailend Chand {
55254dfc97bSShailend Chand 	struct gve_priv *priv;
55354dfc97bSShailend Chand 	uint64_t rpackets = 0;
55454dfc97bSShailend Chand 	uint64_t tpackets = 0;
55554dfc97bSShailend Chand 	uint64_t rbytes = 0;
55654dfc97bSShailend Chand 	uint64_t tbytes = 0;
55754dfc97bSShailend Chand 	uint64_t rx_dropped_pkt = 0;
55854dfc97bSShailend Chand 	uint64_t tx_dropped_pkt = 0;
55954dfc97bSShailend Chand 
56054dfc97bSShailend Chand 	priv = if_getsoftc(ifp);
56154dfc97bSShailend Chand 
56254dfc97bSShailend Chand 	gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
56354dfc97bSShailend Chand 	    &tbytes, &tx_dropped_pkt);
56454dfc97bSShailend Chand 
56554dfc97bSShailend Chand 	switch (cnt) {
56654dfc97bSShailend Chand 	case IFCOUNTER_IPACKETS:
56754dfc97bSShailend Chand 		return (rpackets);
56854dfc97bSShailend Chand 
56954dfc97bSShailend Chand 	case IFCOUNTER_OPACKETS:
57054dfc97bSShailend Chand 		return (tpackets);
57154dfc97bSShailend Chand 
57254dfc97bSShailend Chand 	case IFCOUNTER_IBYTES:
57354dfc97bSShailend Chand 		return (rbytes);
57454dfc97bSShailend Chand 
57554dfc97bSShailend Chand 	case IFCOUNTER_OBYTES:
57654dfc97bSShailend Chand 		return (tbytes);
57754dfc97bSShailend Chand 
57854dfc97bSShailend Chand 	case IFCOUNTER_IQDROPS:
57954dfc97bSShailend Chand 		return (rx_dropped_pkt);
58054dfc97bSShailend Chand 
58154dfc97bSShailend Chand 	case IFCOUNTER_OQDROPS:
58254dfc97bSShailend Chand 		return (tx_dropped_pkt);
58354dfc97bSShailend Chand 
58454dfc97bSShailend Chand 	default:
58554dfc97bSShailend Chand 		return (if_get_counter_default(ifp, cnt));
58654dfc97bSShailend Chand 	}
58754dfc97bSShailend Chand }
58854dfc97bSShailend Chand 
/*
 * Allocate and configure the ifnet for this device: install the driver
 * callbacks, set TSO limits and interface flags, advertise checksum/
 * TSO/LRO capabilities, set the initial MTU, attach to the ethernet
 * layer, and register a single autoselect media type.
 */
static void
gve_setup_ifnet(device_t dev, struct gve_priv *priv)
{
	int caps = 0;
	if_t ifp;

	ifp = priv->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, priv);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, gve_init);
	if_setioctlfn(ifp, gve_ioctl);
	if_settransmitfn(ifp, gve_xmit_ifp);
	if_setqflushfn(ifp, gve_qflush);

	/*
	 * Set TSO limits, must match the arguments to bus_dma_tag_create
	 * when creating tx->dqo.buf_dmatag. Only applies to the RDA mode
	 * because in QPL we copy the entire packet into the bounce buffer
	 * and thus it does not matter how fragmented the mbuf is.
	 */
	if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) {
		if_sethwtsomaxsegcount(ifp, GVE_TX_MAX_DATA_DESCS_DQO);
		if_sethwtsomaxsegsize(ifp, GVE_TX_MAX_BUF_SIZE_DQO);
	}
	if_sethwtsomax(ifp, GVE_TSO_MAXSIZE_DQO);

	/* Kernels at or after 1400086 no longer take IFF_KNOWSEPOCH. */
#if __FreeBSD_version >= 1400086
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#else
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH);
#endif

	ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
	if_setgetcounterfn(ifp, gve_get_counter);

	caps = IFCAP_RXCSUM |
	       IFCAP_TXCSUM |
	       IFCAP_TXCSUM_IPV6 |
	       IFCAP_TSO |
	       IFCAP_LRO;

	/* Jumbo MTU support is a negotiated device feature. */
	if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
		caps |= IFCAP_JUMBO_MTU;

	if_setcapabilities(ifp, caps);
	if_setcapenable(ifp, caps);

	if (bootverbose)
		device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
	if_setmtu(ifp, priv->max_mtu);

	ether_ifattach(ifp, priv->mac);

	/* Virtual NIC: only an autoselect media type is offered. */
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
}
64654dfc97bSShailend Chand 
64754dfc97bSShailend Chand static int
gve_alloc_counter_array(struct gve_priv * priv)64854dfc97bSShailend Chand gve_alloc_counter_array(struct gve_priv *priv)
64954dfc97bSShailend Chand {
65054dfc97bSShailend Chand 	int err;
65154dfc97bSShailend Chand 
65254dfc97bSShailend Chand 	err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
65354dfc97bSShailend Chand 	    PAGE_SIZE, &priv->counter_array_mem);
65454dfc97bSShailend Chand 	if (err != 0)
65554dfc97bSShailend Chand 		return (err);
65654dfc97bSShailend Chand 
65754dfc97bSShailend Chand 	priv->counters = priv->counter_array_mem.cpu_addr;
65854dfc97bSShailend Chand 	return (0);
65954dfc97bSShailend Chand }
66054dfc97bSShailend Chand 
66154dfc97bSShailend Chand static void
gve_free_counter_array(struct gve_priv * priv)66254dfc97bSShailend Chand gve_free_counter_array(struct gve_priv *priv)
66354dfc97bSShailend Chand {
66454dfc97bSShailend Chand 	if (priv->counters != NULL)
66554dfc97bSShailend Chand 		gve_dma_free_coherent(&priv->counter_array_mem);
66654dfc97bSShailend Chand 	priv->counter_array_mem = (struct gve_dma_handle){};
66754dfc97bSShailend Chand }
66854dfc97bSShailend Chand 
66954dfc97bSShailend Chand static int
gve_alloc_irq_db_array(struct gve_priv * priv)67054dfc97bSShailend Chand gve_alloc_irq_db_array(struct gve_priv *priv)
67154dfc97bSShailend Chand {
67254dfc97bSShailend Chand 	int err;
67354dfc97bSShailend Chand 
67454dfc97bSShailend Chand 	err = gve_dma_alloc_coherent(priv,
67554dfc97bSShailend Chand 	    sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
67654dfc97bSShailend Chand 	    &priv->irqs_db_mem);
67754dfc97bSShailend Chand 	if (err != 0)
67854dfc97bSShailend Chand 		return (err);
67954dfc97bSShailend Chand 
68054dfc97bSShailend Chand 	priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
68154dfc97bSShailend Chand 	return (0);
68254dfc97bSShailend Chand }
68354dfc97bSShailend Chand 
68454dfc97bSShailend Chand static void
gve_free_irq_db_array(struct gve_priv * priv)68554dfc97bSShailend Chand gve_free_irq_db_array(struct gve_priv *priv)
68654dfc97bSShailend Chand {
68754dfc97bSShailend Chand 	if (priv->irq_db_indices != NULL)
68854dfc97bSShailend Chand 		gve_dma_free_coherent(&priv->irqs_db_mem);
68954dfc97bSShailend Chand 	priv->irqs_db_mem = (struct gve_dma_handle){};
69054dfc97bSShailend Chand }
69154dfc97bSShailend Chand 
/*
 * Release all queue resources: the IRQs are torn down first, before
 * the rings they reference, then the tx and rx rings and their
 * backing arrays are freed.
 */
static void
gve_free_rings(struct gve_priv *priv)
{
	gve_free_irqs(priv);

	gve_free_tx_rings(priv, 0, priv->tx_cfg.num_queues);
	free(priv->tx, M_GVE);
	priv->tx = NULL;

	gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
	free(priv->rx, M_GVE);
	priv->rx = NULL;
}
70554dfc97bSShailend Chand 
/*
 * Allocate the rx and tx ring arrays (sized for the maximum queue
 * counts) and the rings for the currently configured queue counts,
 * then the IRQs that service them.
 *
 * Returns 0 on success or an errno; on failure everything allocated
 * so far is released via gve_free_rings.
 */
static int
gve_alloc_rings(struct gve_priv *priv)
{
	int err;

	priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.max_queues,
	    M_GVE, M_WAITOK | M_ZERO);
	err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
	if (err != 0)
		goto abort;

	priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.max_queues,
	    M_GVE, M_WAITOK | M_ZERO);
	err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
	if (err != 0)
		goto abort;

	err = gve_alloc_irqs(priv);
	if (err != 0)
		goto abort;

	return (0);

abort:
	/*
	 * NOTE(review): if the rx ring allocation above failed, priv->tx
	 * is still NULL here — confirm gve_free_tx_rings tolerates that.
	 */
	gve_free_rings(priv);
	return (err);
}
73354dfc97bSShailend Chand 
/*
 * Undo gve_alloc_and_configure_device_resources: tell the device to
 * drop its registered resources (if that handshake had succeeded),
 * then free the host-side irq doorbell array, event-counter array,
 * and the DQO packet-type lookup table.
 */
static void
gve_deconfigure_and_free_device_resources(struct gve_priv *priv)
{
	int err;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err != 0) {
			device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
			    err);
			/*
			 * Bail without freeing — presumably because the
			 * device may still be using the memory; confirm.
			 */
			return;
		}
		if (bootverbose)
			device_printf(priv->dev, "Deconfigured device resources\n");
		gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	}

	gve_free_irq_db_array(priv);
	gve_free_counter_array(priv);

	if (priv->ptype_lut_dqo) {
		free(priv->ptype_lut_dqo, M_GVE);
		priv->ptype_lut_dqo = NULL;
	}
}
75954dfc97bSShailend Chand 
/*
 * Allocate the host memory shared with the device (event counters and
 * irq doorbells), register it with the device over the admin queue,
 * and fetch the DQO packet-type lookup table when not in GQI mode.
 *
 * Idempotent: returns 0 immediately if RESOURCES_OK is already set.
 * Returns 0 on success or an errno; partial work is undone on failure.
 */
static int
gve_alloc_and_configure_device_resources(struct gve_priv *priv)
{
	int err;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
		return (0);

	err = gve_alloc_counter_array(priv);
	if (err != 0)
		return (err);

	err = gve_alloc_irq_db_array(priv);
	if (err != 0)
		goto abort;

	err = gve_adminq_configure_device_resources(priv);
	if (err != 0) {
		device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
			      err);
		/* Surface admin-queue failures uniformly as ENXIO. */
		err = (ENXIO);
		goto abort;
	}

	if (!gve_is_gqi(priv)) {
		priv->ptype_lut_dqo = malloc(sizeof(*priv->ptype_lut_dqo), M_GVE,
		    M_WAITOK | M_ZERO);

		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err != 0) {
			device_printf(priv->dev, "Failed to configure ptype lut: err=%d\n",
			    err);
			goto abort;
		}
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	if (bootverbose)
		device_printf(priv->dev, "Configured device resources\n");
	return (0);

abort:
	gve_deconfigure_and_free_device_resources(priv);
	return (err);
}
80554dfc97bSShailend Chand 
/*
 * Read the device's tx/rx queue limits from the register BAR and
 * derive the active queue counts, optionally capped by the
 * default_num_queues tunable. The total (max) queue count doubles as
 * the MSI-X vector index reserved for management interrupts.
 */
static void
gve_set_queue_cnts(struct gve_priv *priv)
{
	priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
	priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;

	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
		    priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
		    priv->rx_cfg.num_queues);
	}

	/* num_queues counts the max, not the active, tx + rx queues. */
	priv->num_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
	priv->mgmt_msix_idx = priv->num_queues;
}
82454dfc97bSShailend Chand 
/*
 * Bring up the admin queue, verify driver/device compatibility, ask
 * the device to describe itself, and derive the queue counts from the
 * result. Returns 0 on success or an errno; the admin queue is
 * released on failure.
 */
static int
gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
{
	int err;

	if ((err = gve_adminq_alloc(priv)) != 0)
		return (err);

	if ((err = gve_verify_driver_compatibility(priv)) != 0) {
		device_printf(priv->dev,
		    "Failed to verify driver compatibility: err=%d\n", err);
		goto abort;
	}

	if ((err = gve_adminq_describe_device(priv)) != 0)
		goto abort;

	gve_set_queue_cnts(priv);

	priv->num_registered_pages = 0;
	return (0);

abort:
	gve_release_adminq(priv);
	return (err);
}
85154dfc97bSShailend Chand 
85254dfc97bSShailend Chand void
gve_schedule_reset(struct gve_priv * priv)85354dfc97bSShailend Chand gve_schedule_reset(struct gve_priv *priv)
85454dfc97bSShailend Chand {
85554dfc97bSShailend Chand 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
85654dfc97bSShailend Chand 		return;
85754dfc97bSShailend Chand 
85854dfc97bSShailend Chand 	device_printf(priv->dev, "Scheduling reset task!\n");
85954dfc97bSShailend Chand 	gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
86054dfc97bSShailend Chand 	taskqueue_enqueue(priv->service_tq, &priv->service_task);
86154dfc97bSShailend Chand }
86254dfc97bSShailend Chand 
/*
 * Full teardown: bring the interface down, release the device-side
 * resources, and finally release the admin queue — in that order.
 */
static void
gve_destroy(struct gve_priv *priv)
{
	gve_down(priv);
	gve_deconfigure_and_free_device_resources(priv);
	gve_release_adminq(priv);
}
87054dfc97bSShailend Chand 
/*
 * Rebuild device state after a reset: reallocate the admin queue,
 * re-register the device resources (and refresh the DQO ptype table),
 * then bring the interface back up. On failure only a message is
 * printed and the device stays down.
 */
static void
gve_restore(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_alloc(priv);
	if (err != 0)
		goto abort;

	err = gve_adminq_configure_device_resources(priv);
	if (err != 0) {
		device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
		    err);
		err = (ENXIO);
		goto abort;
	}
	if (!gve_is_gqi(priv)) {
		/*
		 * NOTE(review): assumes ptype_lut_dqo is still allocated
		 * from attach — the reset path only zeroes it, it does not
		 * free it. Confirm against gve_clear_device_resources.
		 */
		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err != 0) {
			device_printf(priv->dev, "Failed to configure ptype lut: err=%d\n",
			    err);
			goto abort;
		}
	}

	err = gve_up(priv);
	if (err != 0)
		goto abort;

	return;

abort:
	device_printf(priv->dev, "Restore failed!\n");
	return;
}
90654dfc97bSShailend Chand 
/*
 * Zero the host memory shared with the device — the event counters,
 * the irq doorbell indices, and the ptype table — so the device comes
 * back from reset seeing clean state. The buffers stay allocated.
 */
static void
gve_clear_device_resources(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_event_counters; i++)
		priv->counters[i] = 0;
	/* Push the zeroed counters out to device-visible memory. */
	bus_dmamap_sync(priv->counter_array_mem.tag, priv->counter_array_mem.map,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < priv->num_queues; i++)
		priv->irq_db_indices[i] = (struct gve_irq_db){};
	bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
	    BUS_DMASYNC_PREWRITE);

	if (priv->ptype_lut_dqo)
		*priv->ptype_lut_dqo = (struct gve_ptype_lut){0};
}
92562b2d0c3SJasper Tran O'Leary 
/*
 * Service-task handler for a pending reset: quiesce the interface,
 * destroy NIC-side state by releasing the admin queue, refresh the
 * driver rings via gve_down, and rebuild everything with gve_restore.
 * IN_RESET is held across the whole sequence so a concurrent
 * gve_schedule_reset call becomes a no-op.
 */
static void
gve_handle_reset(struct gve_priv *priv)
{
	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
		return;

	gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);

	/* Mark the interface as not running and report link down. */
	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);

	/*
	 * Releasing the adminq causes the NIC to destroy all resources
	 * registered with it, so by clearing the flags beneath we cause
	 * the subsequent gve_down call below to not attempt to tell the
	 * NIC to destroy these resources again.
	 *
	 * The call to gve_down is needed in the first place to refresh
	 * the state and the DMA-able memory within each driver ring.
	 */
	gve_release_adminq(priv);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);

	gve_down(priv);
	gve_clear_device_resources(priv);

	gve_restore(priv);

	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);

	priv->reset_cnt++;
	gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
}
96654dfc97bSShailend Chand 
96754dfc97bSShailend Chand static void
gve_handle_link_status(struct gve_priv * priv)96854dfc97bSShailend Chand gve_handle_link_status(struct gve_priv *priv)
96954dfc97bSShailend Chand {
97054dfc97bSShailend Chand 	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
97154dfc97bSShailend Chand 	bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS;
97254dfc97bSShailend Chand 
97354dfc97bSShailend Chand 	if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
97454dfc97bSShailend Chand 		return;
97554dfc97bSShailend Chand 
97654dfc97bSShailend Chand 	if (link_up) {
97754dfc97bSShailend Chand 		if (bootverbose)
97854dfc97bSShailend Chand 			device_printf(priv->dev, "Device link is up.\n");
97954dfc97bSShailend Chand 		if_link_state_change(priv->ifp, LINK_STATE_UP);
98054dfc97bSShailend Chand 		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
98154dfc97bSShailend Chand 	} else {
98254dfc97bSShailend Chand 		device_printf(priv->dev, "Device link is down.\n");
98354dfc97bSShailend Chand 		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
98454dfc97bSShailend Chand 		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
98554dfc97bSShailend Chand 	}
98654dfc97bSShailend Chand }
98754dfc97bSShailend Chand 
98854dfc97bSShailend Chand static void
gve_service_task(void * arg,int pending)98954dfc97bSShailend Chand gve_service_task(void *arg, int pending)
99054dfc97bSShailend Chand {
99154dfc97bSShailend Chand 	struct gve_priv *priv = (struct gve_priv *)arg;
99254dfc97bSShailend Chand 	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
99354dfc97bSShailend Chand 
99454dfc97bSShailend Chand 	if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) &&
99554dfc97bSShailend Chand 	    !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
99654dfc97bSShailend Chand 		device_printf(priv->dev, "Device requested reset\n");
99754dfc97bSShailend Chand 		gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
99854dfc97bSShailend Chand 	}
99954dfc97bSShailend Chand 
100054dfc97bSShailend Chand 	gve_handle_reset(priv);
100154dfc97bSShailend Chand 	gve_handle_link_status(priv);
100254dfc97bSShailend Chand }
100354dfc97bSShailend Chand 
100454dfc97bSShailend Chand static int
gve_probe(device_t dev)100554dfc97bSShailend Chand gve_probe(device_t dev)
100654dfc97bSShailend Chand {
10071bbdfb0bSXin LI 	uint16_t deviceid, vendorid;
10081bbdfb0bSXin LI 	int i;
10091bbdfb0bSXin LI 
10101bbdfb0bSXin LI 	vendorid = pci_get_vendor(dev);
10111bbdfb0bSXin LI 	deviceid = pci_get_device(dev);
10121bbdfb0bSXin LI 
10131177a6c8SXin LI 	for (i = 0; i < nitems(gve_devs); i++) {
10141bbdfb0bSXin LI 		if (vendorid == gve_devs[i].vendor_id &&
10151bbdfb0bSXin LI 		    deviceid == gve_devs[i].device_id) {
10161bbdfb0bSXin LI 			device_set_desc(dev, gve_devs[i].name);
101754dfc97bSShailend Chand 			return (BUS_PROBE_DEFAULT);
101854dfc97bSShailend Chand 		}
10191bbdfb0bSXin LI 	}
102054dfc97bSShailend Chand 	return (ENXIO);
102154dfc97bSShailend Chand }
102254dfc97bSShailend Chand 
102354dfc97bSShailend Chand static void
gve_free_sys_res_mem(struct gve_priv * priv)102454dfc97bSShailend Chand gve_free_sys_res_mem(struct gve_priv *priv)
102554dfc97bSShailend Chand {
102654dfc97bSShailend Chand 	if (priv->msix_table != NULL)
102754dfc97bSShailend Chand 		bus_release_resource(priv->dev, SYS_RES_MEMORY,
102854dfc97bSShailend Chand 		    rman_get_rid(priv->msix_table), priv->msix_table);
102954dfc97bSShailend Chand 
103054dfc97bSShailend Chand 	if (priv->db_bar != NULL)
103154dfc97bSShailend Chand 		bus_release_resource(priv->dev, SYS_RES_MEMORY,
103254dfc97bSShailend Chand 		    rman_get_rid(priv->db_bar), priv->db_bar);
103354dfc97bSShailend Chand 
103454dfc97bSShailend Chand 	if (priv->reg_bar != NULL)
103554dfc97bSShailend Chand 		bus_release_resource(priv->dev, SYS_RES_MEMORY,
103654dfc97bSShailend Chand 		    rman_get_rid(priv->reg_bar), priv->reg_bar);
103754dfc97bSShailend Chand }
103854dfc97bSShailend Chand 
103954dfc97bSShailend Chand static int
gve_attach(device_t dev)104054dfc97bSShailend Chand gve_attach(device_t dev)
104154dfc97bSShailend Chand {
104254dfc97bSShailend Chand 	struct gve_priv *priv;
104354dfc97bSShailend Chand 	int rid;
104454dfc97bSShailend Chand 	int err;
104554dfc97bSShailend Chand 
1046d438b4efSShailend Chand 	snprintf(gve_version, sizeof(gve_version), "%d.%d.%d",
1047d438b4efSShailend Chand 	    GVE_VERSION_MAJOR, GVE_VERSION_MINOR, GVE_VERSION_SUB);
1048d438b4efSShailend Chand 
104954dfc97bSShailend Chand 	priv = device_get_softc(dev);
105054dfc97bSShailend Chand 	priv->dev = dev;
105154dfc97bSShailend Chand 	GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);
105254dfc97bSShailend Chand 
105354dfc97bSShailend Chand 	pci_enable_busmaster(dev);
105454dfc97bSShailend Chand 
105554dfc97bSShailend Chand 	rid = PCIR_BAR(GVE_REGISTER_BAR);
105654dfc97bSShailend Chand 	priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
105754dfc97bSShailend Chand 	    &rid, RF_ACTIVE);
105854dfc97bSShailend Chand 	if (priv->reg_bar == NULL) {
105954dfc97bSShailend Chand 		device_printf(dev, "Failed to allocate BAR0\n");
106054dfc97bSShailend Chand 		err = ENXIO;
106154dfc97bSShailend Chand 		goto abort;
106254dfc97bSShailend Chand 	}
106354dfc97bSShailend Chand 
106454dfc97bSShailend Chand 	rid = PCIR_BAR(GVE_DOORBELL_BAR);
106554dfc97bSShailend Chand 	priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
106654dfc97bSShailend Chand 	    &rid, RF_ACTIVE);
106754dfc97bSShailend Chand 	if (priv->db_bar == NULL) {
106854dfc97bSShailend Chand 		device_printf(dev, "Failed to allocate BAR2\n");
106954dfc97bSShailend Chand 		err = ENXIO;
107054dfc97bSShailend Chand 		goto abort;
107154dfc97bSShailend Chand 	}
107254dfc97bSShailend Chand 
107354dfc97bSShailend Chand 	rid = pci_msix_table_bar(priv->dev);
107454dfc97bSShailend Chand 	priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
107554dfc97bSShailend Chand 	    &rid, RF_ACTIVE);
107654dfc97bSShailend Chand 	if (priv->msix_table == NULL) {
107754dfc97bSShailend Chand 		device_printf(dev, "Failed to allocate msix table\n");
107854dfc97bSShailend Chand 		err = ENXIO;
107954dfc97bSShailend Chand 		goto abort;
108054dfc97bSShailend Chand 	}
108154dfc97bSShailend Chand 
108254dfc97bSShailend Chand 	err = gve_alloc_adminq_and_describe_device(priv);
108354dfc97bSShailend Chand 	if (err != 0)
108454dfc97bSShailend Chand 		goto abort;
108554dfc97bSShailend Chand 
108662b2d0c3SJasper Tran O'Leary 	err = gve_alloc_and_configure_device_resources(priv);
108754dfc97bSShailend Chand 	if (err != 0)
108854dfc97bSShailend Chand 		goto abort;
108954dfc97bSShailend Chand 
109071702df6SVee Agarwal 	priv->rx_buf_size_dqo = gve_get_dqo_rx_buf_size(priv, priv->max_mtu);
109154dfc97bSShailend Chand 	err = gve_alloc_rings(priv);
109254dfc97bSShailend Chand 	if (err != 0)
109354dfc97bSShailend Chand 		goto abort;
109454dfc97bSShailend Chand 
1095aa386085SZhenlei Huang 	gve_setup_ifnet(dev, priv);
109654dfc97bSShailend Chand 
109754dfc97bSShailend Chand 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
109854dfc97bSShailend Chand 
109954dfc97bSShailend Chand 	bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
110054dfc97bSShailend Chand 	    sizeof(GVE_DRIVER_VERSION) - 1);
110154dfc97bSShailend Chand 
110254dfc97bSShailend Chand 	TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
110354dfc97bSShailend Chand 	priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
110454dfc97bSShailend Chand 	    taskqueue_thread_enqueue, &priv->service_tq);
110554dfc97bSShailend Chand 	taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
110654dfc97bSShailend Chand 	    device_get_nameunit(priv->dev));
110754dfc97bSShailend Chand 
110854dfc97bSShailend Chand         gve_setup_sysctl(priv);
110954dfc97bSShailend Chand 
111054dfc97bSShailend Chand 	if (bootverbose)
111154dfc97bSShailend Chand 		device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
111254dfc97bSShailend Chand 	return (0);
111354dfc97bSShailend Chand 
111454dfc97bSShailend Chand abort:
111554dfc97bSShailend Chand 	gve_free_rings(priv);
111662b2d0c3SJasper Tran O'Leary 	gve_deconfigure_and_free_device_resources(priv);
111754dfc97bSShailend Chand 	gve_release_adminq(priv);
111854dfc97bSShailend Chand 	gve_free_sys_res_mem(priv);
111954dfc97bSShailend Chand 	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
112054dfc97bSShailend Chand 	return (err);
112154dfc97bSShailend Chand }
112254dfc97bSShailend Chand 
/*
 * device_detach method: detach children, unhook the ifnet from the
 * ethernet layer, destroy the device under the iface lock, and release
 * the rings, BARs, lock, and service taskqueue.
 */
static int
gve_detach(device_t dev)
{
	struct gve_priv *priv = device_get_softc(dev);
	if_t ifp = priv->ifp;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	ether_ifdetach(ifp);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
	gve_destroy(priv);
	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);

	gve_free_rings(priv);
	gve_free_sys_res_mem(priv);
	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);

	/* Cancel any pending service task, or wait out a running one. */
	while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
		taskqueue_drain(priv->service_tq, &priv->service_task);
	taskqueue_free(priv->service_tq);

	if_free(ifp);
	return (0);
}
115154dfc97bSShailend Chand 
/* Newbus device interface methods. */
static device_method_t gve_methods[] = {
	DEVMETHOD(device_probe, gve_probe),
	DEVMETHOD(device_attach, gve_attach),
	DEVMETHOD(device_detach, gve_detach),
	DEVMETHOD_END
};
115854dfc97bSShailend Chand 
static driver_t gve_driver = {
	"gve",
	gve_methods,
	sizeof(struct gve_priv)	/* size of the per-device softc */
};
116454dfc97bSShailend Chand 
/* Older kernels still require an explicit devclass argument. */
#if __FreeBSD_version < 1301503
static devclass_t gve_devclass;

DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0);
#else
DRIVER_MODULE(gve, pci, gve_driver, 0, 0);
#endif
/* Export the PCI ID table so devmatch(8) can autoload this module. */
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, gve, gve_devs,
    nitems(gve_devs));
1174