/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include "gve_desc.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	5
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	struct gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
};

struct gve_priv;

/* An RX ring that contains a power-of-two sized desc and data ring. */
struct gve_rx_ring {
	struct gve_priv *gve;
	struct gve_rx_desc_queue desc;
	struct gve_rx_data_queue data;
	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
};
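
/* The cnt and fill_cnt counters above are free-running and never wrap at
 * the ring size; a slot index is recovered by masking. Illustrative
 * sketch only (not a driver helper):
 *
 *	u32 idx = rx->cnt & rx->mask;		// completed slot to process
 *	u32 fill_idx = rx->fill_cnt & rx->mask;	// next slot to post
 *
 * This works because the desc and data rings are power-of-two sized, so
 * mask == ring size - 1.
 */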

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
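
/* Space is claimed at 'head' and wraps modulo 'size'; 'available' is
 * atomic because completion processing returns space concurrently with
 * transmit claiming it. A minimal sketch of a claim, assuming the caller
 * already verified 'bytes' fit (hypothetical helper, not the driver's
 * actual FIFO code, which also pads allocations to avoid page crossings):
 *
 *	static inline u32 tx_fifo_claim(struct gve_tx_fifo *fifo, u32 bytes)
 *	{
 *		u32 offset = fifo->head;
 *
 *		fifo->head = (fifo->head + bytes) % fifo->size;
 *		atomic_sub(bytes, &fifo->available);
 *		return offset;
 *	}
 */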

/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	struct gve_tx_fifo tx_fifo;
	u32 req; /* driver tracked head pointer */
	u32 done; /* driver tracked tail pointer */

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	__be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */

	/* Cacheline 2 -- Read-mostly fields */
	union gve_tx_desc *desc ____cacheline_aligned;
	struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	u32 mask; /* masks req and done down to queue size */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 ntfy_id; /* notification block index */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_pages_per_qpl; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}
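
/* Callers write this doorbell to mask, unmask or ack the block's
 * interrupt. A sketch of a plausible use (the value written is
 * illustrative only; its semantics are defined by the device):
 *
 *	iowrite32be(0, gve_irq_doorbell(priv, block));
 */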

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
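
/* Together these define the fixed split of ntfy_blocks: TX ring i uses
 * block i and RX ring j uses block (num_ntfy_blks / 2) + j, which is why
 * num_ntfy_blks must be even. For example, with num_ntfy_blks == 8, TX
 * rings 0-3 map to blocks 0-3 and RX rings 0-3 map to blocks 4-7.
 */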

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == priv->qpl_cfg.qpl_map_size)
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}
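
/* The qpl_id_map bitmap is shared by both directions: IDs in
 * [0, gve_num_tx_qpls()) are TX and the rest are RX, which is why the RX
 * search starts past the TX range. A hypothetical caller checks for
 * exhaustion:
 *
 *	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;	// every RX QPL is already assigned
 */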

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

/* Returns true if the max mtu allows page recycling */
static inline bool gve_can_recycle_pages(struct net_device *dev)
{
	/* We can't recycle the pages if we can't fit a packet into half a
	 * page.
	 */
	return dev->max_mtu <= PAGE_SIZE / 2;
}
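
/* When recycling is possible, each RX buffer page serves as two
 * half-page buffers and the receive path alternates between them by
 * flipping the slot's offset, roughly (a sketch of the idea, not the
 * exact receive-path code):
 *
 *	page_info->page_offset ^= PAGE_SIZE / 2;
 */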

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */