// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

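/* How many times to poll, and how long to sleep between polls (in msec),
 * while waiting on the device during release and command completion.
 */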
#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

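	/* An option's payload immediately follows its header, so stepping
	 * option_length bytes past the header yields the start of the next
	 * option; it is only valid while it still lies inside the
	 * descriptor's advertised total_length.
	 */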
	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames,
					dev_op_dqo_qpl);
		dev_opt = next_opt;
	}

	return 0;
}

int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq_pool = dma_pool_create("adminq_pool", dev,
					    GVE_ADMINQ_BUFFER_SIZE, 0, 0);
	if (unlikely(!priv->adminq_pool))
		return -ENOMEM;
	priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				      &priv->adminq_bus_addr);
	if (unlikely(!priv->adminq)) {
		dma_pool_destroy(priv->adminq_pool);
		return -ENOMEM;
	}

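	/* The ring is indexed with a mask, which assumes the number of
	 * commands that fit in the admin queue buffer is a power of two.
	 */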
	priv->adminq_mask =
		(GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
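	/* Revision 0 devices only take a page-frame number; newer revisions
	 * take an explicit base address and length, then a RUN signal in the
	 * driver status register.
	 */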
	if (priv->pdev->revision < 0x1) {
		iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
			    &priv->reg_bar0->adminq_pfn);
	} else {
		iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
			    &priv->reg_bar0->adminq_length);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		iowrite32be(priv->adminq_bus_addr >> 32,
			    &priv->reg_bar0->adminq_base_address_hi);
#endif
		iowrite32be(priv->adminq_bus_addr,
			    &priv->reg_bar0->adminq_base_address_lo);
		iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
	}
	gve_set_admin_queue_ok(priv);
	return 0;
}

void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
		while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
			/* If this is reached the device is unrecoverable and still
			 * holding memory. Continue looping to avoid memory corruption,
			 * but WARN so it is visible what is going on.
			 */
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	} else {
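		/* Newer devices expose an explicit reset handshake through
		 * the driver/device status registers.
		 */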
		iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
		while (!(ioread32be(&priv->reg_bar0->device_status)
			 & GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
	dma_pool_destroy(priv->adminq_pool);
	gve_clear_admin_queue_ok(priv);
}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

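/* Poll the device's event counter until it catches up to prod_cnt, or give
 * up after GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK tries.
 */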
static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
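	/* Map the device's status code onto the closest kernel errno. */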
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

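	/* tail is the device's event counter (commands completed so far);
	 * head is the driver's producer count. Everything in between is
	 * still in flight.
	 */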
	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

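	// Keep per-opcode statistics for diagnostics.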
	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last; if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX 0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
			cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
	};

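	/* Raw-addressing (RDA) queues have no queue page list; that is
	 * signalled with the sentinel GVE_RAW_ADDRESSING_QPL_ID.
	 */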
	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		u16 comp_ring_size;
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
			comp_ring_size =
				priv->options_dqo_rda.tx_comp_ring_entries;
		} else {
			qpl_id = tx->dqo.qpl->id;
			comp_ring_size = priv->tx_desc_cnt;
		}
		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_tx_queue.tx_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(comp_ring_size);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		u16 rx_buff_ring_entries;
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
			rx_buff_ring_entries =
				priv->options_dqo_rda.rx_buff_ring_entries;
		} else {
			qpl_id = rx->dqo.qpl->id;
			rx_buff_ring_entries = priv->rx_desc_cnt;
		}
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.rx_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(rx_buff_ring_entries);
		cmd.create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_set_desc_cnt(struct gve_priv *priv,
			    struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	return 0;
}

static int
gve_set_desc_cnt_dqo(struct gve_priv *priv,
		     const struct gve_device_descriptor *descriptor,
		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);

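	/* The DQO-QPL format does not carry the RDA ring-size options. */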
	if (priv->queue_format == GVE_DQO_QPL_FORMAT)
		return 0;

	priv->options_dqo_rda.tx_comp_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
	priv->options_dqo_rda.rx_buff_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);

	return 0;
}

static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		priv->rx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
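		/* Treat zero as "device did not specify" and fall back to
		 * the driver defaults.
		 */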
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
		if (priv->rx_pages_per_qpl == 0)
			priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
	}
}

int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				    &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
		cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length =
		cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames,
					 &dev_op_dqo_qpl);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_dqo_qpl) {
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}
	if (gve_is_gqi(priv)) {
		err = gve_set_desc_cnt(priv, descriptor);
	} else {
		/* DQO supports LRO. */
		priv->dev->hw_features |= NETIF_F_LRO;
		err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
	}
	if (err)
		goto free_device_descriptor;

	priv->max_registered_pages =
		be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);

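	/* Each GQI rx descriptor needs a backing data slot, so the ring can
	 * never be deeper than the number of data slots in the QPL.
	 */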
	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
			priv->rx_data_slot_cnt);
		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
	}
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames, dev_op_dqo_qpl);

free_device_descriptor:
	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}

int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

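	/* Build a device-readable (big-endian) array of page bus addresses. */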
	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
		.page_size = cpu_to_be64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}