// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include <net/libeth/rx.h>

#include "idpf.h"
#include "idpf_virtchnl.h"

#define IDPF_VC_XN_MIN_TIMEOUT_MSEC	2000
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC	(60 * 1000)
#define IDPF_VC_XN_IDX_M		GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M		GENMASK(15, 8)
#define IDPF_VC_XN_RING_LEN		U8_MAX

/**
 * enum idpf_vc_xn_state - Virtchnl transaction status
 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
 *				  buffer updated
 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
 *				 was an error, buffer not updated
 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
 *		      return context; a callback may be provided to handle
 *		      return
 */
enum idpf_vc_xn_state {
	IDPF_VC_XN_IDLE = 1,
	IDPF_VC_XN_WAITING,
	IDPF_VC_XN_COMPLETED_SUCCESS,
	IDPF_VC_XN_COMPLETED_FAILED,
	IDPF_VC_XN_SHUTDOWN,
	IDPF_VC_XN_ASYNC,
};

struct idpf_vc_xn;
/* Callback for asynchronous messages */
typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
			    const struct idpf_ctlq_msg *);

/**
 * struct idpf_vc_xn - Data structure representing virtchnl transactions
 * @completed: virtchnl event loop uses that to signal when a reply is
 *	       available, uses kernel completion API
 * @state: virtchnl event loop stores the data below, protected by the
 *	   completion's lock
 * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
 *	      truncated on its way to the receiver thread according to
 *	      reply_buf.iov_len
 * @reply: Reference to the buffer(s) where the reply data should be written
 *	   to. May be 0-length (then NULL address permitted) if the reply data
 *	   should be ignored.
 * @async_handler: if sent asynchronously, a callback can be provided to handle
 *		   the reply when it's received
 * @vc_op: corresponding opcode sent with this transaction
 * @idx: index used as retrieval on reply receive, used for cookie
 * @salt: changed every message to make unique, used for cookie
 */
struct idpf_vc_xn {
	struct completion completed;
	enum idpf_vc_xn_state state;
	size_t reply_sz;
	struct kvec reply;
	async_vc_cb async_handler;
	u32 vc_op;
	u8 idx;
	u8 salt;
};
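
/*
 * Cookie layout used to pair replies with ring slots: the low byte of the
 * 16-bit SW cookie carries the transaction index (IDPF_VC_XN_IDX_M) and the
 * high byte carries the per-message salt (IDPF_VC_XN_SALT_M). For example,
 * slot 3 sent with salt 0x7a travels as cookie 0x7a03; a reply whose salt no
 * longer matches its slot is treated as stale and rejected.
 */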

/**
 * struct idpf_vc_xn_params - Parameters for executing transaction
 * @send_buf: kvec for send buffer
 * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
 * @timeout_ms: timeout to wait for reply
 * @async: send message asynchronously, will not wait on completion
 * @async_handler: If sent asynchronously, optional callback handler. The user
 *		   must be careful when using async handlers as the memory for
 *		   the recv_buf _cannot_ be on stack if this is async.
 * @vc_op: virtchnl op to send
 */
struct idpf_vc_xn_params {
	struct kvec send_buf;
	struct kvec recv_buf;
	int timeout_ms;
	bool async;
	async_vc_cb async_handler;
	u32 vc_op;
};

/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions
 * @free_xn_bm: bitmap for free transactions
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make cookie unique every message
 */
struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;
	u8 salt;
};

/**
 * idpf_vid_to_vport - Translate vport id to vport pointer
 * @adapter: private data struct
 * @v_id: vport id to translate
 *
 * Returns vport matching v_id, NULL if not found.
 */
static
struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
{
	u16 num_max_vports = idpf_get_max_vports(adapter);
	int i;

	for (i = 0; i < num_max_vports; i++)
		if (adapter->vport_ids[i] == v_id)
			return adapter->vports[i];

	return NULL;
}

/**
 * idpf_handle_event_link - Handle link event message
 * @adapter: private data struct
 * @v2e: virtchnl event message
 */
static void idpf_handle_event_link(struct idpf_adapter *adapter,
				   const struct virtchnl2_event *v2e)
{
	struct idpf_netdev_priv *np;
	struct idpf_vport *vport;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
	if (!vport) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
				    v2e->vport_id);
		return;
	}
	np = netdev_priv(vport->netdev);

	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);

	if (vport->link_up == v2e->link_status)
		return;

	vport->link_up = v2e->link_status;

	if (np->state != __IDPF_VPORT_UP)
		return;

	if (vport->link_up) {
		netif_tx_start_all_queues(vport->netdev);
		netif_carrier_on(vport->netdev);
	} else {
		netif_tx_stop_all_queues(vport->netdev);
		netif_carrier_off(vport->netdev);
	}
}

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @adapter: Driver specific private structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_adapter *adapter,
				struct idpf_ctlq_msg *ctlq_msg)
{
	int payload_size = ctlq_msg->ctx.indirect.payload->size;
	struct virtchnl2_event *v2e;
	u32 event;

	if (payload_size < sizeof(*v2e)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    payload_size);
		return;
	}

	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
	event = le32_to_cpu(v2e->event);

	switch (event) {
	case VIRTCHNL2_EVENT_LINK_CHANGE:
		idpf_handle_event_link(adapter, v2e);
		return;
	default:
		dev_err(&adapter->pdev->dev,
			"Unknown event %d from PF\n", event);
		break;
	}
}
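
/*
 * Note: unsolicited VIRTCHNL2_OP_EVENT messages (such as link change) are
 * dispatched straight to idpf_recv_event_msg() from the mailbox receive
 * loop; they never touch the transaction ring below since no thread is
 * waiting on them.
 */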

/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: Driver specific private structure
 *
 * Reclaim the send mailbox queue entries to be used to send further messages
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter)
{
	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
	struct idpf_ctlq_msg **q_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
	if (!q_msg)
		return -ENOMEM;

	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
	if (err)
		goto err_kfree;

	for (i = 0; i < num_q_msg; i++) {
		if (!q_msg[i])
			continue;
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem)
			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
					  dma_mem->va, dma_mem->pa);
		kfree(q_msg[i]);
		kfree(dma_mem);
	}

err_kfree:
	kfree(q_msg);

	return err;
}

/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiate the send API
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and is expected to
	 * be corrected with a new run either by user or driver
	 * flows after reset
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter);
	if (err)
		return err;

	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;
	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}

/* API for virtchnl "transaction" support ("xn" for short).
 *
 * We are reusing the completion lock to serialize the accesses to the
 * transaction state for simplicity, but it could be its own separate synchro
 * as well. For now, this API is only used from within a workqueue context;
 * raw_spin_lock() is enough.
 */
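
/*
 * Typical usage, as seen in the senders further down (e.g.
 * idpf_send_destroy_vport_msg()): fill a struct idpf_vc_xn_params with the
 * opcode, send/recv kvecs and a timeout, then let idpf_vc_xn_exec() do the
 * send, wait and copy-back:
 *
 *	struct idpf_vc_xn_params xn_params = {};
 *
 *	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
 *	xn_params.send_buf.iov_base = &v_id;
 *	xn_params.send_buf.iov_len = sizeof(v_id);
 *	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
 *	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
 *
 * A negative return is an error; otherwise it is the size of the reply that
 * was copied into recv_buf (clamped to recv_buf.iov_len).
 */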

/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_lock(xn)	\
	raw_spin_lock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn)	\
	raw_spin_unlock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
 * reset the transaction state.
 * @xn: struct idpf_vc_xn to update
 */
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
	xn->reply.iov_base = NULL;
	xn->reply.iov_len = 0;

	if (xn->state != IDPF_VC_XN_SHUTDOWN)
		xn->state = IDPF_VC_XN_IDLE;
}

/**
 * idpf_vc_xn_init - Initialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 */
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_init(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		xn->state = IDPF_VC_XN_IDLE;
		xn->idx = i;
		idpf_vc_xn_release_bufs(xn);
		init_completion(&xn->completed);
	}

	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}

/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken-up and their transaction aborted. Further
 * operations on that object will fail.
 */
static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		idpf_vc_xn_lock(xn);
		xn->state = IDPF_VC_XN_SHUTDOWN;
		idpf_vc_xn_release_bufs(xn);
		idpf_vc_xn_unlock(xn);
		complete_all(&xn->completed);
	}
}

/**
 * idpf_vc_xn_pop_free - Pop a free transaction from free list
 * @vcxn_mngr: transaction manager to pop from
 *
 * Returns NULL if no free transactions
 */
static
struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
	struct idpf_vc_xn *xn = NULL;
	unsigned long free_idx;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	if (free_idx == IDPF_VC_XN_RING_LEN)
		goto do_unlock;

	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
	xn = &vcxn_mngr->ring[free_idx];
	xn->salt = vcxn_mngr->salt++;

do_unlock:
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	return xn;
}

/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	idpf_vc_xn_release_bufs(xn);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}
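
/*
 * Transaction lifecycle: idpf_vc_xn_exec() pops a free slot, sends, and
 * (unless async) waits; idpf_vc_xn_forward_reply() completes the slot and the
 * sender pushes it back to the free pool. On shutdown the free bitmap is
 * zeroed and every slot is marked IDPF_VC_XN_SHUTDOWN, so new sends fail with
 * -ENOSPC and waiters wake up to find -ENXIO.
 */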

/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 *	    -vc_op: virtchannel operation to send
 *	    -send_buf: kvec iov for send buf and len
 *	    -recv_buf: kvec iov for recv buf and len (ignored if NULL)
 *	    -timeout_ms: timeout waiting for a reply (milliseconds)
 *	    -async: don't wait for message reply, will lose caller context
 *	    -async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			       const struct idpf_vc_xn_params *params)
{
	const struct kvec *send_buf = &params->send_buf;
	struct idpf_vc_xn *xn;
	ssize_t retval;
	u16 cookie;

	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
	/* no free transactions available */
	if (!xn)
		return -ENOSPC;

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
		retval = -ENXIO;
		goto only_unlock;
	} else if (xn->state != IDPF_VC_XN_IDLE) {
		/* We're just going to clobber this transaction even though
		 * it's not IDLE. If we don't reuse it we could theoretically
		 * eventually leak all the free transactions and not be able to
		 * send any messages. At least this way we make an attempt to
		 * remain functional even though something really bad is
		 * happening that's corrupting what was supposed to be free
		 * transactions.
		 */
		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
			  xn->idx, xn->vc_op);
	}

	xn->reply = params->recv_buf;
	xn->reply_sz = 0;
	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
	xn->vc_op = params->vc_op;
	xn->async_handler = params->async_handler;
	idpf_vc_xn_unlock(xn);

	if (!params->async)
		reinit_completion(&xn->completed);
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	retval = idpf_send_mb_msg(adapter, params->vc_op,
				  send_buf->iov_len, send_buf->iov_base,
				  cookie);
	if (retval) {
		idpf_vc_xn_lock(xn);
		goto release_and_unlock;
	}

	if (params->async)
		return 0;

	wait_for_completion_timeout(&xn->completed,
				    msecs_to_jiffies(params->timeout_ms));

	/* No need to check the return value; we check the final state of the
	 * transaction below. It's possible the transaction actually gets more
	 * timeout than specified if we get preempted here but after
	 * wait_for_completion_timeout returns. This should be non-issue
	 * however.
	 */
	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_SHUTDOWN:
		retval = -ENXIO;
		goto only_unlock;
	case IDPF_VC_XN_WAITING:
		dev_notice_ratelimited(&adapter->pdev->dev,
				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
				       params->vc_op, cookie, xn->vc_op,
				       xn->salt, params->timeout_ms);
		retval = -ETIME;
		break;
	case IDPF_VC_XN_COMPLETED_SUCCESS:
		retval = xn->reply_sz;
		break;
	case IDPF_VC_XN_COMPLETED_FAILED:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
				       params->vc_op);
		retval = -EIO;
		break;
	default:
		/* Invalid state. */
		WARN_ON_ONCE(1);
		retval = -EIO;
		break;
	}

release_and_unlock:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
	/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
	idpf_vc_xn_unlock(xn);

	return retval;
}
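
/*
 * Note on return sizes: a successful idpf_vc_xn_exec() returns the full reply
 * size as received on the control queue, which may exceed recv_buf.iov_len;
 * the copy into recv_buf done by idpf_vc_xn_forward_reply() is clamped with
 * min_t(), so callers comparing the return value against an expected struct
 * size (as idpf_send_ver_msg() does) never read past their buffer.
 */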

/**
 * idpf_vc_xn_forward_async - Handle async reply receives
 * @adapter: private data struct
 * @xn: transaction to handle
 * @ctlq_msg: corresponding ctlq_msg
 *
 * For async sends we're going to lose the caller's context so, if an
 * async_handler was provided, it can deal with the reply, otherwise we'll just
 * check and report if there is an error.
 */
static int
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	int err = 0;

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		err = -EINVAL;
		goto release_bufs;
	}

	if (xn->async_handler) {
		err = xn->async_handler(adapter, xn, ctlq_msg);
		goto release_bufs;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
	}

release_bufs:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);

	return err;
}
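
/*
 * Reminder for async senders: the reply buffer referenced by recv_buf must
 * outlive the call (it cannot live on the sender's stack, see
 * struct idpf_vc_xn_params), and whether or not an async_handler is supplied
 * the slot is always returned to the free pool here via
 * idpf_vc_xn_push_free().
 */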

/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	const void *payload = NULL;
	size_t payload_size = 0;
	struct idpf_vc_xn *xn;
	u16 msg_info;
	int err = 0;
	u16 xn_idx;
	u16 salt;

	msg_info = ctlq_msg->ctx.sw_cookie.data;
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
				    xn_idx);
		return -EINVAL;
	}
	xn = &adapter->vcxn_mngr->ring[xn_idx];
	idpf_vc_xn_lock(xn);
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt) {
		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
				    xn->vc_op, xn->salt, xn->state,
				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
		idpf_vc_xn_unlock(xn);
		return -EINVAL;
	}

	switch (xn->state) {
	case IDPF_VC_XN_WAITING:
		/* success */
		break;
	case IDPF_VC_XN_IDLE:
		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
		goto out_unlock;
	case IDPF_VC_XN_SHUTDOWN:
		/* ENXIO is a bit special here as the recv msg loop uses that
		 * to know if it should stop trying to clean the ring if we
		 * lost the virtchnl. We need to stop playing with registers
		 * and yield.
		 */
		err = -ENXIO;
		goto out_unlock;
	case IDPF_VC_XN_ASYNC:
		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
		idpf_vc_xn_unlock(xn);
		return err;
	default:
		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EBUSY;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->data_len) {
		payload = ctlq_msg->ctx.indirect.payload->va;
		payload_size = ctlq_msg->data_len;
	}

	xn->reply_sz = payload_size;
	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
		memcpy(xn->reply.iov_base, payload,
		       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
	idpf_vc_xn_unlock(xn);
	/* we _cannot_ hold lock while calling complete */
	complete(&xn->completed);

	return err;
}

/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: Driver specific private structure
 *
 * Will receive control queue message and post the receive buffer. Returns 0
 * on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int post_err, err;
	u16 num_recv;

	while (1) {
		/* This will get <= num_recv messages and output how many
		 * actually received on num_recv.
		 */
		num_recv = 1;
		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
		if (err || !num_recv)
			break;

		if (ctlq_msg.data_len) {
			dma_mem = ctlq_msg.ctx.indirect.payload;
		} else {
			dma_mem = NULL;
			num_recv = 0;
		}

		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
			idpf_recv_event_msg(adapter, &ctlq_msg);
		else
			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
						   adapter->hw.arq,
						   &num_recv, &dma_mem);

		/* If post failed clear the only buffer we supplied */
		if (post_err) {
			if (dma_mem)
				dmam_free_coherent(&adapter->pdev->dev,
						   dma_mem->size, dma_mem->va,
						   dma_mem->pa);
			break;
		}

		/* virtchnl trying to shutdown, stop cleaning */
		if (err == -ENXIO)
			break;
	}

	return err;
}
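
/*
 * The receive loop above processes one ARQ descriptor per iteration and
 * immediately gives the buffer back via idpf_ctlq_post_rx_buffs(); a -ENXIO
 * from idpf_vc_xn_forward_reply() means the transaction layer has been shut
 * down, so the loop stops touching the (possibly resetting) hardware.
 */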

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Returns 0 on success, negative on failure.
 **/
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
	int event;
	int i;

	for (i = 0; i < vport->num_txq; i++)
		idpf_queue_set(SW_MARKER, vport->txqs[i]);

	event = wait_event_timeout(vport->sw_marker_wq,
				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
						      vport->flags),
				   msecs_to_jiffies(500));

	for (i = 0; i < vport->num_txq; i++)
		idpf_queue_clear(POLL_MODE, vport->txqs[i]);

	if (event)
		return 0;

	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");

	return -ETIMEDOUT;
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message. Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
	ssize_t reply_sz;
	u32 major, minor;
	int err = 0;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(vvi))
		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}

/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);

	caps.seg_caps =
		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP |
			    VIRTCHNL2_CAP_SEG_IPV4_UDP |
			    VIRTCHNL2_CAP_SEG_IPV4_SCTP |
			    VIRTCHNL2_CAP_SEG_IPV6_TCP |
			    VIRTCHNL2_CAP_SEG_IPV6_UDP |
			    VIRTCHNL2_CAP_SEG_IPV6_SCTP |
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

	caps.rss_caps =
		cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
			    VIRTCHNL2_CAP_RSS_IPV4_UDP |
			    VIRTCHNL2_CAP_RSS_IPV4_SCTP |
			    VIRTCHNL2_CAP_RSS_IPV4_OTHER |
			    VIRTCHNL2_CAP_RSS_IPV6_TCP |
			    VIRTCHNL2_CAP_RSS_IPV6_UDP |
			    VIRTCHNL2_CAP_RSS_IPV6_SCTP |
			    VIRTCHNL2_CAP_RSS_IPV6_OTHER);

	caps.hsplit_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

	caps.rsc_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP |
			    VIRTCHNL2_CAP_RSC_IPV6_TCP);

	caps.other_caps =
		cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
			    VIRTCHNL2_CAP_MACFILTER |
			    VIRTCHNL2_CAP_SPLITQ_QSCHED |
			    VIRTCHNL2_CAP_PROMISC |
			    VIRTCHNL2_CAP_LOOPBACK);

	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
	xn_params.send_buf.iov_base = &caps;
	xn_params.send_buf.iov_len = sizeof(caps);
	xn_params.recv_buf.iov_base = &adapter->caps;
	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(adapter->caps))
		return -EIO;

	return 0;
}

/**
 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;
	u16 default_vports = idpf_get_default_vports(adapter);
	int max_rx_q, max_tx_q;

	mutex_lock(&adapter->queue_lock);

	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
	if (adapter->num_alloc_vports < default_vports) {
		max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
		max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
	} else {
		max_q->max_rxq = IDPF_MIN_Q;
		max_q->max_txq = IDPF_MIN_Q;
	}
	max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
	max_q->max_complq = max_q->max_txq;

	if (avail_queues->avail_rxq < max_q->max_rxq ||
949 avail_queues->avail_txq < max_q->max_txq || 950 avail_queues->avail_bufq < max_q->max_bufq || 951 avail_queues->avail_complq < max_q->max_complq) { 952 mutex_unlock(&adapter->queue_lock); 953 954 return -EINVAL; 955 } 956 957 avail_queues->avail_rxq -= max_q->max_rxq; 958 avail_queues->avail_txq -= max_q->max_txq; 959 avail_queues->avail_bufq -= max_q->max_bufq; 960 avail_queues->avail_complq -= max_q->max_complq; 961 962 mutex_unlock(&adapter->queue_lock); 963 964 return 0; 965 } 966 967 /** 968 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport 969 * @adapter: Driver specific private structure 970 * @max_q: vport max queue structure 971 */ 972 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, 973 struct idpf_vport_max_q *max_q) 974 { 975 struct idpf_avail_queue_info *avail_queues; 976 977 mutex_lock(&adapter->queue_lock); 978 avail_queues = &adapter->avail_queues; 979 980 avail_queues->avail_rxq += max_q->max_rxq; 981 avail_queues->avail_txq += max_q->max_txq; 982 avail_queues->avail_bufq += max_q->max_bufq; 983 avail_queues->avail_complq += max_q->max_complq; 984 985 mutex_unlock(&adapter->queue_lock); 986 } 987 988 /** 989 * idpf_init_avail_queues - Initialize available queues on the device 990 * @adapter: Driver specific private structure 991 */ 992 static void idpf_init_avail_queues(struct idpf_adapter *adapter) 993 { 994 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; 995 struct virtchnl2_get_capabilities *caps = &adapter->caps; 996 997 avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); 998 avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); 999 avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); 1000 avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); 1001 } 1002 1003 /** 1004 * idpf_get_reg_intr_vecs - Get vector queue register offset 1005 * @vport: virtual port structure 1006 * @reg_vals: Register offsets to store in 1007 * 1008 * Returns number of registers that got populated 1009 */ 1010 int idpf_get_reg_intr_vecs(struct idpf_vport *vport, 1011 struct idpf_vec_regs *reg_vals) 1012 { 1013 struct virtchnl2_vector_chunks *chunks; 1014 struct idpf_vec_regs reg_val; 1015 u16 num_vchunks, num_vec; 1016 int num_regs = 0, i, j; 1017 1018 chunks = &vport->adapter->req_vec_chunks->vchunks; 1019 num_vchunks = le16_to_cpu(chunks->num_vchunks); 1020 1021 for (j = 0; j < num_vchunks; j++) { 1022 struct virtchnl2_vector_chunk *chunk; 1023 u32 dynctl_reg_spacing; 1024 u32 itrn_reg_spacing; 1025 1026 chunk = &chunks->vchunks[j]; 1027 num_vec = le16_to_cpu(chunk->num_vectors); 1028 reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); 1029 reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); 1030 reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); 1031 1032 dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); 1033 itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); 1034 1035 for (i = 0; i < num_vec; i++) { 1036 reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg; 1037 reg_vals[num_regs].itrn_reg = reg_val.itrn_reg; 1038 reg_vals[num_regs].itrn_index_spacing = 1039 reg_val.itrn_index_spacing; 1040 1041 reg_val.dyn_ctl_reg += dynctl_reg_spacing; 1042 reg_val.itrn_reg += itrn_reg_spacing; 1043 num_regs++; 1044 } 1045 } 1046 1047 return num_regs; 1048 } 1049 1050 /** 1051 * idpf_vport_get_q_reg - Get the queue registers for the vport 1052 * @reg_vals: register values needing to be set 1053 * @num_regs: amount we expect to fill 1054 * @q_type: queue model 1055 * @chunks: queue regs received 
over mailbox 1056 * 1057 * This function parses the queue register offsets from the queue register 1058 * chunk information, with a specific queue type and stores it into the array 1059 * passed as an argument. It returns the actual number of queue registers that 1060 * are filled. 1061 */ 1062 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, 1063 struct virtchnl2_queue_reg_chunks *chunks) 1064 { 1065 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 1066 int reg_filled = 0, i; 1067 u32 reg_val; 1068 1069 while (num_chunks--) { 1070 struct virtchnl2_queue_reg_chunk *chunk; 1071 u16 num_q; 1072 1073 chunk = &chunks->chunks[num_chunks]; 1074 if (le32_to_cpu(chunk->type) != q_type) 1075 continue; 1076 1077 num_q = le32_to_cpu(chunk->num_queues); 1078 reg_val = le64_to_cpu(chunk->qtail_reg_start); 1079 for (i = 0; i < num_q && reg_filled < num_regs ; i++) { 1080 reg_vals[reg_filled++] = reg_val; 1081 reg_val += le32_to_cpu(chunk->qtail_reg_spacing); 1082 } 1083 } 1084 1085 return reg_filled; 1086 } 1087 1088 /** 1089 * __idpf_queue_reg_init - initialize queue registers 1090 * @vport: virtual port structure 1091 * @reg_vals: registers we are initializing 1092 * @num_regs: how many registers there are in total 1093 * @q_type: queue model 1094 * 1095 * Return number of queues that are initialized 1096 */ 1097 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, 1098 int num_regs, u32 q_type) 1099 { 1100 struct idpf_adapter *adapter = vport->adapter; 1101 int i, j, k = 0; 1102 1103 switch (q_type) { 1104 case VIRTCHNL2_QUEUE_TYPE_TX: 1105 for (i = 0; i < vport->num_txq_grp; i++) { 1106 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1107 1108 for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) 1109 tx_qgrp->txqs[j]->tail = 1110 idpf_get_reg_addr(adapter, reg_vals[k]); 1111 } 1112 break; 1113 case VIRTCHNL2_QUEUE_TYPE_RX: 1114 for (i = 0; i < vport->num_rxq_grp; i++) { 1115 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1116 u16 num_rxq = rx_qgrp->singleq.num_rxq; 1117 1118 for (j = 0; j < num_rxq && k < num_regs; j++, k++) { 1119 struct idpf_rx_queue *q; 1120 1121 q = rx_qgrp->singleq.rxqs[j]; 1122 q->tail = idpf_get_reg_addr(adapter, 1123 reg_vals[k]); 1124 } 1125 } 1126 break; 1127 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 1128 for (i = 0; i < vport->num_rxq_grp; i++) { 1129 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1130 u8 num_bufqs = vport->num_bufqs_per_qgrp; 1131 1132 for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { 1133 struct idpf_buf_queue *q; 1134 1135 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1136 q->tail = idpf_get_reg_addr(adapter, 1137 reg_vals[k]); 1138 } 1139 } 1140 break; 1141 default: 1142 break; 1143 } 1144 1145 return k; 1146 } 1147 1148 /** 1149 * idpf_queue_reg_init - initialize queue registers 1150 * @vport: virtual port structure 1151 * 1152 * Return 0 on success, negative on failure 1153 */ 1154 int idpf_queue_reg_init(struct idpf_vport *vport) 1155 { 1156 struct virtchnl2_create_vport *vport_params; 1157 struct virtchnl2_queue_reg_chunks *chunks; 1158 struct idpf_vport_config *vport_config; 1159 u16 vport_idx = vport->idx; 1160 int num_regs, ret = 0; 1161 u32 *reg_vals; 1162 1163 /* We may never deal with more than 256 same type of queues */ 1164 reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL); 1165 if (!reg_vals) 1166 return -ENOMEM; 1167 1168 vport_config = vport->adapter->vport_config[vport_idx]; 1169 if (vport_config->req_qs_chunks) { 1170 struct virtchnl2_add_queues *vc_aq 
= 1171 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; 1172 chunks = &vc_aq->chunks; 1173 } else { 1174 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 1175 chunks = &vport_params->chunks; 1176 } 1177 1178 /* Initialize Tx queue tail register address */ 1179 num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, 1180 VIRTCHNL2_QUEUE_TYPE_TX, 1181 chunks); 1182 if (num_regs < vport->num_txq) { 1183 ret = -EINVAL; 1184 goto free_reg_vals; 1185 } 1186 1187 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, 1188 VIRTCHNL2_QUEUE_TYPE_TX); 1189 if (num_regs < vport->num_txq) { 1190 ret = -EINVAL; 1191 goto free_reg_vals; 1192 } 1193 1194 /* Initialize Rx/buffer queue tail register address based on Rx queue 1195 * model 1196 */ 1197 if (idpf_is_queue_model_split(vport->rxq_model)) { 1198 num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, 1199 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, 1200 chunks); 1201 if (num_regs < vport->num_bufq) { 1202 ret = -EINVAL; 1203 goto free_reg_vals; 1204 } 1205 1206 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, 1207 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1208 if (num_regs < vport->num_bufq) { 1209 ret = -EINVAL; 1210 goto free_reg_vals; 1211 } 1212 } else { 1213 num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, 1214 VIRTCHNL2_QUEUE_TYPE_RX, 1215 chunks); 1216 if (num_regs < vport->num_rxq) { 1217 ret = -EINVAL; 1218 goto free_reg_vals; 1219 } 1220 1221 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, 1222 VIRTCHNL2_QUEUE_TYPE_RX); 1223 if (num_regs < vport->num_rxq) { 1224 ret = -EINVAL; 1225 goto free_reg_vals; 1226 } 1227 } 1228 1229 free_reg_vals: 1230 kfree(reg_vals); 1231 1232 return ret; 1233 } 1234 1235 /** 1236 * idpf_send_create_vport_msg - Send virtchnl create vport message 1237 * @adapter: Driver specific private structure 1238 * @max_q: vport max queue info 1239 * 1240 * send virtchnl creae vport message 1241 * 1242 * Returns 0 on success, negative on failure 1243 */ 1244 int idpf_send_create_vport_msg(struct idpf_adapter *adapter, 1245 struct idpf_vport_max_q *max_q) 1246 { 1247 struct virtchnl2_create_vport *vport_msg; 1248 struct idpf_vc_xn_params xn_params = {}; 1249 u16 idx = adapter->next_vport; 1250 int err, buf_size; 1251 ssize_t reply_sz; 1252 1253 buf_size = sizeof(struct virtchnl2_create_vport); 1254 if (!adapter->vport_params_reqd[idx]) { 1255 adapter->vport_params_reqd[idx] = kzalloc(buf_size, 1256 GFP_KERNEL); 1257 if (!adapter->vport_params_reqd[idx]) 1258 return -ENOMEM; 1259 } 1260 1261 vport_msg = adapter->vport_params_reqd[idx]; 1262 vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); 1263 vport_msg->vport_index = cpu_to_le16(idx); 1264 1265 if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) 1266 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); 1267 else 1268 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); 1269 1270 if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) 1271 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); 1272 else 1273 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); 1274 1275 err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q); 1276 if (err) { 1277 dev_err(&adapter->pdev->dev, "Enough queues are not available"); 1278 1279 return err; 1280 } 1281 1282 if (!adapter->vport_params_recvd[idx]) { 1283 adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, 1284 GFP_KERNEL); 1285 if (!adapter->vport_params_recvd[idx]) { 1286 err = 
-ENOMEM; 1287 goto free_vport_params; 1288 } 1289 } 1290 1291 xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT; 1292 xn_params.send_buf.iov_base = vport_msg; 1293 xn_params.send_buf.iov_len = buf_size; 1294 xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx]; 1295 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 1296 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1297 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 1298 if (reply_sz < 0) { 1299 err = reply_sz; 1300 goto free_vport_params; 1301 } 1302 1303 return 0; 1304 1305 free_vport_params: 1306 kfree(adapter->vport_params_recvd[idx]); 1307 adapter->vport_params_recvd[idx] = NULL; 1308 kfree(adapter->vport_params_reqd[idx]); 1309 adapter->vport_params_reqd[idx] = NULL; 1310 1311 return err; 1312 } 1313 1314 /** 1315 * idpf_check_supported_desc_ids - Verify we have required descriptor support 1316 * @vport: virtual port structure 1317 * 1318 * Return 0 on success, error on failure 1319 */ 1320 int idpf_check_supported_desc_ids(struct idpf_vport *vport) 1321 { 1322 struct idpf_adapter *adapter = vport->adapter; 1323 struct virtchnl2_create_vport *vport_msg; 1324 u64 rx_desc_ids, tx_desc_ids; 1325 1326 vport_msg = adapter->vport_params_recvd[vport->idx]; 1327 1328 if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) && 1329 (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || 1330 vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { 1331 pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); 1332 return -EOPNOTSUPP; 1333 } 1334 1335 rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); 1336 tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); 1337 1338 if (idpf_is_queue_model_split(vport->rxq_model)) { 1339 if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { 1340 dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); 1341 vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1342 } 1343 } else { 1344 if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M)) 1345 vport->base_rxd = true; 1346 } 1347 1348 if (!idpf_is_queue_model_split(vport->txq_model)) 1349 return 0; 1350 1351 if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { 1352 dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); 1353 vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); 1354 } 1355 1356 return 0; 1357 } 1358 1359 /** 1360 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message 1361 * @vport: virtual port data structure 1362 * 1363 * Send virtchnl destroy vport message. Returns 0 on success, negative on 1364 * failure. 1365 */ 1366 int idpf_send_destroy_vport_msg(struct idpf_vport *vport) 1367 { 1368 struct idpf_vc_xn_params xn_params = {}; 1369 struct virtchnl2_vport v_id; 1370 ssize_t reply_sz; 1371 1372 v_id.vport_id = cpu_to_le32(vport->vport_id); 1373 1374 xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT; 1375 xn_params.send_buf.iov_base = &v_id; 1376 xn_params.send_buf.iov_len = sizeof(v_id); 1377 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1378 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1379 1380 return reply_sz < 0 ? reply_sz : 0; 1381 } 1382 1383 /** 1384 * idpf_send_enable_vport_msg - Send virtchnl enable vport message 1385 * @vport: virtual port data structure 1386 * 1387 * Send enable vport virtchnl message. Returns 0 on success, negative on 1388 * failure. 
1389 */ 1390 int idpf_send_enable_vport_msg(struct idpf_vport *vport) 1391 { 1392 struct idpf_vc_xn_params xn_params = {}; 1393 struct virtchnl2_vport v_id; 1394 ssize_t reply_sz; 1395 1396 v_id.vport_id = cpu_to_le32(vport->vport_id); 1397 1398 xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT; 1399 xn_params.send_buf.iov_base = &v_id; 1400 xn_params.send_buf.iov_len = sizeof(v_id); 1401 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1402 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1403 1404 return reply_sz < 0 ? reply_sz : 0; 1405 } 1406 1407 /** 1408 * idpf_send_disable_vport_msg - Send virtchnl disable vport message 1409 * @vport: virtual port data structure 1410 * 1411 * Send disable vport virtchnl message. Returns 0 on success, negative on 1412 * failure. 1413 */ 1414 int idpf_send_disable_vport_msg(struct idpf_vport *vport) 1415 { 1416 struct idpf_vc_xn_params xn_params = {}; 1417 struct virtchnl2_vport v_id; 1418 ssize_t reply_sz; 1419 1420 v_id.vport_id = cpu_to_le32(vport->vport_id); 1421 1422 xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT; 1423 xn_params.send_buf.iov_base = &v_id; 1424 xn_params.send_buf.iov_len = sizeof(v_id); 1425 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1426 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1427 1428 return reply_sz < 0 ? reply_sz : 0; 1429 } 1430 1431 /** 1432 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message 1433 * @vport: virtual port data structure 1434 * 1435 * Send config tx queues virtchnl message. Returns 0 on success, negative on 1436 * failure. 1437 */ 1438 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) 1439 { 1440 struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL; 1441 struct virtchnl2_txq_info *qi __free(kfree) = NULL; 1442 struct idpf_vc_xn_params xn_params = {}; 1443 u32 config_sz, chunk_sz, buf_sz; 1444 int totqs, num_msgs, num_chunks; 1445 ssize_t reply_sz; 1446 int i, k = 0; 1447 1448 totqs = vport->num_txq + vport->num_complq; 1449 qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL); 1450 if (!qi) 1451 return -ENOMEM; 1452 1453 /* Populate the queue info buffer with all queue context info */ 1454 for (i = 0; i < vport->num_txq_grp; i++) { 1455 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1456 int j, sched_mode; 1457 1458 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1459 qi[k].queue_id = 1460 cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1461 qi[k].model = 1462 cpu_to_le16(vport->txq_model); 1463 qi[k].type = 1464 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); 1465 qi[k].ring_len = 1466 cpu_to_le16(tx_qgrp->txqs[j]->desc_count); 1467 qi[k].dma_ring_addr = 1468 cpu_to_le64(tx_qgrp->txqs[j]->dma); 1469 if (idpf_is_queue_model_split(vport->txq_model)) { 1470 struct idpf_tx_queue *q = tx_qgrp->txqs[j]; 1471 1472 qi[k].tx_compl_queue_id = 1473 cpu_to_le16(tx_qgrp->complq->q_id); 1474 qi[k].relative_queue_id = cpu_to_le16(j); 1475 1476 if (idpf_queue_has(FLOW_SCH_EN, q)) 1477 qi[k].sched_mode = 1478 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); 1479 else 1480 qi[k].sched_mode = 1481 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); 1482 } else { 1483 qi[k].sched_mode = 1484 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); 1485 } 1486 } 1487 1488 if (!idpf_is_queue_model_split(vport->txq_model)) 1489 continue; 1490 1491 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); 1492 qi[k].model = cpu_to_le16(vport->txq_model); 1493 qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); 1494 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); 
1495 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); 1496 1497 if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) 1498 sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; 1499 else 1500 sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; 1501 qi[k].sched_mode = cpu_to_le16(sched_mode); 1502 1503 k++; 1504 } 1505 1506 /* Make sure accounting agrees */ 1507 if (k != totqs) 1508 return -EINVAL; 1509 1510 /* Chunk up the queue contexts into multiple messages to avoid 1511 * sending a control queue message buffer that is too large 1512 */ 1513 config_sz = sizeof(struct virtchnl2_config_tx_queues); 1514 chunk_sz = sizeof(struct virtchnl2_txq_info); 1515 1516 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1517 totqs); 1518 num_msgs = DIV_ROUND_UP(totqs, num_chunks); 1519 1520 buf_sz = struct_size(ctq, qinfo, num_chunks); 1521 ctq = kzalloc(buf_sz, GFP_KERNEL); 1522 if (!ctq) 1523 return -ENOMEM; 1524 1525 xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES; 1526 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1527 1528 for (i = 0, k = 0; i < num_msgs; i++) { 1529 memset(ctq, 0, buf_sz); 1530 ctq->vport_id = cpu_to_le32(vport->vport_id); 1531 ctq->num_qinfo = cpu_to_le16(num_chunks); 1532 memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); 1533 1534 xn_params.send_buf.iov_base = ctq; 1535 xn_params.send_buf.iov_len = buf_sz; 1536 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1537 if (reply_sz < 0) 1538 return reply_sz; 1539 1540 k += num_chunks; 1541 totqs -= num_chunks; 1542 num_chunks = min(num_chunks, totqs); 1543 /* Recalculate buffer size */ 1544 buf_sz = struct_size(ctq, qinfo, num_chunks); 1545 } 1546 1547 return 0; 1548 } 1549 1550 /** 1551 * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message 1552 * @vport: virtual port data structure 1553 * 1554 * Send config rx queues virtchnl message. Returns 0 on success, negative on 1555 * failure. 
1556 */ 1557 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) 1558 { 1559 struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL; 1560 struct virtchnl2_rxq_info *qi __free(kfree) = NULL; 1561 struct idpf_vc_xn_params xn_params = {}; 1562 u32 config_sz, chunk_sz, buf_sz; 1563 int totqs, num_msgs, num_chunks; 1564 ssize_t reply_sz; 1565 int i, k = 0; 1566 1567 totqs = vport->num_rxq + vport->num_bufq; 1568 qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); 1569 if (!qi) 1570 return -ENOMEM; 1571 1572 /* Populate the queue info buffer with all queue context info */ 1573 for (i = 0; i < vport->num_rxq_grp; i++) { 1574 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1575 u16 num_rxq; 1576 int j; 1577 1578 if (!idpf_is_queue_model_split(vport->rxq_model)) 1579 goto setup_rxqs; 1580 1581 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { 1582 struct idpf_buf_queue *bufq = 1583 &rx_qgrp->splitq.bufq_sets[j].bufq; 1584 1585 qi[k].queue_id = cpu_to_le32(bufq->q_id); 1586 qi[k].model = cpu_to_le16(vport->rxq_model); 1587 qi[k].type = 1588 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1589 qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1590 qi[k].ring_len = cpu_to_le16(bufq->desc_count); 1591 qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); 1592 qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); 1593 qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE; 1594 qi[k].rx_buffer_low_watermark = 1595 cpu_to_le16(bufq->rx_buffer_low_watermark); 1596 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1597 qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1598 } 1599 1600 setup_rxqs: 1601 if (idpf_is_queue_model_split(vport->rxq_model)) 1602 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1603 else 1604 num_rxq = rx_qgrp->singleq.num_rxq; 1605 1606 for (j = 0; j < num_rxq; j++, k++) { 1607 const struct idpf_bufq_set *sets; 1608 struct idpf_rx_queue *rxq; 1609 1610 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1611 rxq = rx_qgrp->singleq.rxqs[j]; 1612 goto common_qi_fields; 1613 } 1614 1615 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1616 sets = rxq->bufq_sets; 1617 1618 /* In splitq mode, RXQ buffer size should be 1619 * set to that of the first buffer queue 1620 * associated with this RXQ. 
1621 */ 1622 rxq->rx_buf_size = sets[0].bufq.rx_buf_size; 1623 1624 qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); 1625 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { 1626 qi[k].bufq2_ena = IDPF_BUFQ2_ENA; 1627 qi[k].rx_bufq2_id = 1628 cpu_to_le16(sets[1].bufq.q_id); 1629 } 1630 qi[k].rx_buffer_low_watermark = 1631 cpu_to_le16(rxq->rx_buffer_low_watermark); 1632 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1633 qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1634 1635 rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; 1636 1637 if (idpf_queue_has(HSPLIT_EN, rxq)) { 1638 qi[k].qflags |= 1639 cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1640 qi[k].hdr_buffer_size = 1641 cpu_to_le16(rxq->rx_hbuf_size); 1642 } 1643 1644 common_qi_fields: 1645 qi[k].queue_id = cpu_to_le32(rxq->q_id); 1646 qi[k].model = cpu_to_le16(vport->rxq_model); 1647 qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1648 qi[k].ring_len = cpu_to_le16(rxq->desc_count); 1649 qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); 1650 qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); 1651 qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); 1652 qi[k].qflags |= 1653 cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); 1654 qi[k].desc_ids = cpu_to_le64(rxq->rxdids); 1655 } 1656 } 1657 1658 /* Make sure accounting agrees */ 1659 if (k != totqs) 1660 return -EINVAL; 1661 1662 /* Chunk up the queue contexts into multiple messages to avoid 1663 * sending a control queue message buffer that is too large 1664 */ 1665 config_sz = sizeof(struct virtchnl2_config_rx_queues); 1666 chunk_sz = sizeof(struct virtchnl2_rxq_info); 1667 1668 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1669 totqs); 1670 num_msgs = DIV_ROUND_UP(totqs, num_chunks); 1671 1672 buf_sz = struct_size(crq, qinfo, num_chunks); 1673 crq = kzalloc(buf_sz, GFP_KERNEL); 1674 if (!crq) 1675 return -ENOMEM; 1676 1677 xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES; 1678 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1679 1680 for (i = 0, k = 0; i < num_msgs; i++) { 1681 memset(crq, 0, buf_sz); 1682 crq->vport_id = cpu_to_le32(vport->vport_id); 1683 crq->num_qinfo = cpu_to_le16(num_chunks); 1684 memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); 1685 1686 xn_params.send_buf.iov_base = crq; 1687 xn_params.send_buf.iov_len = buf_sz; 1688 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1689 if (reply_sz < 0) 1690 return reply_sz; 1691 1692 k += num_chunks; 1693 totqs -= num_chunks; 1694 num_chunks = min(num_chunks, totqs); 1695 /* Recalculate buffer size */ 1696 buf_sz = struct_size(crq, qinfo, num_chunks); 1697 } 1698 1699 return 0; 1700 } 1701 1702 /** 1703 * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable 1704 * queues message 1705 * @vport: virtual port data structure 1706 * @ena: if true enable, false disable 1707 * 1708 * Send enable or disable queues virtchnl message. Returns 0 on success, 1709 * negative on failure. 
1710 */ 1711 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) 1712 { 1713 struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; 1714 struct virtchnl2_queue_chunk *qc __free(kfree) = NULL; 1715 u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; 1716 struct idpf_vc_xn_params xn_params = {}; 1717 struct virtchnl2_queue_chunks *qcs; 1718 u32 config_sz, chunk_sz, buf_sz; 1719 ssize_t reply_sz; 1720 int i, j, k = 0; 1721 1722 num_txq = vport->num_txq + vport->num_complq; 1723 num_rxq = vport->num_rxq + vport->num_bufq; 1724 num_q = num_txq + num_rxq; 1725 buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q; 1726 qc = kzalloc(buf_sz, GFP_KERNEL); 1727 if (!qc) 1728 return -ENOMEM; 1729 1730 for (i = 0; i < vport->num_txq_grp; i++) { 1731 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1732 1733 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1734 qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); 1735 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1736 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1737 } 1738 } 1739 if (vport->num_txq != k) 1740 return -EINVAL; 1741 1742 if (!idpf_is_queue_model_split(vport->txq_model)) 1743 goto setup_rx; 1744 1745 for (i = 0; i < vport->num_txq_grp; i++, k++) { 1746 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1747 1748 qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); 1749 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); 1750 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1751 } 1752 if (vport->num_complq != (k - vport->num_txq)) 1753 return -EINVAL; 1754 1755 setup_rx: 1756 for (i = 0; i < vport->num_rxq_grp; i++) { 1757 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1758 1759 if (idpf_is_queue_model_split(vport->rxq_model)) 1760 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1761 else 1762 num_rxq = rx_qgrp->singleq.num_rxq; 1763 1764 for (j = 0; j < num_rxq; j++, k++) { 1765 if (idpf_is_queue_model_split(vport->rxq_model)) { 1766 qc[k].start_queue_id = 1767 cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); 1768 qc[k].type = 1769 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1770 } else { 1771 qc[k].start_queue_id = 1772 cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); 1773 qc[k].type = 1774 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1775 } 1776 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1777 } 1778 } 1779 if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) 1780 return -EINVAL; 1781 1782 if (!idpf_is_queue_model_split(vport->rxq_model)) 1783 goto send_msg; 1784 1785 for (i = 0; i < vport->num_rxq_grp; i++) { 1786 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1787 1788 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { 1789 const struct idpf_buf_queue *q; 1790 1791 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1792 qc[k].type = 1793 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1794 qc[k].start_queue_id = cpu_to_le32(q->q_id); 1795 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1796 } 1797 } 1798 if (vport->num_bufq != k - (vport->num_txq + 1799 vport->num_complq + 1800 vport->num_rxq)) 1801 return -EINVAL; 1802 1803 send_msg: 1804 /* Chunk up the queue info into multiple messages */ 1805 config_sz = sizeof(struct virtchnl2_del_ena_dis_queues); 1806 chunk_sz = sizeof(struct virtchnl2_queue_chunk); 1807 1808 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1809 num_q); 1810 num_msgs = DIV_ROUND_UP(num_q, num_chunks); 1811 1812 buf_sz = struct_size(eq, chunks.chunks, num_chunks); 1813 eq = kzalloc(buf_sz, GFP_KERNEL); 1814 if 
(!eq) 1815 return -ENOMEM; 1816 1817 if (ena) { 1818 xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES; 1819 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1820 } else { 1821 xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES; 1822 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1823 } 1824 1825 for (i = 0, k = 0; i < num_msgs; i++) { 1826 memset(eq, 0, buf_sz); 1827 eq->vport_id = cpu_to_le32(vport->vport_id); 1828 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 1829 qcs = &eq->chunks; 1830 memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); 1831 1832 xn_params.send_buf.iov_base = eq; 1833 xn_params.send_buf.iov_len = buf_sz; 1834 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1835 if (reply_sz < 0) 1836 return reply_sz; 1837 1838 k += num_chunks; 1839 num_q -= num_chunks; 1840 num_chunks = min(num_chunks, num_q); 1841 /* Recalculate buffer size */ 1842 buf_sz = struct_size(eq, chunks.chunks, num_chunks); 1843 } 1844 1845 return 0; 1846 } 1847 1848 /** 1849 * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue 1850 * vector message 1851 * @vport: virtual port data structure 1852 * @map: true for map and false for unmap 1853 * 1854 * Send map or unmap queue vector virtchnl message. Returns 0 on success, 1855 * negative on failure. 1856 */ 1857 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) 1858 { 1859 struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL; 1860 struct virtchnl2_queue_vector *vqv __free(kfree) = NULL; 1861 struct idpf_vc_xn_params xn_params = {}; 1862 u32 config_sz, chunk_sz, buf_sz; 1863 u32 num_msgs, num_chunks, num_q; 1864 ssize_t reply_sz; 1865 int i, j, k = 0; 1866 1867 num_q = vport->num_txq + vport->num_rxq; 1868 1869 buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q; 1870 vqv = kzalloc(buf_sz, GFP_KERNEL); 1871 if (!vqv) 1872 return -ENOMEM; 1873 1874 for (i = 0; i < vport->num_txq_grp; i++) { 1875 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1876 1877 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1878 vqv[k].queue_type = 1879 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); 1880 vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1881 1882 if (idpf_is_queue_model_split(vport->txq_model)) { 1883 vqv[k].vector_id = 1884 cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); 1885 vqv[k].itr_idx = 1886 cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); 1887 } else { 1888 vqv[k].vector_id = 1889 cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); 1890 vqv[k].itr_idx = 1891 cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); 1892 } 1893 } 1894 } 1895 1896 if (vport->num_txq != k) 1897 return -EINVAL; 1898 1899 for (i = 0; i < vport->num_rxq_grp; i++) { 1900 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1901 u16 num_rxq; 1902 1903 if (idpf_is_queue_model_split(vport->rxq_model)) 1904 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1905 else 1906 num_rxq = rx_qgrp->singleq.num_rxq; 1907 1908 for (j = 0; j < num_rxq; j++, k++) { 1909 struct idpf_rx_queue *rxq; 1910 1911 if (idpf_is_queue_model_split(vport->rxq_model)) 1912 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1913 else 1914 rxq = rx_qgrp->singleq.rxqs[j]; 1915 1916 vqv[k].queue_type = 1917 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1918 vqv[k].queue_id = cpu_to_le32(rxq->q_id); 1919 vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); 1920 vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); 1921 } 1922 } 1923 1924 if (idpf_is_queue_model_split(vport->txq_model)) { 1925 if (vport->num_rxq != k - vport->num_complq) 1926 return -EINVAL; 1927 } else { 1928 
if (vport->num_rxq != k - vport->num_txq) 1929 return -EINVAL; 1930 } 1931 1932 /* Chunk up the vector info into multiple messages */ 1933 config_sz = sizeof(struct virtchnl2_queue_vector_maps); 1934 chunk_sz = sizeof(struct virtchnl2_queue_vector); 1935 1936 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1937 num_q); 1938 num_msgs = DIV_ROUND_UP(num_q, num_chunks); 1939 1940 buf_sz = struct_size(vqvm, qv_maps, num_chunks); 1941 vqvm = kzalloc(buf_sz, GFP_KERNEL); 1942 if (!vqvm) 1943 return -ENOMEM; 1944 1945 if (map) { 1946 xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR; 1947 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1948 } else { 1949 xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR; 1950 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1951 } 1952 1953 for (i = 0, k = 0; i < num_msgs; i++) { 1954 memset(vqvm, 0, buf_sz); 1955 xn_params.send_buf.iov_base = vqvm; 1956 xn_params.send_buf.iov_len = buf_sz; 1957 vqvm->vport_id = cpu_to_le32(vport->vport_id); 1958 vqvm->num_qv_maps = cpu_to_le16(num_chunks); 1959 memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); 1960 1961 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1962 if (reply_sz < 0) 1963 return reply_sz; 1964 1965 k += num_chunks; 1966 num_q -= num_chunks; 1967 num_chunks = min(num_chunks, num_q); 1968 /* Recalculate buffer size */ 1969 buf_sz = struct_size(vqvm, qv_maps, num_chunks); 1970 } 1971 1972 return 0; 1973 } 1974 1975 /** 1976 * idpf_send_enable_queues_msg - send enable queues virtchnl message 1977 * @vport: Virtual port private data structure 1978 * 1979 * Will send enable queues virtchnl message. Returns 0 on success, negative on 1980 * failure. 1981 */ 1982 int idpf_send_enable_queues_msg(struct idpf_vport *vport) 1983 { 1984 return idpf_send_ena_dis_queues_msg(vport, true); 1985 } 1986 1987 /** 1988 * idpf_send_disable_queues_msg - send disable queues virtchnl message 1989 * @vport: Virtual port private data structure 1990 * 1991 * Will send disable queues virtchnl message. Returns 0 on success, negative 1992 * on failure. 
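 *
 * Illustrative caller pattern during vport teardown (a sketch only, not part
 * of this driver; assumes @vport is valid and its queues are currently
 * enabled):
 *
 *	err = idpf_send_disable_queues_msg(vport);
 *	if (err)
 *		dev_err(&vport->adapter->pdev->dev,
 *			"Failed to disable queues: %d\n", err);
 *
 * Note the function itself already switches the Tx queues to poll mode and
 * kicks NAPI so the final marker packets can still be received.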
1993 */ 1994 int idpf_send_disable_queues_msg(struct idpf_vport *vport) 1995 { 1996 int err, i; 1997 1998 err = idpf_send_ena_dis_queues_msg(vport, false); 1999 if (err) 2000 return err; 2001 2002 /* switch to poll mode as interrupts will be disabled after disable 2003 * queues virtchnl message is sent 2004 */ 2005 for (i = 0; i < vport->num_txq; i++) 2006 idpf_queue_set(POLL_MODE, vport->txqs[i]); 2007 2008 /* schedule the napi to receive all the marker packets */ 2009 local_bh_disable(); 2010 for (i = 0; i < vport->num_q_vectors; i++) 2011 napi_schedule(&vport->q_vectors[i].napi); 2012 local_bh_enable(); 2013 2014 return idpf_wait_for_marker_event(vport); 2015 } 2016 2017 /** 2018 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right 2019 * structure 2020 * @dchunks: Destination chunks to store data to 2021 * @schunks: Source chunks to copy data from 2022 * @num_chunks: number of chunks to copy 2023 */ 2024 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, 2025 struct virtchnl2_queue_reg_chunk *schunks, 2026 u16 num_chunks) 2027 { 2028 u16 i; 2029 2030 for (i = 0; i < num_chunks; i++) { 2031 dchunks[i].type = schunks[i].type; 2032 dchunks[i].start_queue_id = schunks[i].start_queue_id; 2033 dchunks[i].num_queues = schunks[i].num_queues; 2034 } 2035 } 2036 2037 /** 2038 * idpf_send_delete_queues_msg - send delete queues virtchnl message 2039 * @vport: Virtual port private data structure 2040 * 2041 * Will send delete queues virtchnl message. Return 0 on success, negative on 2042 * failure. 2043 */ 2044 int idpf_send_delete_queues_msg(struct idpf_vport *vport) 2045 { 2046 struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; 2047 struct virtchnl2_create_vport *vport_params; 2048 struct virtchnl2_queue_reg_chunks *chunks; 2049 struct idpf_vc_xn_params xn_params = {}; 2050 struct idpf_vport_config *vport_config; 2051 u16 vport_idx = vport->idx; 2052 ssize_t reply_sz; 2053 u16 num_chunks; 2054 int buf_size; 2055 2056 vport_config = vport->adapter->vport_config[vport_idx]; 2057 if (vport_config->req_qs_chunks) { 2058 chunks = &vport_config->req_qs_chunks->chunks; 2059 } else { 2060 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 2061 chunks = &vport_params->chunks; 2062 } 2063 2064 num_chunks = le16_to_cpu(chunks->num_chunks); 2065 buf_size = struct_size(eq, chunks.chunks, num_chunks); 2066 2067 eq = kzalloc(buf_size, GFP_KERNEL); 2068 if (!eq) 2069 return -ENOMEM; 2070 2071 eq->vport_id = cpu_to_le32(vport->vport_id); 2072 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2073 2074 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, 2075 num_chunks); 2076 2077 xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES; 2078 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 2079 xn_params.send_buf.iov_base = eq; 2080 xn_params.send_buf.iov_len = buf_size; 2081 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2082 2083 return reply_sz < 0 ? reply_sz : 0; 2084 } 2085 2086 /** 2087 * idpf_send_config_queues_msg - Send config queues virtchnl message 2088 * @vport: Virtual port private data structure 2089 * 2090 * Will send config queues virtchnl message. Returns 0 on success, negative on 2091 * failure. 
2092 */ 2093 int idpf_send_config_queues_msg(struct idpf_vport *vport) 2094 { 2095 int err; 2096 2097 err = idpf_send_config_tx_queues_msg(vport); 2098 if (err) 2099 return err; 2100 2101 return idpf_send_config_rx_queues_msg(vport); 2102 } 2103 2104 /** 2105 * idpf_send_add_queues_msg - Send virtchnl add queues message 2106 * @vport: Virtual port private data structure 2107 * @num_tx_q: number of transmit queues 2108 * @num_complq: number of transmit completion queues 2109 * @num_rx_q: number of receive queues 2110 * @num_rx_bufq: number of receive buffer queues 2111 * 2112 * Returns 0 on success, negative on failure. vport _MUST_ be const here as 2113 * we should not change any fields within vport itself in this function. 2114 */ 2115 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, 2116 u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) 2117 { 2118 struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL; 2119 struct idpf_vc_xn_params xn_params = {}; 2120 struct idpf_vport_config *vport_config; 2121 struct virtchnl2_add_queues aq = {}; 2122 u16 vport_idx = vport->idx; 2123 ssize_t reply_sz; 2124 int size; 2125 2126 vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2127 if (!vc_msg) 2128 return -ENOMEM; 2129 2130 vport_config = vport->adapter->vport_config[vport_idx]; 2131 kfree(vport_config->req_qs_chunks); 2132 vport_config->req_qs_chunks = NULL; 2133 2134 aq.vport_id = cpu_to_le32(vport->vport_id); 2135 aq.num_tx_q = cpu_to_le16(num_tx_q); 2136 aq.num_tx_complq = cpu_to_le16(num_complq); 2137 aq.num_rx_q = cpu_to_le16(num_rx_q); 2138 aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); 2139 2140 xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES; 2141 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2142 xn_params.send_buf.iov_base = &aq; 2143 xn_params.send_buf.iov_len = sizeof(aq); 2144 xn_params.recv_buf.iov_base = vc_msg; 2145 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2146 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2147 if (reply_sz < 0) 2148 return reply_sz; 2149 2150 /* compare vc_msg num queues with vport num queues */ 2151 if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || 2152 le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || 2153 le16_to_cpu(vc_msg->num_tx_complq) != num_complq || 2154 le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) 2155 return -EINVAL; 2156 2157 size = struct_size(vc_msg, chunks.chunks, 2158 le16_to_cpu(vc_msg->chunks.num_chunks)); 2159 if (reply_sz < size) 2160 return -EIO; 2161 2162 vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); 2163 if (!vport_config->req_qs_chunks) 2164 return -ENOMEM; 2165 2166 return 0; 2167 } 2168 2169 /** 2170 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message 2171 * @adapter: Driver specific private structure 2172 * @num_vectors: number of vectors to be allocated 2173 * 2174 * Returns 0 on success, negative on failure. 
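 *
 * On success the (possibly chunked) vector information returned by the
 * device is cached in adapter->req_vec_chunks for later lookup. A minimal
 * caller sketch (illustrative only; num_req_vecs is a hypothetical vector
 * count computed elsewhere):
 *
 *	err = idpf_send_alloc_vectors_msg(adapter, num_req_vecs);
 *	if (err)
 *		return err;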
2175 */ 2176 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) 2177 { 2178 struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL; 2179 struct idpf_vc_xn_params xn_params = {}; 2180 struct virtchnl2_alloc_vectors ac = {}; 2181 ssize_t reply_sz; 2182 u16 num_vchunks; 2183 int size; 2184 2185 ac.num_vectors = cpu_to_le16(num_vectors); 2186 2187 rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2188 if (!rcvd_vec) 2189 return -ENOMEM; 2190 2191 xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS; 2192 xn_params.send_buf.iov_base = &ac; 2193 xn_params.send_buf.iov_len = sizeof(ac); 2194 xn_params.recv_buf.iov_base = rcvd_vec; 2195 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2196 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2197 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2198 if (reply_sz < 0) 2199 return reply_sz; 2200 2201 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); 2202 size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); 2203 if (reply_sz < size) 2204 return -EIO; 2205 2206 if (size > IDPF_CTLQ_MAX_BUF_LEN) 2207 return -EINVAL; 2208 2209 kfree(adapter->req_vec_chunks); 2210 adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); 2211 if (!adapter->req_vec_chunks) 2212 return -ENOMEM; 2213 2214 if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { 2215 kfree(adapter->req_vec_chunks); 2216 adapter->req_vec_chunks = NULL; 2217 return -EINVAL; 2218 } 2219 2220 return 0; 2221 } 2222 2223 /** 2224 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message 2225 * @adapter: Driver specific private structure 2226 * 2227 * Returns 0 on success, negative on failure. 2228 */ 2229 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) 2230 { 2231 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; 2232 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; 2233 struct idpf_vc_xn_params xn_params = {}; 2234 ssize_t reply_sz; 2235 int buf_size; 2236 2237 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); 2238 2239 xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS; 2240 xn_params.send_buf.iov_base = vcs; 2241 xn_params.send_buf.iov_len = buf_size; 2242 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 2243 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2244 if (reply_sz < 0) 2245 return reply_sz; 2246 2247 kfree(adapter->req_vec_chunks); 2248 adapter->req_vec_chunks = NULL; 2249 2250 return 0; 2251 } 2252 2253 /** 2254 * idpf_get_max_vfs - Get max number of vfs supported 2255 * @adapter: Driver specific private structure 2256 * 2257 * Returns max number of VFs 2258 */ 2259 static int idpf_get_max_vfs(struct idpf_adapter *adapter) 2260 { 2261 return le16_to_cpu(adapter->caps.max_sriov_vfs); 2262 } 2263 2264 /** 2265 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message 2266 * @adapter: Driver specific private structure 2267 * @num_vfs: number of virtual functions to be created 2268 * 2269 * Returns 0 on success, negative on failure.
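 *
 * Illustrative caller sketch (not part of this file; assumes the standard
 * PCI sriov_configure path where @num_vfs is supplied by the PCI core):
 *
 *	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
 *	if (err)
 *		dev_err(&adapter->pdev->dev,
 *			"Failed to configure SR-IOV VFs: %d\n", err);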
2270 */ 2271 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) 2272 { 2273 struct virtchnl2_sriov_vfs_info svi = {}; 2274 struct idpf_vc_xn_params xn_params = {}; 2275 ssize_t reply_sz; 2276 2277 svi.num_vfs = cpu_to_le16(num_vfs); 2278 xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS; 2279 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2280 xn_params.send_buf.iov_base = &svi; 2281 xn_params.send_buf.iov_len = sizeof(svi); 2282 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2283 2284 return reply_sz < 0 ? reply_sz : 0; 2285 } 2286 2287 /** 2288 * idpf_send_get_stats_msg - Send virtchnl get statistics message 2289 * @vport: vport to get stats for 2290 * 2291 * Returns 0 on success, negative on failure. 2292 */ 2293 int idpf_send_get_stats_msg(struct idpf_vport *vport) 2294 { 2295 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 2296 struct rtnl_link_stats64 *netstats = &np->netstats; 2297 struct virtchnl2_vport_stats stats_msg = {}; 2298 struct idpf_vc_xn_params xn_params = {}; 2299 ssize_t reply_sz; 2300 2301 2302 /* Don't send get_stats message if the link is down */ 2303 if (np->state <= __IDPF_VPORT_DOWN) 2304 return 0; 2305 2306 stats_msg.vport_id = cpu_to_le32(vport->vport_id); 2307 2308 xn_params.vc_op = VIRTCHNL2_OP_GET_STATS; 2309 xn_params.send_buf.iov_base = &stats_msg; 2310 xn_params.send_buf.iov_len = sizeof(stats_msg); 2311 xn_params.recv_buf = xn_params.send_buf; 2312 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2313 2314 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2315 if (reply_sz < 0) 2316 return reply_sz; 2317 if (reply_sz < sizeof(stats_msg)) 2318 return -EIO; 2319 2320 spin_lock_bh(&np->stats_lock); 2321 2322 netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + 2323 le64_to_cpu(stats_msg.rx_multicast) + 2324 le64_to_cpu(stats_msg.rx_broadcast); 2325 netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + 2326 le64_to_cpu(stats_msg.tx_multicast) + 2327 le64_to_cpu(stats_msg.tx_broadcast); 2328 netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); 2329 netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); 2330 netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); 2331 netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); 2332 netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); 2333 netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); 2334 2335 vport->port_stats.vport_stats = stats_msg; 2336 2337 spin_unlock_bh(&np->stats_lock); 2338 2339 return 0; 2340 } 2341 2342 /** 2343 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message 2344 * @vport: virtual port data structure 2345 * @get: flag to set or get rss look up table 2346 * 2347 * Returns 0 on success, negative on failure. 
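 *
 * Typical usage is a get/modify/set round trip on the cached user config
 * (sketch only; rss_data stands for the vport's user_config.rss_data and
 * new_qid is a hypothetical queue index):
 *
 *	err = idpf_send_get_set_rss_lut_msg(vport, true);
 *	if (err)
 *		return err;
 *	rss_data->rss_lut[0] = new_qid;
 *	err = idpf_send_get_set_rss_lut_msg(vport, false);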
2348 */ 2349 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) 2350 { 2351 struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL; 2352 struct virtchnl2_rss_lut *rl __free(kfree) = NULL; 2353 struct idpf_vc_xn_params xn_params = {}; 2354 struct idpf_rss_data *rss_data; 2355 int buf_size, lut_buf_size; 2356 ssize_t reply_sz; 2357 int i; 2358 2359 rss_data = 2360 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; 2361 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); 2362 rl = kzalloc(buf_size, GFP_KERNEL); 2363 if (!rl) 2364 return -ENOMEM; 2365 2366 rl->vport_id = cpu_to_le32(vport->vport_id); 2367 2368 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2369 xn_params.send_buf.iov_base = rl; 2370 xn_params.send_buf.iov_len = buf_size; 2371 2372 if (get) { 2373 recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2374 if (!recv_rl) 2375 return -ENOMEM; 2376 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT; 2377 xn_params.recv_buf.iov_base = recv_rl; 2378 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2379 } else { 2380 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); 2381 for (i = 0; i < rss_data->rss_lut_size; i++) 2382 rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); 2383 2384 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT; 2385 } 2386 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2387 if (reply_sz < 0) 2388 return reply_sz; 2389 if (!get) 2390 return 0; 2391 if (reply_sz < sizeof(struct virtchnl2_rss_lut)) 2392 return -EIO; 2393 2394 lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); 2395 if (reply_sz < lut_buf_size) 2396 return -EIO; 2397 2398 /* size didn't change, we can reuse existing lut buf */ 2399 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) 2400 goto do_memcpy; 2401 2402 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); 2403 kfree(rss_data->rss_lut); 2404 2405 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); 2406 if (!rss_data->rss_lut) { 2407 rss_data->rss_lut_size = 0; 2408 return -ENOMEM; 2409 } 2410 2411 do_memcpy: 2412 memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); 2413 2414 return 0; 2415 } 2416 2417 /** 2418 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message 2419 * @vport: virtual port data structure 2420 * @get: flag to set or get rss look up table 2421 * 2422 * Returns 0 on success, negative on failure 2423 */ 2424 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) 2425 { 2426 struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL; 2427 struct virtchnl2_rss_key *rk __free(kfree) = NULL; 2428 struct idpf_vc_xn_params xn_params = {}; 2429 struct idpf_rss_data *rss_data; 2430 ssize_t reply_sz; 2431 int i, buf_size; 2432 u16 key_size; 2433 2434 rss_data = 2435 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; 2436 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); 2437 rk = kzalloc(buf_size, GFP_KERNEL); 2438 if (!rk) 2439 return -ENOMEM; 2440 2441 rk->vport_id = cpu_to_le32(vport->vport_id); 2442 xn_params.send_buf.iov_base = rk; 2443 xn_params.send_buf.iov_len = buf_size; 2444 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2445 if (get) { 2446 recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2447 if (!recv_rk) 2448 return -ENOMEM; 2449 2450 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY; 2451 xn_params.recv_buf.iov_base = recv_rk; 2452 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2453 } else { 2454 rk->key_len = cpu_to_le16(rss_data->rss_key_size); 2455 for (i = 0; i 
< rss_data->rss_key_size; i++) 2456 rk->key_flex[i] = rss_data->rss_key[i]; 2457 2458 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY; 2459 } 2460 2461 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2462 if (reply_sz < 0) 2463 return reply_sz; 2464 if (!get) 2465 return 0; 2466 if (reply_sz < sizeof(struct virtchnl2_rss_key)) 2467 return -EIO; 2468 2469 key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 2470 le16_to_cpu(recv_rk->key_len)); 2471 if (reply_sz < key_size) 2472 return -EIO; 2473 2474 /* key len didn't change, reuse existing buf */ 2475 if (rss_data->rss_key_size == key_size) 2476 goto do_memcpy; 2477 2478 rss_data->rss_key_size = key_size; 2479 kfree(rss_data->rss_key); 2480 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); 2481 if (!rss_data->rss_key) { 2482 rss_data->rss_key_size = 0; 2483 return -ENOMEM; 2484 } 2485 2486 do_memcpy: 2487 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); 2488 2489 return 0; 2490 } 2491 2492 /** 2493 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table 2494 * @ptype: ptype lookup table 2495 * @pstate: state machine for ptype lookup table 2496 * @ipv4: ipv4 or ipv6 2497 * @frag: fragmentation allowed 2498 * 2499 */ 2500 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, 2501 struct idpf_ptype_state *pstate, 2502 bool ipv4, bool frag) 2503 { 2504 if (!pstate->outer_ip || !pstate->outer_frag) { 2505 pstate->outer_ip = true; 2506 2507 if (ipv4) 2508 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; 2509 else 2510 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; 2511 2512 if (frag) { 2513 ptype->outer_frag = LIBETH_RX_PT_FRAG; 2514 pstate->outer_frag = true; 2515 } 2516 } else { 2517 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; 2518 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; 2519 2520 if (ipv4) 2521 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; 2522 else 2523 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; 2524 2525 if (frag) 2526 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; 2527 } 2528 } 2529 2530 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) 2531 { 2532 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 2533 ptype->inner_prot) 2534 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; 2535 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 2536 ptype->outer_ip) 2537 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; 2538 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) 2539 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; 2540 else 2541 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; 2542 2543 libeth_rx_pt_gen_hash_type(ptype); 2544 } 2545 2546 /** 2547 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info 2548 * @vport: virtual port data structure 2549 * 2550 * Returns 0 on success, negative on failure. 
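 *
 * The negotiated table is later consumed on the Rx hot path, roughly as in
 * this sketch (illustrative only; rx_ptype is a hypothetical packet type id
 * parsed from an Rx descriptor):
 *
 *	const struct libeth_rx_pt *pt = &vport->rx_ptype_lkup[rx_ptype];
 *	bool is_tcp = pt->inner_prot == LIBETH_RX_PT_INNER_TCP;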
2551 */ 2552 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) 2553 { 2554 struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; 2555 struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; 2556 struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL; 2557 int max_ptype, ptypes_recvd = 0, ptype_offset; 2558 struct idpf_adapter *adapter = vport->adapter; 2559 struct idpf_vc_xn_params xn_params = {}; 2560 u16 next_ptype_id = 0; 2561 ssize_t reply_sz; 2562 int i, j, k; 2563 2564 if (vport->rx_ptype_lkup) 2565 return 0; 2566 2567 if (idpf_is_queue_model_split(vport->rxq_model)) 2568 max_ptype = IDPF_RX_MAX_PTYPE; 2569 else 2570 max_ptype = IDPF_RX_MAX_BASE_PTYPE; 2571 2572 ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL); 2573 if (!ptype_lkup) 2574 return -ENOMEM; 2575 2576 get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); 2577 if (!get_ptype_info) 2578 return -ENOMEM; 2579 2580 ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2581 if (!ptype_info) 2582 return -ENOMEM; 2583 2584 xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO; 2585 xn_params.send_buf.iov_base = get_ptype_info; 2586 xn_params.send_buf.iov_len = sizeof(*get_ptype_info); 2587 xn_params.recv_buf.iov_base = ptype_info; 2588 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2589 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2590 2591 while (next_ptype_id < max_ptype) { 2592 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); 2593 2594 if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) 2595 get_ptype_info->num_ptypes = 2596 cpu_to_le16(max_ptype - next_ptype_id); 2597 else 2598 get_ptype_info->num_ptypes = 2599 cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); 2600 2601 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2602 if (reply_sz < 0) 2603 return reply_sz; 2604 2605 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); 2606 if (ptypes_recvd > max_ptype) 2607 return -EINVAL; 2608 2609 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + 2610 le16_to_cpu(get_ptype_info->num_ptypes); 2611 2612 ptype_offset = IDPF_RX_PTYPE_HDR_SZ; 2613 2614 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { 2615 struct idpf_ptype_state pstate = { }; 2616 struct virtchnl2_ptype *ptype; 2617 u16 id; 2618 2619 ptype = (struct virtchnl2_ptype *) 2620 ((u8 *)ptype_info + ptype_offset); 2621 2622 ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); 2623 if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) 2624 return -EINVAL; 2625 2626 /* 0xFFFF indicates end of ptypes */ 2627 if (le16_to_cpu(ptype->ptype_id_10) == 2628 IDPF_INVALID_PTYPE_ID) 2629 goto out; 2630 2631 if (idpf_is_queue_model_split(vport->rxq_model)) 2632 k = le16_to_cpu(ptype->ptype_id_10); 2633 else 2634 k = ptype->ptype_id_8; 2635 2636 for (j = 0; j < ptype->proto_id_count; j++) { 2637 id = le16_to_cpu(ptype->proto_id[j]); 2638 switch (id) { 2639 case VIRTCHNL2_PROTO_HDR_GRE: 2640 if (pstate.tunnel_state == 2641 IDPF_PTYPE_TUNNEL_IP) { 2642 ptype_lkup[k].tunnel_type = 2643 LIBETH_RX_PT_TUNNEL_IP_GRENAT; 2644 pstate.tunnel_state |= 2645 IDPF_PTYPE_TUNNEL_IP_GRENAT; 2646 } 2647 break; 2648 case VIRTCHNL2_PROTO_HDR_MAC: 2649 ptype_lkup[k].outer_ip = 2650 LIBETH_RX_PT_OUTER_L2; 2651 if (pstate.tunnel_state == 2652 IDPF_TUN_IP_GRE) { 2653 ptype_lkup[k].tunnel_type = 2654 LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; 2655 pstate.tunnel_state |= 2656 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; 2657 } 2658 break; 2659 case VIRTCHNL2_PROTO_HDR_IPV4: 2660 idpf_fill_ptype_lookup(&ptype_lkup[k], 2661 &pstate, true, 2662 false); 2663 
break; 2664 case VIRTCHNL2_PROTO_HDR_IPV6: 2665 idpf_fill_ptype_lookup(&ptype_lkup[k], 2666 &pstate, false, 2667 false); 2668 break; 2669 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: 2670 idpf_fill_ptype_lookup(&ptype_lkup[k], 2671 &pstate, true, 2672 true); 2673 break; 2674 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: 2675 idpf_fill_ptype_lookup(&ptype_lkup[k], 2676 &pstate, false, 2677 true); 2678 break; 2679 case VIRTCHNL2_PROTO_HDR_UDP: 2680 ptype_lkup[k].inner_prot = 2681 LIBETH_RX_PT_INNER_UDP; 2682 break; 2683 case VIRTCHNL2_PROTO_HDR_TCP: 2684 ptype_lkup[k].inner_prot = 2685 LIBETH_RX_PT_INNER_TCP; 2686 break; 2687 case VIRTCHNL2_PROTO_HDR_SCTP: 2688 ptype_lkup[k].inner_prot = 2689 LIBETH_RX_PT_INNER_SCTP; 2690 break; 2691 case VIRTCHNL2_PROTO_HDR_ICMP: 2692 ptype_lkup[k].inner_prot = 2693 LIBETH_RX_PT_INNER_ICMP; 2694 break; 2695 case VIRTCHNL2_PROTO_HDR_PAY: 2696 ptype_lkup[k].payload_layer = 2697 LIBETH_RX_PT_PAYLOAD_L2; 2698 break; 2699 case VIRTCHNL2_PROTO_HDR_ICMPV6: 2700 case VIRTCHNL2_PROTO_HDR_IPV6_EH: 2701 case VIRTCHNL2_PROTO_HDR_PRE_MAC: 2702 case VIRTCHNL2_PROTO_HDR_POST_MAC: 2703 case VIRTCHNL2_PROTO_HDR_ETHERTYPE: 2704 case VIRTCHNL2_PROTO_HDR_SVLAN: 2705 case VIRTCHNL2_PROTO_HDR_CVLAN: 2706 case VIRTCHNL2_PROTO_HDR_MPLS: 2707 case VIRTCHNL2_PROTO_HDR_MMPLS: 2708 case VIRTCHNL2_PROTO_HDR_PTP: 2709 case VIRTCHNL2_PROTO_HDR_CTRL: 2710 case VIRTCHNL2_PROTO_HDR_LLDP: 2711 case VIRTCHNL2_PROTO_HDR_ARP: 2712 case VIRTCHNL2_PROTO_HDR_ECP: 2713 case VIRTCHNL2_PROTO_HDR_EAPOL: 2714 case VIRTCHNL2_PROTO_HDR_PPPOD: 2715 case VIRTCHNL2_PROTO_HDR_PPPOE: 2716 case VIRTCHNL2_PROTO_HDR_IGMP: 2717 case VIRTCHNL2_PROTO_HDR_AH: 2718 case VIRTCHNL2_PROTO_HDR_ESP: 2719 case VIRTCHNL2_PROTO_HDR_IKE: 2720 case VIRTCHNL2_PROTO_HDR_NATT_KEEP: 2721 case VIRTCHNL2_PROTO_HDR_L2TPV2: 2722 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: 2723 case VIRTCHNL2_PROTO_HDR_L2TPV3: 2724 case VIRTCHNL2_PROTO_HDR_GTP: 2725 case VIRTCHNL2_PROTO_HDR_GTP_EH: 2726 case VIRTCHNL2_PROTO_HDR_GTPCV2: 2727 case VIRTCHNL2_PROTO_HDR_GTPC_TEID: 2728 case VIRTCHNL2_PROTO_HDR_GTPU: 2729 case VIRTCHNL2_PROTO_HDR_GTPU_UL: 2730 case VIRTCHNL2_PROTO_HDR_GTPU_DL: 2731 case VIRTCHNL2_PROTO_HDR_ECPRI: 2732 case VIRTCHNL2_PROTO_HDR_VRRP: 2733 case VIRTCHNL2_PROTO_HDR_OSPF: 2734 case VIRTCHNL2_PROTO_HDR_TUN: 2735 case VIRTCHNL2_PROTO_HDR_NVGRE: 2736 case VIRTCHNL2_PROTO_HDR_VXLAN: 2737 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: 2738 case VIRTCHNL2_PROTO_HDR_GENEVE: 2739 case VIRTCHNL2_PROTO_HDR_NSH: 2740 case VIRTCHNL2_PROTO_HDR_QUIC: 2741 case VIRTCHNL2_PROTO_HDR_PFCP: 2742 case VIRTCHNL2_PROTO_HDR_PFCP_NODE: 2743 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: 2744 case VIRTCHNL2_PROTO_HDR_RTP: 2745 case VIRTCHNL2_PROTO_HDR_NO_PROTO: 2746 break; 2747 default: 2748 break; 2749 } 2750 } 2751 2752 idpf_finalize_ptype_lookup(&ptype_lkup[k]); 2753 } 2754 } 2755 2756 out: 2757 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); 2758 2759 return 0; 2760 } 2761 2762 /** 2763 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback 2764 * message 2765 * @vport: virtual port data structure 2766 * 2767 * Returns 0 on success, negative on failure. 
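 *
 * The enable/disable state is derived from the NETIF_F_LOOPBACK feature, so
 * a caller only needs to resend the message after that feature changes
 * (sketch only; changed_features is a hypothetical netdev_features_t diff):
 *
 *	if (changed_features & NETIF_F_LOOPBACK)
 *		err = idpf_send_ena_dis_loopback_msg(vport);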
2768 */ 2769 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) 2770 { 2771 struct idpf_vc_xn_params xn_params = {}; 2772 struct virtchnl2_loopback loopback; 2773 ssize_t reply_sz; 2774 2775 loopback.vport_id = cpu_to_le32(vport->vport_id); 2776 loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); 2777 2778 xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK; 2779 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2780 xn_params.send_buf.iov_base = &loopback; 2781 xn_params.send_buf.iov_len = sizeof(loopback); 2782 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2783 2784 return reply_sz < 0 ? reply_sz : 0; 2785 } 2786 2787 /** 2788 * idpf_find_ctlq - Given a type and id, find ctlq info 2789 * @hw: hardware struct 2790 * @type: type of ctrlq to find 2791 * @id: ctlq id to find 2792 * 2793 * Returns pointer to found ctlq info struct, NULL otherwise. 2794 */ 2795 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, 2796 enum idpf_ctlq_type type, int id) 2797 { 2798 struct idpf_ctlq_info *cq, *tmp; 2799 2800 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) 2801 if (cq->q_id == id && cq->cq_type == type) 2802 return cq; 2803 2804 return NULL; 2805 } 2806 2807 /** 2808 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request 2809 * @adapter: adapter info struct 2810 * 2811 * Returns 0 on success, negative otherwise 2812 */ 2813 int idpf_init_dflt_mbx(struct idpf_adapter *adapter) 2814 { 2815 struct idpf_ctlq_create_info ctlq_info[] = { 2816 { 2817 .type = IDPF_CTLQ_TYPE_MAILBOX_TX, 2818 .id = IDPF_DFLT_MBX_ID, 2819 .len = IDPF_DFLT_MBX_Q_LEN, 2820 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 2821 }, 2822 { 2823 .type = IDPF_CTLQ_TYPE_MAILBOX_RX, 2824 .id = IDPF_DFLT_MBX_ID, 2825 .len = IDPF_DFLT_MBX_Q_LEN, 2826 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 2827 } 2828 }; 2829 struct idpf_hw *hw = &adapter->hw; 2830 int err; 2831 2832 adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); 2833 2834 err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); 2835 if (err) 2836 return err; 2837 2838 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, 2839 IDPF_DFLT_MBX_ID); 2840 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, 2841 IDPF_DFLT_MBX_ID); 2842 2843 if (!hw->asq || !hw->arq) { 2844 idpf_ctlq_deinit(hw); 2845 2846 return -ENOENT; 2847 } 2848 2849 adapter->state = __IDPF_VER_CHECK; 2850 2851 return 0; 2852 } 2853 2854 /** 2855 * idpf_deinit_dflt_mbx - Free up ctlqs setup 2856 * @adapter: Driver specific private data structure 2857 */ 2858 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) 2859 { 2860 if (adapter->hw.arq && adapter->hw.asq) { 2861 idpf_mb_clean(adapter); 2862 idpf_ctlq_deinit(&adapter->hw); 2863 } 2864 adapter->hw.arq = NULL; 2865 adapter->hw.asq = NULL; 2866 } 2867 2868 /** 2869 * idpf_vport_params_buf_rel - Release memory for MailBox resources 2870 * @adapter: Driver specific private data structure 2871 * 2872 * Will release memory to hold the vport parameters received on MailBox 2873 */ 2874 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) 2875 { 2876 kfree(adapter->vport_params_recvd); 2877 adapter->vport_params_recvd = NULL; 2878 kfree(adapter->vport_params_reqd); 2879 adapter->vport_params_reqd = NULL; 2880 kfree(adapter->vport_ids); 2881 adapter->vport_ids = NULL; 2882 } 2883 2884 /** 2885 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources 2886 * @adapter: Driver specific private data structure 2887 * 2888 * Will alloc memory to hold the vport parameters received on MailBox 2889 
*/ 2890 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) 2891 { 2892 u16 num_max_vports = idpf_get_max_vports(adapter); 2893 2894 adapter->vport_params_reqd = kcalloc(num_max_vports, 2895 sizeof(*adapter->vport_params_reqd), 2896 GFP_KERNEL); 2897 if (!adapter->vport_params_reqd) 2898 return -ENOMEM; 2899 2900 adapter->vport_params_recvd = kcalloc(num_max_vports, 2901 sizeof(*adapter->vport_params_recvd), 2902 GFP_KERNEL); 2903 if (!adapter->vport_params_recvd) 2904 goto err_mem; 2905 2906 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); 2907 if (!adapter->vport_ids) 2908 goto err_mem; 2909 2910 if (adapter->vport_config) 2911 return 0; 2912 2913 adapter->vport_config = kcalloc(num_max_vports, 2914 sizeof(*adapter->vport_config), 2915 GFP_KERNEL); 2916 if (!adapter->vport_config) 2917 goto err_mem; 2918 2919 return 0; 2920 2921 err_mem: 2922 idpf_vport_params_buf_rel(adapter); 2923 2924 return -ENOMEM; 2925 } 2926 2927 /** 2928 * idpf_vc_core_init - Initialize state machine and get driver specific 2929 * resources 2930 * @adapter: Driver specific private structure 2931 * 2932 * This function will initialize the state machine and request all necessary 2933 * resources required by the device driver. Once the state machine is 2934 * initialized, allocate memory to store vport specific information and also 2935 * requests required interrupts. 2936 * 2937 * Returns 0 on success, -EAGAIN function will get called again, 2938 * otherwise negative on failure. 2939 */ 2940 int idpf_vc_core_init(struct idpf_adapter *adapter) 2941 { 2942 int task_delay = 30; 2943 u16 num_max_vports; 2944 int err = 0; 2945 2946 if (!adapter->vcxn_mngr) { 2947 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); 2948 if (!adapter->vcxn_mngr) { 2949 err = -ENOMEM; 2950 goto init_failed; 2951 } 2952 } 2953 idpf_vc_xn_init(adapter->vcxn_mngr); 2954 2955 while (adapter->state != __IDPF_INIT_SW) { 2956 switch (adapter->state) { 2957 case __IDPF_VER_CHECK: 2958 err = idpf_send_ver_msg(adapter); 2959 switch (err) { 2960 case 0: 2961 /* success, move state machine forward */ 2962 adapter->state = __IDPF_GET_CAPS; 2963 fallthrough; 2964 case -EAGAIN: 2965 goto restart; 2966 default: 2967 /* Something bad happened, try again but only a 2968 * few times. 
2969 */ 2970 goto init_failed; 2971 } 2972 case __IDPF_GET_CAPS: 2973 err = idpf_send_get_caps_msg(adapter); 2974 if (err) 2975 goto init_failed; 2976 adapter->state = __IDPF_INIT_SW; 2977 break; 2978 default: 2979 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", 2980 adapter->state); 2981 err = -EINVAL; 2982 goto init_failed; 2983 } 2984 break; 2985 restart: 2986 /* Give enough time before proceeding further with 2987 * state machine 2988 */ 2989 msleep(task_delay); 2990 } 2991 2992 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 2993 num_max_vports = idpf_get_max_vports(adapter); 2994 adapter->max_vports = num_max_vports; 2995 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), 2996 GFP_KERNEL); 2997 if (!adapter->vports) 2998 return -ENOMEM; 2999 3000 if (!adapter->netdevs) { 3001 adapter->netdevs = kcalloc(num_max_vports, 3002 sizeof(struct net_device *), 3003 GFP_KERNEL); 3004 if (!adapter->netdevs) { 3005 err = -ENOMEM; 3006 goto err_netdev_alloc; 3007 } 3008 } 3009 3010 err = idpf_vport_params_buf_alloc(adapter); 3011 if (err) { 3012 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", 3013 err); 3014 goto err_netdev_alloc; 3015 } 3016 3017 /* Start the mailbox task before requesting vectors. This will ensure 3018 * vector information response from mailbox is handled 3019 */ 3020 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 3021 3022 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 3023 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3024 3025 err = idpf_intr_req(adapter); 3026 if (err) { 3027 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", 3028 err); 3029 goto err_intr_req; 3030 } 3031 3032 idpf_init_avail_queues(adapter); 3033 3034 /* Skew the delay for init tasks for each function based on fn number 3035 * to prevent every function from making the same call simultaneously. 3036 */ 3037 queue_delayed_work(adapter->init_wq, &adapter->init_task, 3038 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3039 3040 set_bit(IDPF_VC_CORE_INIT, adapter->flags); 3041 3042 return 0; 3043 3044 err_intr_req: 3045 cancel_delayed_work_sync(&adapter->serv_task); 3046 cancel_delayed_work_sync(&adapter->mbx_task); 3047 idpf_vport_params_buf_rel(adapter); 3048 err_netdev_alloc: 3049 kfree(adapter->vports); 3050 adapter->vports = NULL; 3051 return err; 3052 3053 init_failed: 3054 /* Don't retry if we're trying to go down, just bail. */ 3055 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 3056 return err; 3057 3058 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { 3059 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); 3060 3061 return -EFAULT; 3062 } 3063 /* If it reached here, it is possible that mailbox queue initialization 3064 * register writes might not have taken effect. 
Retry to initialize 3065 * the mailbox again 3066 */ 3067 adapter->state = __IDPF_VER_CHECK; 3068 if (adapter->vcxn_mngr) 3069 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3070 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); 3071 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, 3072 msecs_to_jiffies(task_delay)); 3073 3074 return -EAGAIN; 3075 } 3076 3077 /** 3078 * idpf_vc_core_deinit - Device deinit routine 3079 * @adapter: Driver specific private structure 3080 * 3081 */ 3082 void idpf_vc_core_deinit(struct idpf_adapter *adapter) 3083 { 3084 bool remove_in_prog; 3085 3086 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) 3087 return; 3088 3089 /* Avoid transaction timeouts when called during reset */ 3090 remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); 3091 if (!remove_in_prog) 3092 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3093 3094 idpf_deinit_task(adapter); 3095 idpf_intr_rel(adapter); 3096 3097 if (remove_in_prog) 3098 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3099 3100 cancel_delayed_work_sync(&adapter->serv_task); 3101 cancel_delayed_work_sync(&adapter->mbx_task); 3102 3103 idpf_vport_params_buf_rel(adapter); 3104 3105 kfree(adapter->vports); 3106 adapter->vports = NULL; 3107 3108 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); 3109 } 3110 3111 /** 3112 * idpf_vport_alloc_vec_indexes - Get relative vector indexes 3113 * @vport: virtual port data struct 3114 * 3115 * This function requests the vector information required for the vport and 3116 * stores the vector indexes received from the 'global vector distribution' 3117 * in the vport's queue vectors array. 3118 * 3119 * Return 0 on success, error on failure 3120 */ 3121 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) 3122 { 3123 struct idpf_vector_info vec_info; 3124 int num_alloc_vecs; 3125 3126 vec_info.num_curr_vecs = vport->num_q_vectors; 3127 vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); 3128 vec_info.default_vport = vport->default_vport; 3129 vec_info.index = vport->idx; 3130 3131 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, 3132 vport->q_vector_idxs, 3133 &vec_info); 3134 if (num_alloc_vecs <= 0) { 3135 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", 3136 num_alloc_vecs); 3137 return -EINVAL; 3138 } 3139 3140 vport->num_q_vectors = num_alloc_vecs; 3141 3142 return 0; 3143 } 3144 3145 /** 3146 * idpf_vport_init - Initialize virtual port 3147 * @vport: virtual port to be initialized 3148 * @max_q: vport max queue info 3149 * 3150 * Will initialize vport with the info received through MB earlier 3151 */ 3152 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) 3153 { 3154 struct idpf_adapter *adapter = vport->adapter; 3155 struct virtchnl2_create_vport *vport_msg; 3156 struct idpf_vport_config *vport_config; 3157 u16 tx_itr[] = {2, 8, 64, 128, 256}; 3158 u16 rx_itr[] = {2, 8, 32, 96, 128}; 3159 struct idpf_rss_data *rss_data; 3160 u16 idx = vport->idx; 3161 3162 vport_config = adapter->vport_config[idx]; 3163 rss_data = &vport_config->user_config.rss_data; 3164 vport_msg = adapter->vport_params_recvd[idx]; 3165 3166 vport_config->max_q.max_txq = max_q->max_txq; 3167 vport_config->max_q.max_rxq = max_q->max_rxq; 3168 vport_config->max_q.max_complq = max_q->max_complq; 3169 vport_config->max_q.max_bufq = max_q->max_bufq; 3170 3171 vport->txq_model = le16_to_cpu(vport_msg->txq_model); 3172 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); 3173 vport->vport_type = le16_to_cpu(vport_msg->vport_type); 3174 
vport->vport_id = le32_to_cpu(vport_msg->vport_id); 3175 3176 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 3177 le16_to_cpu(vport_msg->rss_key_size)); 3178 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); 3179 3180 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); 3181 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; 3182 3183 /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ 3184 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); 3185 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); 3186 3187 idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); 3188 3189 idpf_vport_init_num_qs(vport, vport_msg); 3190 idpf_vport_calc_num_q_desc(vport); 3191 idpf_vport_calc_num_q_groups(vport); 3192 idpf_vport_alloc_vec_indexes(vport); 3193 3194 vport->crc_enable = adapter->crc_enable; 3195 } 3196 3197 /** 3198 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters 3199 * @adapter: adapter structure to get the mailbox vector id 3200 * @vecids: Array of vector ids 3201 * @num_vecids: number of vector ids 3202 * @chunks: vector ids received over mailbox 3203 * 3204 * Will initialize the mailbox vector id which is received from the 3205 * get capabilities and data queue vector ids with ids received as 3206 * mailbox parameters. 3207 * Returns number of ids filled 3208 */ 3209 int idpf_get_vec_ids(struct idpf_adapter *adapter, 3210 u16 *vecids, int num_vecids, 3211 struct virtchnl2_vector_chunks *chunks) 3212 { 3213 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); 3214 int num_vecid_filled = 0; 3215 int i, j; 3216 3217 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; 3218 num_vecid_filled++; 3219 3220 for (j = 0; j < num_chunks; j++) { 3221 struct virtchnl2_vector_chunk *chunk; 3222 u16 start_vecid, num_vec; 3223 3224 chunk = &chunks->vchunks[j]; 3225 num_vec = le16_to_cpu(chunk->num_vectors); 3226 start_vecid = le16_to_cpu(chunk->start_vector_id); 3227 3228 for (i = 0; i < num_vec; i++) { 3229 if ((num_vecid_filled + i) < num_vecids) { 3230 vecids[num_vecid_filled + i] = start_vecid; 3231 start_vecid++; 3232 } else { 3233 break; 3234 } 3235 } 3236 num_vecid_filled = num_vecid_filled + i; 3237 } 3238 3239 return num_vecid_filled; 3240 } 3241 3242 /** 3243 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters 3244 * @qids: Array of queue ids 3245 * @num_qids: number of queue ids 3246 * @q_type: queue model 3247 * @chunks: queue ids received over mailbox 3248 * 3249 * Will initialize all queue ids with ids received as mailbox parameters 3250 * Returns number of ids filled 3251 */ 3252 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, 3253 struct virtchnl2_queue_reg_chunks *chunks) 3254 { 3255 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 3256 u32 num_q_id_filled = 0, i; 3257 u32 start_q_id, num_q; 3258 3259 while (num_chunks--) { 3260 struct virtchnl2_queue_reg_chunk *chunk; 3261 3262 chunk = &chunks->chunks[num_chunks]; 3263 if (le32_to_cpu(chunk->type) != q_type) 3264 continue; 3265 3266 num_q = le32_to_cpu(chunk->num_queues); 3267 start_q_id = le32_to_cpu(chunk->start_queue_id); 3268 3269 for (i = 0; i < num_q; i++) { 3270 if ((num_q_id_filled + i) < num_qids) { 3271 qids[num_q_id_filled + i] = start_q_id; 3272 start_q_id++; 3273 } else { 3274 break; 3275 } 3276 } 3277 num_q_id_filled = num_q_id_filled + i; 3278 } 3279 3280 return num_q_id_filled; 3281 } 3282 3283 /** 3284 * __idpf_vport_queue_ids_init - Initialize queue ids from 
Mailbox parameters 3285 * @vport: virtual port for which the queues ids are initialized 3286 * @qids: queue ids 3287 * @num_qids: number of queue ids 3288 * @q_type: type of queue 3289 * 3290 * Will initialize all queue ids with ids received as mailbox 3291 * parameters. Returns number of queue ids initialized. 3292 */ 3293 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, 3294 const u32 *qids, 3295 int num_qids, 3296 u32 q_type) 3297 { 3298 int i, j, k = 0; 3299 3300 switch (q_type) { 3301 case VIRTCHNL2_QUEUE_TYPE_TX: 3302 for (i = 0; i < vport->num_txq_grp; i++) { 3303 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 3304 3305 for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) 3306 tx_qgrp->txqs[j]->q_id = qids[k]; 3307 } 3308 break; 3309 case VIRTCHNL2_QUEUE_TYPE_RX: 3310 for (i = 0; i < vport->num_rxq_grp; i++) { 3311 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3312 u16 num_rxq; 3313 3314 if (idpf_is_queue_model_split(vport->rxq_model)) 3315 num_rxq = rx_qgrp->splitq.num_rxq_sets; 3316 else 3317 num_rxq = rx_qgrp->singleq.num_rxq; 3318 3319 for (j = 0; j < num_rxq && k < num_qids; j++, k++) { 3320 struct idpf_rx_queue *q; 3321 3322 if (idpf_is_queue_model_split(vport->rxq_model)) 3323 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 3324 else 3325 q = rx_qgrp->singleq.rxqs[j]; 3326 q->q_id = qids[k]; 3327 } 3328 } 3329 break; 3330 case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: 3331 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { 3332 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 3333 3334 tx_qgrp->complq->q_id = qids[k]; 3335 } 3336 break; 3337 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 3338 for (i = 0; i < vport->num_rxq_grp; i++) { 3339 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3340 u8 num_bufqs = vport->num_bufqs_per_qgrp; 3341 3342 for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { 3343 struct idpf_buf_queue *q; 3344 3345 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 3346 q->q_id = qids[k]; 3347 } 3348 } 3349 break; 3350 default: 3351 break; 3352 } 3353 3354 return k; 3355 } 3356 3357 /** 3358 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters 3359 * @vport: virtual port for which the queues ids are initialized 3360 * 3361 * Will initialize all queue ids with ids received as mailbox parameters. 3362 * Returns 0 on success, negative if all the queues are not initialized. 
3363 */ 3364 int idpf_vport_queue_ids_init(struct idpf_vport *vport) 3365 { 3366 struct virtchnl2_create_vport *vport_params; 3367 struct virtchnl2_queue_reg_chunks *chunks; 3368 struct idpf_vport_config *vport_config; 3369 u16 vport_idx = vport->idx; 3370 int num_ids, err = 0; 3371 u16 q_type; 3372 u32 *qids; 3373 3374 vport_config = vport->adapter->vport_config[vport_idx]; 3375 if (vport_config->req_qs_chunks) { 3376 struct virtchnl2_add_queues *vc_aq = 3377 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; 3378 chunks = &vc_aq->chunks; 3379 } else { 3380 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 3381 chunks = &vport_params->chunks; 3382 } 3383 3384 qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL); 3385 if (!qids) 3386 return -ENOMEM; 3387 3388 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, 3389 VIRTCHNL2_QUEUE_TYPE_TX, 3390 chunks); 3391 if (num_ids < vport->num_txq) { 3392 err = -EINVAL; 3393 goto mem_rel; 3394 } 3395 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, 3396 VIRTCHNL2_QUEUE_TYPE_TX); 3397 if (num_ids < vport->num_txq) { 3398 err = -EINVAL; 3399 goto mem_rel; 3400 } 3401 3402 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, 3403 VIRTCHNL2_QUEUE_TYPE_RX, 3404 chunks); 3405 if (num_ids < vport->num_rxq) { 3406 err = -EINVAL; 3407 goto mem_rel; 3408 } 3409 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, 3410 VIRTCHNL2_QUEUE_TYPE_RX); 3411 if (num_ids < vport->num_rxq) { 3412 err = -EINVAL; 3413 goto mem_rel; 3414 } 3415 3416 if (!idpf_is_queue_model_split(vport->txq_model)) 3417 goto check_rxq; 3418 3419 q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 3420 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); 3421 if (num_ids < vport->num_complq) { 3422 err = -EINVAL; 3423 goto mem_rel; 3424 } 3425 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); 3426 if (num_ids < vport->num_complq) { 3427 err = -EINVAL; 3428 goto mem_rel; 3429 } 3430 3431 check_rxq: 3432 if (!idpf_is_queue_model_split(vport->rxq_model)) 3433 goto mem_rel; 3434 3435 q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 3436 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); 3437 if (num_ids < vport->num_bufq) { 3438 err = -EINVAL; 3439 goto mem_rel; 3440 } 3441 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); 3442 if (num_ids < vport->num_bufq) 3443 err = -EINVAL; 3444 3445 mem_rel: 3446 kfree(qids); 3447 3448 return err; 3449 } 3450 3451 /** 3452 * idpf_vport_adjust_qs - Adjust to new requested queues 3453 * @vport: virtual port data struct 3454 * 3455 * Renegotiate queues. Returns 0 on success, negative on failure. 
3456 */ 3457 int idpf_vport_adjust_qs(struct idpf_vport *vport) 3458 { 3459 struct virtchnl2_create_vport vport_msg; 3460 int err; 3461 3462 vport_msg.txq_model = cpu_to_le16(vport->txq_model); 3463 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); 3464 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, 3465 NULL); 3466 if (err) 3467 return err; 3468 3469 idpf_vport_init_num_qs(vport, &vport_msg); 3470 idpf_vport_calc_num_q_groups(vport); 3471 3472 return 0; 3473 } 3474 3475 /** 3476 * idpf_is_capability_ena - Default implementation of capability checking 3477 * @adapter: Private data struct 3478 * @all: all or one flag 3479 * @field: caps field to check for flags 3480 * @flag: flag to check 3481 * 3482 * Return true if all capabilities are supported, false otherwise 3483 */ 3484 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, 3485 enum idpf_cap_field field, u64 flag) 3486 { 3487 u8 *caps = (u8 *)&adapter->caps; 3488 u32 *cap_field; 3489 3490 if (!caps) 3491 return false; 3492 3493 if (field == IDPF_BASE_CAPS) 3494 return false; 3495 3496 cap_field = (u32 *)(caps + field); 3497 3498 if (all) 3499 return (*cap_field & flag) == flag; 3500 else 3501 return !!(*cap_field & flag); 3502 } 3503 3504 /** 3505 * idpf_get_vport_id: Get vport id 3506 * @vport: virtual port structure 3507 * 3508 * Return vport id from the adapter persistent data 3509 */ 3510 u32 idpf_get_vport_id(struct idpf_vport *vport) 3511 { 3512 struct virtchnl2_create_vport *vport_msg; 3513 3514 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; 3515 3516 return le32_to_cpu(vport_msg->vport_id); 3517 } 3518 3519 /** 3520 * idpf_mac_filter_async_handler - Async callback for mac filters 3521 * @adapter: private data struct 3522 * @xn: transaction for message 3523 * @ctlq_msg: received message 3524 * 3525 * In some scenarios driver can't sleep and wait for a reply (e.g.: stack is 3526 * holding rtnl_lock) when adding a new mac filter. It puts us in a difficult 3527 * situation to deal with errors returned on the reply. The best we can 3528 * ultimately do is remove it from our list of mac filters and report the 3529 * error. 
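 *
 * For reference, the sender side wires this callback up roughly as follows
 * (mirrors idpf_add_del_mac_filters() below; illustrative sketch only):
 *
 *	xn_params.vc_op = VIRTCHNL2_OP_ADD_MAC_ADDR;
 *	xn_params.async = true;
 *	xn_params.async_handler = idpf_mac_filter_async_handler;
 *	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);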
3530 */ 3531 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter, 3532 struct idpf_vc_xn *xn, 3533 const struct idpf_ctlq_msg *ctlq_msg) 3534 { 3535 struct virtchnl2_mac_addr_list *ma_list; 3536 struct idpf_vport_config *vport_config; 3537 struct virtchnl2_mac_addr *mac_addr; 3538 struct idpf_mac_filter *f, *tmp; 3539 struct list_head *ma_list_head; 3540 struct idpf_vport *vport; 3541 u16 num_entries; 3542 int i; 3543 3544 /* if success we're done, we're only here if something bad happened */ 3545 if (!ctlq_msg->cookie.mbx.chnl_retval) 3546 return 0; 3547 3548 /* make sure at least struct is there */ 3549 if (xn->reply_sz < sizeof(*ma_list)) 3550 goto invalid_payload; 3551 3552 ma_list = ctlq_msg->ctx.indirect.payload->va; 3553 mac_addr = ma_list->mac_addr_list; 3554 num_entries = le16_to_cpu(ma_list->num_mac_addr); 3555 /* we should have received a buffer at least this big */ 3556 if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) 3557 goto invalid_payload; 3558 3559 vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); 3560 if (!vport) 3561 goto invalid_payload; 3562 3563 vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)]; 3564 ma_list_head = &vport_config->user_config.mac_filter_list; 3565 3566 /* We can't do much to reconcile bad filters at this point, however we 3567 * should at least remove them from our list one way or the other so we 3568 * have some idea what good filters we have. 3569 */ 3570 spin_lock_bh(&vport_config->mac_filter_list_lock); 3571 list_for_each_entry_safe(f, tmp, ma_list_head, list) 3572 for (i = 0; i < num_entries; i++) 3573 if (ether_addr_equal(mac_addr[i].addr, f->macaddr)) 3574 list_del(&f->list); 3575 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3576 dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n", 3577 xn->vc_op); 3578 3579 return 0; 3580 3581 invalid_payload: 3582 dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n", 3583 xn->vc_op, xn->reply_sz); 3584 3585 return -EINVAL; 3586 } 3587 3588 /** 3589 * idpf_add_del_mac_filters - Add/del mac filters 3590 * @vport: Virtual port data structure 3591 * @np: Netdev private structure 3592 * @add: Add or delete flag 3593 * @async: Don't wait for return message 3594 * 3595 * Returns 0 on success, error on failure. 3596 **/ 3597 int idpf_add_del_mac_filters(struct idpf_vport *vport, 3598 struct idpf_netdev_priv *np, 3599 bool add, bool async) 3600 { 3601 struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL; 3602 struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL; 3603 struct idpf_adapter *adapter = np->adapter; 3604 struct idpf_vc_xn_params xn_params = {}; 3605 struct idpf_vport_config *vport_config; 3606 u32 num_msgs, total_filters = 0; 3607 struct idpf_mac_filter *f; 3608 ssize_t reply_sz; 3609 int i = 0, k; 3610 3611 xn_params.vc_op = add ? 
VIRTCHNL2_OP_ADD_MAC_ADDR : 3612 VIRTCHNL2_OP_DEL_MAC_ADDR; 3613 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3614 xn_params.async = async; 3615 xn_params.async_handler = idpf_mac_filter_async_handler; 3616 3617 vport_config = adapter->vport_config[np->vport_idx]; 3618 spin_lock_bh(&vport_config->mac_filter_list_lock); 3619 3620 /* Find the number of newly added filters */ 3621 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, 3622 list) { 3623 if (add && f->add) 3624 total_filters++; 3625 else if (!add && f->remove) 3626 total_filters++; 3627 } 3628 3629 if (!total_filters) { 3630 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3631 3632 return 0; 3633 } 3634 3635 /* Fill all the new filters into virtchannel message */ 3636 mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr), 3637 GFP_ATOMIC); 3638 if (!mac_addr) { 3639 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3640 3641 return -ENOMEM; 3642 } 3643 3644 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, 3645 list) { 3646 if (add && f->add) { 3647 ether_addr_copy(mac_addr[i].addr, f->macaddr); 3648 i++; 3649 f->add = false; 3650 if (i == total_filters) 3651 break; 3652 } 3653 if (!add && f->remove) { 3654 ether_addr_copy(mac_addr[i].addr, f->macaddr); 3655 i++; 3656 f->remove = false; 3657 if (i == total_filters) 3658 break; 3659 } 3660 } 3661 3662 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3663 3664 /* Chunk up the filters into multiple messages to avoid 3665 * sending a control queue message buffer that is too large 3666 */ 3667 num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG); 3668 3669 for (i = 0, k = 0; i < num_msgs; i++) { 3670 u32 entries_size, buf_size, num_entries; 3671 3672 num_entries = min_t(u32, total_filters, 3673 IDPF_NUM_FILTERS_PER_MSG); 3674 entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries; 3675 buf_size = struct_size(ma_list, mac_addr_list, num_entries); 3676 3677 if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) { 3678 kfree(ma_list); 3679 ma_list = kzalloc(buf_size, GFP_ATOMIC); 3680 if (!ma_list) 3681 return -ENOMEM; 3682 } else { 3683 memset(ma_list, 0, buf_size); 3684 } 3685 3686 ma_list->vport_id = cpu_to_le32(np->vport_id); 3687 ma_list->num_mac_addr = cpu_to_le16(num_entries); 3688 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); 3689 3690 xn_params.send_buf.iov_base = ma_list; 3691 xn_params.send_buf.iov_len = buf_size; 3692 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3693 if (reply_sz < 0) 3694 return reply_sz; 3695 3696 k += num_entries; 3697 total_filters -= num_entries; 3698 } 3699 3700 return 0; 3701 } 3702 3703 /** 3704 * idpf_set_promiscuous - set promiscuous and send message to mailbox 3705 * @adapter: Driver specific private structure 3706 * @config_data: Vport specific config data 3707 * @vport_id: Vport identifier 3708 * 3709 * Request to enable promiscuous mode for the vport. Message is sent 3710 * asynchronously and won't wait for response. 
Returns 0 on success, negative 3711 * on failure; 3712 */ 3713 int idpf_set_promiscuous(struct idpf_adapter *adapter, 3714 struct idpf_vport_user_config_data *config_data, 3715 u32 vport_id) 3716 { 3717 struct idpf_vc_xn_params xn_params = {}; 3718 struct virtchnl2_promisc_info vpi; 3719 ssize_t reply_sz; 3720 u16 flags = 0; 3721 3722 if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) 3723 flags |= VIRTCHNL2_UNICAST_PROMISC; 3724 if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) 3725 flags |= VIRTCHNL2_MULTICAST_PROMISC; 3726 3727 vpi.vport_id = cpu_to_le32(vport_id); 3728 vpi.flags = cpu_to_le16(flags); 3729 3730 xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE; 3731 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3732 xn_params.send_buf.iov_base = &vpi; 3733 xn_params.send_buf.iov_len = sizeof(vpi); 3734 /* setting promiscuous is only ever done asynchronously */ 3735 xn_params.async = true; 3736 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3737 3738 return reply_sz < 0 ? reply_sz : 0; 3739 } 3740
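
/*
 * Usage note (illustrative sketch only, not part of the driver): the rx-mode
 * path typically updates config_data->user_flags under its own locking and
 * then fires the promiscuous request without waiting for a reply, e.g.:
 *
 *	err = idpf_set_promiscuous(adapter, config_data, vport->vport_id);
 *	if (err)
 *		dev_err(&adapter->pdev->dev,
 *			"Failed to set promiscuous mode: %d\n", err);
 */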