xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c (revision a55f7f5f29b32c2c53cc291899cf9b0c25a07f7c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <linux/export.h>
5 #include <net/libeth/rx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 #include "idpf_ptp.h"
10 
/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions; a transaction's slot index in
 *	  this ring is carried in the message cookie so replies can find it
 * @free_xn_bm: bitmap for free transactions (set bit == slot is free)
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make cookie unique every message; incremented on each
 *	  allocation so a stale reply cannot match a recycled slot
 */
struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;
	u8 salt;
};
24 
25 /**
26  * idpf_vid_to_vport - Translate vport id to vport pointer
27  * @adapter: private data struct
28  * @v_id: vport id to translate
29  *
30  * Returns vport matching v_id, NULL if not found.
31  */
32 static
idpf_vid_to_vport(struct idpf_adapter * adapter,u32 v_id)33 struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
34 {
35 	u16 num_max_vports = idpf_get_max_vports(adapter);
36 	int i;
37 
38 	for (i = 0; i < num_max_vports; i++)
39 		if (adapter->vport_ids[i] == v_id)
40 			return adapter->vports[i];
41 
42 	return NULL;
43 }
44 
45 /**
46  * idpf_handle_event_link - Handle link event message
47  * @adapter: private data struct
48  * @v2e: virtchnl event message
49  */
idpf_handle_event_link(struct idpf_adapter * adapter,const struct virtchnl2_event * v2e)50 static void idpf_handle_event_link(struct idpf_adapter *adapter,
51 				   const struct virtchnl2_event *v2e)
52 {
53 	struct idpf_netdev_priv *np;
54 	struct idpf_vport *vport;
55 
56 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
57 	if (!vport) {
58 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
59 				    v2e->vport_id);
60 		return;
61 	}
62 	np = netdev_priv(vport->netdev);
63 
64 	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
65 
66 	if (vport->link_up == v2e->link_status)
67 		return;
68 
69 	vport->link_up = v2e->link_status;
70 
71 	if (!test_bit(IDPF_VPORT_UP, np->state))
72 		return;
73 
74 	if (vport->link_up) {
75 		netif_tx_start_all_queues(vport->netdev);
76 		netif_carrier_on(vport->netdev);
77 	} else {
78 		netif_tx_stop_all_queues(vport->netdev);
79 		netif_carrier_off(vport->netdev);
80 	}
81 }
82 
83 /**
84  * idpf_recv_event_msg - Receive virtchnl event message
85  * @adapter: Driver specific private structure
86  * @ctlq_msg: message to copy from
87  *
88  * Receive virtchnl event message
89  */
idpf_recv_event_msg(struct idpf_adapter * adapter,struct idpf_ctlq_msg * ctlq_msg)90 static void idpf_recv_event_msg(struct idpf_adapter *adapter,
91 				struct idpf_ctlq_msg *ctlq_msg)
92 {
93 	int payload_size = ctlq_msg->ctx.indirect.payload->size;
94 	struct virtchnl2_event *v2e;
95 	u32 event;
96 
97 	if (payload_size < sizeof(*v2e)) {
98 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
99 				    ctlq_msg->cookie.mbx.chnl_opcode,
100 				    payload_size);
101 		return;
102 	}
103 
104 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
105 	event = le32_to_cpu(v2e->event);
106 
107 	switch (event) {
108 	case VIRTCHNL2_EVENT_LINK_CHANGE:
109 		idpf_handle_event_link(adapter, v2e);
110 		return;
111 	default:
112 		dev_err(&adapter->pdev->dev,
113 			"Unknown event %d from PF\n", event);
114 		break;
115 	}
116 }
117 
118 /**
119  * idpf_mb_clean - Reclaim the send mailbox queue entries
120  * @adapter: driver specific private structure
121  * @asq: send control queue info
122  *
123  * Reclaim the send mailbox queue entries to be used to send further messages
124  *
125  * Return: 0 on success, negative on failure
126  */
idpf_mb_clean(struct idpf_adapter * adapter,struct idpf_ctlq_info * asq)127 static int idpf_mb_clean(struct idpf_adapter *adapter,
128 			 struct idpf_ctlq_info *asq)
129 {
130 	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
131 	struct idpf_ctlq_msg **q_msg;
132 	struct idpf_dma_mem *dma_mem;
133 	int err;
134 
135 	q_msg = kzalloc_objs(struct idpf_ctlq_msg *, num_q_msg, GFP_ATOMIC);
136 	if (!q_msg)
137 		return -ENOMEM;
138 
139 	err = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);
140 	if (err)
141 		goto err_kfree;
142 
143 	for (i = 0; i < num_q_msg; i++) {
144 		if (!q_msg[i])
145 			continue;
146 		dma_mem = q_msg[i]->ctx.indirect.payload;
147 		if (dma_mem)
148 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
149 					  dma_mem->va, dma_mem->pa);
150 		kfree(q_msg[i]);
151 		kfree(dma_mem);
152 	}
153 
154 err_kfree:
155 	kfree(q_msg);
156 
157 	return err;
158 }
159 
160 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
161 /**
162  * idpf_ptp_is_mb_msg - Check if the message is PTP-related
163  * @op: virtchnl opcode
164  *
165  * Return: true if msg is PTP-related, false otherwise.
166  */
idpf_ptp_is_mb_msg(u32 op)167 static bool idpf_ptp_is_mb_msg(u32 op)
168 {
169 	switch (op) {
170 	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
171 	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
172 	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
173 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
174 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
175 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
176 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
177 		return true;
178 	default:
179 		return false;
180 	}
181 }
182 
183 /**
184  * idpf_prepare_ptp_mb_msg - Prepare PTP related message
185  *
186  * @adapter: Driver specific private structure
187  * @op: virtchnl opcode
188  * @ctlq_msg: Corresponding control queue message
189  */
idpf_prepare_ptp_mb_msg(struct idpf_adapter * adapter,u32 op,struct idpf_ctlq_msg * ctlq_msg)190 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
191 				    struct idpf_ctlq_msg *ctlq_msg)
192 {
193 	/* If the message is PTP-related and the secondary mailbox is available,
194 	 * send the message through the secondary mailbox.
195 	 */
196 	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
197 		return;
198 
199 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
200 	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
201 	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
202 }
203 #else /* !CONFIG_PTP_1588_CLOCK */
idpf_prepare_ptp_mb_msg(struct idpf_adapter * adapter,u32 op,struct idpf_ctlq_msg * ctlq_msg)204 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
205 				    struct idpf_ctlq_msg *ctlq_msg)
206 { }
207 #endif /* CONFIG_PTP_1588_CLOCK */
208 
/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: driver specific private structure
 * @asq: control queue to send message to
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload (may be NULL if @msg_size is 0)
 * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiates the send api
 *
 * Return: 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
		     u32 op, u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and expected to
	 * be corrected with a new run either by user or driver
	 * flows after reset
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	/* Reclaim previously-sent descriptors (and their DMA buffers)
	 * before queueing a new message.
	 */
	err = idpf_mb_clean(adapter, asq);
	if (err)
		return err;

	ctlq_msg = kzalloc_obj(*ctlq_msg, GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc_obj(*dma_mem, GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;

	/* May override opcode/func_id/host_id to route PTP messages via the
	 * secondary mailbox; a no-op for everything else.
	 */
	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);

	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	/* Buffer is always max-sized; only msg_size bytes are meaningful */
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	/* On success, ctlq_msg and dma_mem are owned by the control queue
	 * and are released later by idpf_mb_clean().
	 */
	err = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}
289 
/* API for virtchnl "transaction" support ("xn" for short). */

/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 *
 * Note: the lock must be dropped before calling complete() on the
 * transaction (see idpf_vc_xn_forward_reply()).
 */
#define idpf_vc_xn_lock(xn)			\
	spin_lock(&(xn)->lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn)		\
	spin_unlock(&(xn)->lock)
305 
306 /**
307  * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
308  * reset the transaction state.
309  * @xn: struct idpf_vc_xn to update
310  */
idpf_vc_xn_release_bufs(struct idpf_vc_xn * xn)311 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
312 {
313 	xn->reply.iov_base = NULL;
314 	xn->reply.iov_len = 0;
315 
316 	if (xn->state != IDPF_VC_XN_SHUTDOWN)
317 		xn->state = IDPF_VC_XN_IDLE;
318 }
319 
320 /**
321  * idpf_vc_xn_init - Initialize virtchnl transaction object
322  * @vcxn_mngr: pointer to vc transaction manager struct
323  */
idpf_vc_xn_init(struct idpf_vc_xn_manager * vcxn_mngr)324 static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
325 {
326 	int i;
327 
328 	spin_lock_init(&vcxn_mngr->xn_bm_lock);
329 
330 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
331 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
332 
333 		xn->state = IDPF_VC_XN_IDLE;
334 		xn->idx = i;
335 		idpf_vc_xn_release_bufs(xn);
336 		spin_lock_init(&xn->lock);
337 		init_completion(&xn->completed);
338 	}
339 
340 	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
341 }
342 
/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken-up and their transaction aborted. Further
 * operations on that object will fail.
 */
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	/* Empty the free bitmap first so no new transactions can be popped */
	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		idpf_vc_xn_lock(xn);
		xn->state = IDPF_VC_XN_SHUTDOWN;
		idpf_vc_xn_release_bufs(xn);
		idpf_vc_xn_unlock(xn);
		/* Must not hold xn->lock while completing; wake all waiters */
		complete_all(&xn->completed);
	}
}
368 
369 /**
370  * idpf_vc_xn_pop_free - Pop a free transaction from free list
371  * @vcxn_mngr: transaction manager to pop from
372  *
373  * Returns NULL if no free transactions
374  */
375 static
idpf_vc_xn_pop_free(struct idpf_vc_xn_manager * vcxn_mngr)376 struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
377 {
378 	struct idpf_vc_xn *xn = NULL;
379 	unsigned long free_idx;
380 
381 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
382 	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
383 	if (free_idx == IDPF_VC_XN_RING_LEN)
384 		goto do_unlock;
385 
386 	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
387 	xn = &vcxn_mngr->ring[free_idx];
388 	xn->salt = vcxn_mngr->salt++;
389 
390 do_unlock:
391 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
392 
393 	return xn;
394 }
395 
/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	/* Reset buffers/state before the slot becomes visible as free */
	idpf_vc_xn_release_bufs(xn);
	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
}
409 
/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 *   -vc_op: virtchannel operation to send
 *   -send_buf: kvec iov for send buf and len
 *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
 *   -timeout_ms: timeout waiting for a reply (milliseconds)
 *   -async: don't wait for message reply, will lose caller context
 *   -async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			const struct idpf_vc_xn_params *params)
{
	const struct kvec *send_buf = &params->send_buf;
	struct idpf_vc_xn *xn;
	ssize_t retval;
	u16 cookie;

	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
	/* no free transactions available */
	if (!xn)
		return -ENOSPC;

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
		retval = -ENXIO;
		goto only_unlock;
	} else if (xn->state != IDPF_VC_XN_IDLE) {
		/* We're just going to clobber this transaction even though
		 * it's not IDLE. If we don't reuse it we could theoretically
		 * eventually leak all the free transactions and not be able to
		 * send any messages. At least this way we make an attempt to
		 * remain functional even though something really bad is
		 * happening that's corrupting what was supposed to be free
		 * transactions.
		 */
		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
			  xn->idx, xn->vc_op);
	}

	/* Arm the transaction under the lock so a racing reply sees a
	 * consistent state/op/handler.
	 */
	xn->reply = params->recv_buf;
	xn->reply_sz = 0;
	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
	xn->vc_op = params->vc_op;
	xn->async_handler = params->async_handler;
	idpf_vc_xn_unlock(xn);

	if (!params->async)
		reinit_completion(&xn->completed);
	/* Cookie encodes slot index + per-allocation salt; echoed back in
	 * the reply so idpf_vc_xn_forward_reply() can match it.
	 */
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	retval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,
				  send_buf->iov_len, send_buf->iov_base,
				  cookie);
	if (retval) {
		/* Send failed: retake the lock and return the slot */
		idpf_vc_xn_lock(xn);
		goto release_and_unlock;
	}

	/* Async callers get their reply via async_handler; the slot is
	 * released on the receive path.
	 */
	if (params->async)
		return 0;

	wait_for_completion_timeout(&xn->completed,
				    msecs_to_jiffies(params->timeout_ms));

	/* No need to check the return value; we check the final state of the
	 * transaction below. It's possible the transaction actually gets more
	 * timeout than specified if we get preempted here but after
	 * wait_for_completion_timeout returns. This should be non-issue
	 * however.
	 */
	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_SHUTDOWN:
		retval = -ENXIO;
		goto only_unlock;
	case IDPF_VC_XN_WAITING:
		/* Still WAITING means no reply ever matched: timeout */
		dev_notice_ratelimited(&adapter->pdev->dev,
				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
				       params->vc_op, cookie, xn->vc_op,
				       xn->salt, params->timeout_ms);
		retval = -ETIME;
		break;
	case IDPF_VC_XN_COMPLETED_SUCCESS:
		retval = xn->reply_sz;
		break;
	case IDPF_VC_XN_COMPLETED_FAILED:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
				       params->vc_op);
		retval = -EIO;
		break;
	default:
		/* Invalid state. */
		WARN_ON_ONCE(1);
		retval = -EIO;
		break;
	}

release_and_unlock:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
	/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
	idpf_vc_xn_unlock(xn);

	return retval;
}
522 
523 /**
524  * idpf_vc_xn_forward_async - Handle async reply receives
525  * @adapter: private data struct
526  * @xn: transaction to handle
527  * @ctlq_msg: corresponding ctlq_msg
528  *
529  * For async sends we're going to lose the caller's context so, if an
530  * async_handler was provided, it can deal with the reply, otherwise we'll just
531  * check and report if there is an error.
532  */
533 static int
idpf_vc_xn_forward_async(struct idpf_adapter * adapter,struct idpf_vc_xn * xn,const struct idpf_ctlq_msg * ctlq_msg)534 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
535 			 const struct idpf_ctlq_msg *ctlq_msg)
536 {
537 	int err = 0;
538 
539 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
540 		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
541 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
542 		xn->reply_sz = 0;
543 		err = -EINVAL;
544 		goto release_bufs;
545 	}
546 
547 	if (xn->async_handler) {
548 		err = xn->async_handler(adapter, xn, ctlq_msg);
549 		goto release_bufs;
550 	}
551 
552 	if (ctlq_msg->cookie.mbx.chnl_retval) {
553 		xn->reply_sz = 0;
554 		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
555 				    ctlq_msg->cookie.mbx.chnl_opcode);
556 		err = -EINVAL;
557 	}
558 
559 release_bufs:
560 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
561 
562 	return err;
563 }
564 
/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 *
 * Decodes the cookie to find the matching transaction, validates salt and
 * opcode, and either copies the payload into the waiter's reply buffer or
 * forwards it to the async path.
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	const void *payload = NULL;
	size_t payload_size = 0;
	struct idpf_vc_xn *xn;
	u16 msg_info;
	int err = 0;
	u16 xn_idx;
	u16 salt;

	/* Cookie carries slot index + salt as set in idpf_vc_xn_exec() */
	msg_info = ctlq_msg->ctx.sw_cookie.data;
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
				    xn_idx);
		return -EINVAL;
	}
	xn = &adapter->vcxn_mngr->ring[xn_idx];
	idpf_vc_xn_lock(xn);
	/* Salt mismatch means the slot was recycled since this message was
	 * sent; drop the stale reply.
	 */
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt) {
		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
				    xn->vc_op, xn->salt, xn->state,
				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
		idpf_vc_xn_unlock(xn);
		return -EINVAL;
	}

	switch (xn->state) {
	case IDPF_VC_XN_WAITING:
		/* success */
		break;
	case IDPF_VC_XN_IDLE:
		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
		goto out_unlock;
	case IDPF_VC_XN_SHUTDOWN:
		/* ENXIO is a bit special here as the recv msg loop uses that
		 * know if it should stop trying to clean the ring if we lost
		 * the virtchnl. We need to stop playing with registers and
		 * yield.
		 */
		err = -ENXIO;
		goto out_unlock;
	case IDPF_VC_XN_ASYNC:
		/* Set reply_sz from the actual payload so that async_handler
		 * can evaluate the response.
		 */
		xn->reply_sz = ctlq_msg->data_len;
		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
		idpf_vc_xn_unlock(xn);
		return err;
	default:
		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EBUSY;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->data_len) {
		payload = ctlq_msg->ctx.indirect.payload->va;
		payload_size = ctlq_msg->data_len;
	}

	xn->reply_sz = payload_size;
	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

	/* Copy at most the caller's buffer length; reply_sz still reports
	 * the full payload size.
	 */
	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
		memcpy(xn->reply.iov_base, payload,
		       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
	idpf_vc_xn_unlock(xn);
	/* we _cannot_ hold lock while calling complete */
	complete(&xn->completed);

	return err;
}
667 
/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: driver specific private structure
 * @arq: control queue to receive message from
 *
 * Will receive control queue message and posts the receive buffer.
 *
 * Return: 0 on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)
{
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int post_err, err;
	u16 num_recv;

	while (1) {
		/* This will get <= num_recv messages and output how many
		 * actually received on num_recv.
		 */
		num_recv = 1;
		err = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);
		if (err || !num_recv)
			break;

		/* Only messages with an indirect payload return a buffer to
		 * repost; num_recv doubles as the buffer count for
		 * idpf_ctlq_post_rx_buffs() below.
		 */
		if (ctlq_msg.data_len) {
			dma_mem = ctlq_msg.ctx.indirect.payload;
		} else {
			dma_mem = NULL;
			num_recv = 0;
		}

		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
			idpf_recv_event_msg(adapter, &ctlq_msg);
		else
			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,
						   &num_recv, &dma_mem);

		/* If post failed clear the only buffer we supplied */
		if (post_err) {
			if (dma_mem)
				dma_free_coherent(&adapter->pdev->dev,
						  dma_mem->size, dma_mem->va,
						  dma_mem->pa);
			break;
		}

		/* virtchnl trying to shutdown, stop cleaning */
		if (err == -ENXIO)
			break;
	}

	return err;
}
724 
/**
 * struct idpf_chunked_msg_params - parameters for a chunked virtchnl message
 * @prepare_msg: callback that fills @buf with one message for @num chunks
 *		 starting at @pos; returns the number of bytes written (must
 *		 equal the buffer size computed by the sender)
 * @chunks: base pointer to the array of chunk entries to send
 * @num_chunks: total number of chunks across all messages
 * @chunk_sz: size in bytes of a single chunk entry
 * @config_sz: size in bytes of the fixed (non-chunk) part of each message
 * @vc_op: virtchnl opcode to send
 * @vport_id: ID of the vport the chunks belong to
 */
struct idpf_chunked_msg_params {
	u32			(*prepare_msg)(u32 vport_id, void *buf,
					       const void *pos, u32 num);

	const void		*chunks;
	u32			num_chunks;

	u32			chunk_sz;
	u32			config_sz;

	u32			vc_op;
	u32			vport_id;
};
738 
/**
 * idpf_alloc_queue_set - allocate a queue set descriptor
 * @adapter: driver specific private structure
 * @qv_rsrc: queue and vector resources the set refers to
 * @vport_id: ID of the vport the queues belong to
 * @num: number of queue entries to reserve
 *
 * Return: new queue set with @num zeroed entries, or NULL on allocation
 * failure. Caller frees it with kfree().
 */
struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
					    struct idpf_q_vec_rsrc *qv_rsrc,
					    u32 vport_id, u32 num)
{
	struct idpf_queue_set *qset;

	qset = kzalloc_flex(*qset, qs, num);
	if (!qset)
		return NULL;

	qset->adapter = adapter;
	qset->qv_rsrc = qv_rsrc;
	qset->vport_id = vport_id;
	qset->num = num;

	return qset;
}
756 
/**
 * idpf_send_chunked_msg - send VC message consisting of chunks
 * @adapter: Driver specific private structure
 * @params: message params
 *
 * Helper function for preparing a message describing queues to be enabled
 * or disabled.  Splits @params->num_chunks entries across as many messages
 * as needed, each holding at most IDPF_NUM_CHUNKS_PER_MSG chunks.
 *
 * Return: 0 on success, negative error code on failure.  (Despite being a
 * chunked send, the reply size is not propagated to the caller.)
 */
static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
				 const struct idpf_chunked_msg_params *params)
{
	struct idpf_vc_xn_params xn_params = {
		.vc_op		= params->vc_op,
		.timeout_ms	= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	const void *pos = params->chunks;
	u32 num_chunks, num_msgs, buf_sz;
	void *buf __free(kfree) = NULL;
	u32 totqs = params->num_chunks;
	u32 vid = params->vport_id;

	num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
						 params->chunk_sz), totqs);
	num_msgs = DIV_ROUND_UP(totqs, num_chunks);

	/* Buffer is sized for the first (largest) message and reused */
	buf_sz = params->config_sz + num_chunks * params->chunk_sz;
	buf = kzalloc(buf_sz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	xn_params.send_buf.iov_base = buf;

	for (u32 i = 0; i < num_msgs; i++) {
		ssize_t reply_sz;

		memset(buf, 0, buf_sz);
		xn_params.send_buf.iov_len = buf_sz;

		/* prepare_msg must fill exactly buf_sz bytes */
		if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
			return -EINVAL;

		/* NOTE(review): negative ssize_t is narrowed to int here;
		 * errno values fit, so this is safe in practice.
		 */
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		pos += num_chunks * params->chunk_sz;
		totqs -= num_chunks;

		/* Last message may carry fewer chunks */
		num_chunks = min(num_chunks, totqs);
		buf_sz = params->config_sz + num_chunks * params->chunk_sz;
	}

	return 0;
}
813 
/**
 * idpf_wait_for_marker_event_set - wait for software marker response for
 *				    selected Tx queues
 * @qs: set of the Tx queues
 *
 * Return: 0 success, -errno on failure.
 */
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
	struct net_device *netdev;
	struct idpf_tx_queue *txq;
	bool markers_rcvd = true;

	for (u32 i = 0; i < qs->num; i++) {
		switch (qs->qs[i].type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			txq = qs->qs[i].txq;

			/* netdev is only read on the warning path below, which
			 * can only be reached after at least one TX queue was
			 * seen here, so it is never used uninitialized.
			 */
			netdev = txq->netdev;

			idpf_queue_set(SW_MARKER, txq);
			idpf_wait_for_sw_marker_completion(txq);
			/* One missing marker fails the whole set */
			markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
			break;
		default:
			break;
		}
	}

	if (!markers_rcvd) {
		netdev_warn(netdev,
			    "Failed to receive marker packets\n");
		return -ETIMEDOUT;
	}

	return 0;
}
851 
852 /**
853  * idpf_wait_for_marker_event - wait for software marker response
854  * @vport: virtual port data structure
855  *
856  * Return: 0 success, negative on failure.
857  **/
idpf_wait_for_marker_event(struct idpf_vport * vport)858 static int idpf_wait_for_marker_event(struct idpf_vport *vport)
859 {
860 	struct idpf_queue_set *qs __free(kfree) = NULL;
861 
862 	qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
863 				  vport->vport_id, vport->num_txq);
864 	if (!qs)
865 		return -ENOMEM;
866 
867 	for (u32 i = 0; i < qs->num; i++) {
868 		qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
869 		qs->qs[i].txq = vport->txqs[i];
870 	}
871 
872 	return idpf_wait_for_marker_event_set(qs);
873 }
874 
/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message.  Returns 0 on success, negative on failure,
 * or -EAGAIN when the negotiated version differs and the message must be
 * re-sent with the updated version.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
	ssize_t reply_sz;
	u32 major, minor;
	int err = 0;

	/* Re-sends advertise the previously negotiated version */
	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	/* Reply overwrites vvi in place */
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(vvi))
		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 *
	 * NOTE(review): the '&&' means -EAGAIN is only returned when BOTH
	 * major and minor differ; a minor-only mismatch is accepted without
	 * a re-send.  Looks intentional per the dev_warn above, but confirm
	 * against the virtchnl2 negotiation spec.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}
934 
/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message advertising every capability this
 * driver can negotiate; the device's reply (the subset it grants) is stored
 * in @adapter->caps. Returns 0 on success, negative on failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	/* Tx/Rx checksum offloads, including single-tunnel variants */
	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);

	/* TSO/segmentation support */
	caps.seg_caps =
		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

	/* RSS hashing over the standard 5-tuple flow types */
	caps.rss_caps =
		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP		|
			    VIRTCHNL2_FLOW_IPV4_UDP		|
			    VIRTCHNL2_FLOW_IPV4_SCTP		|
			    VIRTCHNL2_FLOW_IPV4_OTHER		|
			    VIRTCHNL2_FLOW_IPV6_TCP		|
			    VIRTCHNL2_FLOW_IPV6_UDP		|
			    VIRTCHNL2_FLOW_IPV6_SCTP		|
			    VIRTCHNL2_FLOW_IPV6_OTHER);

	/* Header split on L4 boundary for IPv4/IPv6 */
	caps.hsplit_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

	/* Receive-side coalescing for TCP */
	caps.rsc_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
			    VIRTCHNL2_CAP_RSC_IPV6_TCP);

	/* Miscellaneous device capabilities */
	caps.other_caps =
		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
			    VIRTCHNL2_CAP_RDMA                  |
			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	|
			    VIRTCHNL2_CAP_MACFILTER		|
			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
			    VIRTCHNL2_CAP_PROMISC		|
			    VIRTCHNL2_CAP_LOOPBACK		|
			    VIRTCHNL2_CAP_PTP);

	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
	xn_params.send_buf.iov_base = &caps;
	xn_params.send_buf.iov_len = sizeof(caps);
	/* Device reply with the granted caps lands directly in adapter->caps */
	xn_params.recv_buf.iov_base = &adapter->caps;
	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	/* A short reply means a truncated/corrupt caps structure */
	if (reply_sz < sizeof(adapter->caps))
		return -EIO;

	return 0;
}
1021 
1022 /**
1023  * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
1024  * @adapter: Driver specific private struct
1025  *
1026  * Return: 0 on success or error code on failure.
1027  */
idpf_send_get_lan_memory_regions(struct idpf_adapter * adapter)1028 static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
1029 {
1030 	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
1031 	struct idpf_vc_xn_params xn_params = {
1032 		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
1033 		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
1034 		.send_buf.iov_len =
1035 			sizeof(struct virtchnl2_get_lan_memory_regions) +
1036 			sizeof(struct virtchnl2_mem_region),
1037 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
1038 	};
1039 	int num_regions, size;
1040 	struct idpf_hw *hw;
1041 	ssize_t reply_sz;
1042 	int err = 0;
1043 
1044 	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
1045 	if (!rcvd_regions)
1046 		return -ENOMEM;
1047 
1048 	xn_params.recv_buf.iov_base = rcvd_regions;
1049 	rcvd_regions->num_memory_regions = cpu_to_le16(1);
1050 	xn_params.send_buf.iov_base = rcvd_regions;
1051 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1052 	if (reply_sz < 0)
1053 		return reply_sz;
1054 
1055 	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
1056 	size = struct_size(rcvd_regions, mem_reg, num_regions);
1057 	if (reply_sz < size)
1058 		return -EIO;
1059 
1060 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
1061 		return -EINVAL;
1062 
1063 	hw = &adapter->hw;
1064 	hw->lan_regs = kzalloc_objs(*hw->lan_regs, num_regions);
1065 	if (!hw->lan_regs)
1066 		return -ENOMEM;
1067 
1068 	for (int i = 0; i < num_regions; i++) {
1069 		hw->lan_regs[i].addr_len =
1070 			le64_to_cpu(rcvd_regions->mem_reg[i].size);
1071 		hw->lan_regs[i].addr_start =
1072 			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
1073 	}
1074 	hw->num_lan_regs = num_regions;
1075 
1076 	return err;
1077 }
1078 
1079 /**
1080  * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
1081  * @adapter: Driver specific private structure
1082  *
1083  * Called when idpf_send_get_lan_memory_regions is not supported. This will
1084  * calculate the offsets and sizes for the regions before, in between, and
1085  * after the mailbox and rstat MMIO mappings.
1086  *
1087  * Return: 0 on success or error code on failure.
1088  */
idpf_calc_remaining_mmio_regs(struct idpf_adapter * adapter)1089 static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
1090 {
1091 	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
1092 	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
1093 	struct idpf_hw *hw = &adapter->hw;
1094 
1095 	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
1096 	hw->lan_regs = kzalloc_objs(*hw->lan_regs, hw->num_lan_regs);
1097 	if (!hw->lan_regs)
1098 		return -ENOMEM;
1099 
1100 	/* Region preceding mailbox */
1101 	hw->lan_regs[0].addr_start = 0;
1102 	hw->lan_regs[0].addr_len = mbx_reg->start;
1103 	/* Region between mailbox and rstat */
1104 	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
1105 	hw->lan_regs[1].addr_len = rstat_reg->start -
1106 					hw->lan_regs[1].addr_start;
1107 	/* Region after rstat */
1108 	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
1109 	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
1110 					hw->lan_regs[2].addr_start;
1111 
1112 	return 0;
1113 }
1114 
1115 /**
1116  * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
1117  * @adapter: Driver specific private structure
1118  *
1119  * Return: 0 on success or error code on failure.
1120  */
idpf_map_lan_mmio_regs(struct idpf_adapter * adapter)1121 static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
1122 {
1123 	struct pci_dev *pdev = adapter->pdev;
1124 	struct idpf_hw *hw = &adapter->hw;
1125 	resource_size_t res_start;
1126 
1127 	res_start = pci_resource_start(pdev, 0);
1128 
1129 	for (int i = 0; i < hw->num_lan_regs; i++) {
1130 		resource_size_t start;
1131 		long len;
1132 
1133 		len = hw->lan_regs[i].addr_len;
1134 		if (!len)
1135 			continue;
1136 		start = hw->lan_regs[i].addr_start + res_start;
1137 
1138 		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
1139 		if (!hw->lan_regs[i].vaddr) {
1140 			pci_err(pdev, "failed to allocate BAR0 region\n");
1141 			return -ENOMEM;
1142 		}
1143 	}
1144 
1145 	return 0;
1146 }
1147 
1148 /**
1149  * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
1150  * @adapter: adapter info struct
1151  * @rule: Flow steering rule to add/delete
1152  * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
1153  *          VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
1154  *
1155  * Send ADD/DELETE flow steering virtchnl message and receive the result.
1156  *
1157  * Return: 0 on success, negative on failure.
1158  */
idpf_add_del_fsteer_filters(struct idpf_adapter * adapter,struct virtchnl2_flow_rule_add_del * rule,enum virtchnl2_op opcode)1159 int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
1160 				struct virtchnl2_flow_rule_add_del *rule,
1161 				enum virtchnl2_op opcode)
1162 {
1163 	int rule_count = le32_to_cpu(rule->count);
1164 	struct idpf_vc_xn_params xn_params = {};
1165 	ssize_t reply_sz;
1166 
1167 	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
1168 	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
1169 		return -EINVAL;
1170 
1171 	xn_params.vc_op = opcode;
1172 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1173 	xn_params.async = false;
1174 	xn_params.send_buf.iov_base = rule;
1175 	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
1176 	xn_params.recv_buf.iov_base = rule;
1177 	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
1178 
1179 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1180 	return reply_sz < 0 ? reply_sz : 0;
1181 }
1182 
1183 /**
1184  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
1185  * @adapter: Driver specific private structure
1186  * @max_q: vport max queue structure
1187  */
idpf_vport_alloc_max_qs(struct idpf_adapter * adapter,struct idpf_vport_max_q * max_q)1188 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
1189 			    struct idpf_vport_max_q *max_q)
1190 {
1191 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1192 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1193 	u16 default_vports = idpf_get_default_vports(adapter);
1194 	u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
1195 
1196 	mutex_lock(&adapter->queue_lock);
1197 
1198 	/* Caps are device-wide. Give each vport an equal piece */
1199 	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
1200 	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
1201 	max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
1202 	max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
1203 
1204 	if (adapter->num_alloc_vports >= default_vports) {
1205 		max_rx_q = IDPF_MIN_Q;
1206 		max_tx_q = IDPF_MIN_Q;
1207 	}
1208 
1209 	/*
1210 	 * Harmonize the numbers. The current implementation always creates
1211 	 * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
1212 	 * one completion queue for each Tx queue for best performance.
1213 	 * If less buffer or completion queues is available, cap the number
1214 	 * of the corresponding Rx/Tx queues.
1215 	 */
1216 	max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
1217 	max_tx_q = min(max_tx_q, max_compl_q);
1218 
1219 	max_q->max_rxq = max_rx_q;
1220 	max_q->max_txq = max_tx_q;
1221 	max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
1222 	max_q->max_complq = max_tx_q;
1223 
1224 	if (avail_queues->avail_rxq < max_q->max_rxq ||
1225 	    avail_queues->avail_txq < max_q->max_txq ||
1226 	    avail_queues->avail_bufq < max_q->max_bufq ||
1227 	    avail_queues->avail_complq < max_q->max_complq) {
1228 		mutex_unlock(&adapter->queue_lock);
1229 
1230 		return -EINVAL;
1231 	}
1232 
1233 	avail_queues->avail_rxq -= max_q->max_rxq;
1234 	avail_queues->avail_txq -= max_q->max_txq;
1235 	avail_queues->avail_bufq -= max_q->max_bufq;
1236 	avail_queues->avail_complq -= max_q->max_complq;
1237 
1238 	mutex_unlock(&adapter->queue_lock);
1239 
1240 	return 0;
1241 }
1242 
1243 /**
1244  * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
1245  * @adapter: Driver specific private structure
1246  * @max_q: vport max queue structure
1247  */
idpf_vport_dealloc_max_qs(struct idpf_adapter * adapter,struct idpf_vport_max_q * max_q)1248 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
1249 			       struct idpf_vport_max_q *max_q)
1250 {
1251 	struct idpf_avail_queue_info *avail_queues;
1252 
1253 	mutex_lock(&adapter->queue_lock);
1254 	avail_queues = &adapter->avail_queues;
1255 
1256 	avail_queues->avail_rxq += max_q->max_rxq;
1257 	avail_queues->avail_txq += max_q->max_txq;
1258 	avail_queues->avail_bufq += max_q->max_bufq;
1259 	avail_queues->avail_complq += max_q->max_complq;
1260 
1261 	mutex_unlock(&adapter->queue_lock);
1262 }
1263 
1264 /**
1265  * idpf_init_avail_queues - Initialize available queues on the device
1266  * @adapter: Driver specific private structure
1267  */
idpf_init_avail_queues(struct idpf_adapter * adapter)1268 static void idpf_init_avail_queues(struct idpf_adapter *adapter)
1269 {
1270 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1271 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1272 
1273 	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
1274 	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
1275 	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
1276 	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
1277 }
1278 
1279 /**
1280  * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
1281  * @vport_config: persistent vport structure to store the queue register info
1282  * @schunks: source chunks to copy data from
1283  *
1284  * Return: 0 on success, negative on failure.
1285  */
1286 static int
idpf_vport_init_queue_reg_chunks(struct idpf_vport_config * vport_config,struct virtchnl2_queue_reg_chunks * schunks)1287 idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
1288 				 struct virtchnl2_queue_reg_chunks *schunks)
1289 {
1290 	struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
1291 	u16 num_chunks = le16_to_cpu(schunks->num_chunks);
1292 
1293 	kfree(q_info->queue_chunks);
1294 
1295 	q_info->queue_chunks = kzalloc_objs(*q_info->queue_chunks, num_chunks);
1296 	if (!q_info->queue_chunks) {
1297 		q_info->num_chunks = 0;
1298 		return -ENOMEM;
1299 	}
1300 
1301 	q_info->num_chunks = num_chunks;
1302 
1303 	for (u16 i = 0; i < num_chunks; i++) {
1304 		struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
1305 		struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];
1306 
1307 		dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
1308 		dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
1309 		dchunk->type = le32_to_cpu(schunk->type);
1310 		dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
1311 		dchunk->num_queues = le32_to_cpu(schunk->num_queues);
1312 	}
1313 
1314 	return 0;
1315 }
1316 
/**
 * idpf_get_reg_intr_vecs - Get vector queue register offset
 * @adapter: adapter structure to get the vector chunks
 * @reg_vals: Register offsets to store in
 *
 * Expands the per-chunk (base + spacing) vector register description received
 * over the mailbox into one explicit register set per vector.
 *
 * NOTE(review): no bounds check on @reg_vals — caller is assumed to size it
 * for the total vector count; confirm at call sites.
 *
 * Return: number of registers that got populated
 */
int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
			   struct idpf_vec_regs *reg_vals)
{
	struct virtchnl2_vector_chunks *chunks;
	struct idpf_vec_regs reg_val;	/* running register addresses */
	u16 num_vchunks, num_vec;
	int num_regs = 0, i, j;

	chunks = &adapter->req_vec_chunks->vchunks;
	num_vchunks = le16_to_cpu(chunks->num_vchunks);

	for (j = 0; j < num_vchunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u32 dynctl_reg_spacing;
		u32 itrn_reg_spacing;

		/* Each chunk describes a contiguous run of vectors via a
		 * start register plus a fixed spacing per vector.
		 */
		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);

		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);

		/* Materialize one entry per vector, advancing the running
		 * addresses by the chunk's spacing after each one.
		 */
		for (i = 0; i < num_vec; i++) {
			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
			reg_vals[num_regs].itrn_index_spacing =
						reg_val.itrn_index_spacing;

			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
			reg_val.itrn_reg += itrn_reg_spacing;
			num_regs++;
		}
	}

	return num_regs;
}
1363 
1364 /**
1365  * idpf_vport_get_q_reg - Get the queue registers for the vport
1366  * @reg_vals: register values needing to be set
1367  * @num_regs: amount we expect to fill
1368  * @q_type: queue model
1369  * @chunks: queue regs received over mailbox
1370  *
1371  * This function parses the queue register offsets from the queue register
1372  * chunk information, with a specific queue type and stores it into the array
1373  * passed as an argument. It returns the actual number of queue registers that
1374  * are filled.
1375  */
idpf_vport_get_q_reg(u32 * reg_vals,int num_regs,u32 q_type,struct idpf_queue_id_reg_info * chunks)1376 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
1377 				struct idpf_queue_id_reg_info *chunks)
1378 {
1379 	u16 num_chunks = chunks->num_chunks;
1380 	int reg_filled = 0, i;
1381 	u32 reg_val;
1382 
1383 	while (num_chunks--) {
1384 		struct idpf_queue_id_reg_chunk *chunk;
1385 		u16 num_q;
1386 
1387 		chunk = &chunks->queue_chunks[num_chunks];
1388 		if (chunk->type != q_type)
1389 			continue;
1390 
1391 		num_q = chunk->num_queues;
1392 		reg_val = chunk->qtail_reg_start;
1393 		for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
1394 			reg_vals[reg_filled++] = reg_val;
1395 			reg_val += chunk->qtail_reg_spacing;
1396 		}
1397 	}
1398 
1399 	return reg_filled;
1400 }
1401 
/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @rsrc: pointer to queue and vector resources
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Assigns a tail doorbell address to every queue of @q_type, consuming
 * entries of @reg_vals in order across all queue groups.
 *
 * Return number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport,
				 struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
				 int num_regs, u32 q_type)
{
	struct idpf_adapter *adapter = vport->adapter;
	int i, j, k = 0;	/* k indexes reg_vals across all groups */

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < rsrc->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
				tx_qgrp->txqs[j]->tail =
					idpf_get_reg_addr(adapter, reg_vals[k]);
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		/* Plain Rx queues exist only in the single-queue model */
		for (i = 0; i < rsrc->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
			u16 num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
				struct idpf_rx_queue *q;

				q = rx_qgrp->singleq.rxqs[j];
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		/* Buffer queues exist only in the split-queue model */
		for (i = 0; i < rsrc->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
			u8 num_bufqs = rsrc->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		/* Unknown types initialize nothing; caller sees k == 0 */
		break;
	}

	return k;
}
1463 
1464 /**
1465  * idpf_queue_reg_init - initialize queue registers
1466  * @vport: virtual port structure
1467  * @rsrc: pointer to queue and vector resources
1468  * @chunks: queue registers received over mailbox
1469  *
1470  * Return: 0 on success, negative on failure
1471  */
idpf_queue_reg_init(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc,struct idpf_queue_id_reg_info * chunks)1472 int idpf_queue_reg_init(struct idpf_vport *vport,
1473 			struct idpf_q_vec_rsrc *rsrc,
1474 			struct idpf_queue_id_reg_info *chunks)
1475 {
1476 	int num_regs, ret = 0;
1477 	u32 *reg_vals;
1478 
1479 	/* We may never deal with more than 256 same type of queues */
1480 	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
1481 	if (!reg_vals)
1482 		return -ENOMEM;
1483 
1484 	/* Initialize Tx queue tail register address */
1485 	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1486 					VIRTCHNL2_QUEUE_TYPE_TX,
1487 					chunks);
1488 	if (num_regs < rsrc->num_txq) {
1489 		ret = -EINVAL;
1490 		goto free_reg_vals;
1491 	}
1492 
1493 	num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
1494 					 VIRTCHNL2_QUEUE_TYPE_TX);
1495 	if (num_regs < rsrc->num_txq) {
1496 		ret = -EINVAL;
1497 		goto free_reg_vals;
1498 	}
1499 
1500 	/* Initialize Rx/buffer queue tail register address based on Rx queue
1501 	 * model
1502 	 */
1503 	if (idpf_is_queue_model_split(rsrc->rxq_model)) {
1504 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1505 						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
1506 						chunks);
1507 		if (num_regs < rsrc->num_bufq) {
1508 			ret = -EINVAL;
1509 			goto free_reg_vals;
1510 		}
1511 
1512 		num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
1513 						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1514 		if (num_regs < rsrc->num_bufq) {
1515 			ret = -EINVAL;
1516 			goto free_reg_vals;
1517 		}
1518 	} else {
1519 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1520 						VIRTCHNL2_QUEUE_TYPE_RX,
1521 						chunks);
1522 		if (num_regs < rsrc->num_rxq) {
1523 			ret = -EINVAL;
1524 			goto free_reg_vals;
1525 		}
1526 
1527 		num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
1528 						 VIRTCHNL2_QUEUE_TYPE_RX);
1529 		if (num_regs < rsrc->num_rxq) {
1530 			ret = -EINVAL;
1531 			goto free_reg_vals;
1532 		}
1533 	}
1534 
1535 free_reg_vals:
1536 	kfree(reg_vals);
1537 
1538 	return ret;
1539 }
1540 
/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message. The request and reply buffers are
 * cached in adapter->vport_params_reqd/recvd[] and reused on subsequent
 * calls for the same vport index.
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
	struct idpf_vc_xn_params xn_params = {};
	u16 idx = adapter->next_vport;
	int err, buf_size;
	ssize_t reply_sz;

	/* Allocate the request buffer only once; later calls reuse it */
	buf_size = sizeof(struct virtchnl2_create_vport);
	if (!adapter->vport_params_reqd[idx]) {
		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
							  GFP_KERNEL);
		if (!adapter->vport_params_reqd[idx])
			return -ENOMEM;
	}

	vport_msg = adapter->vport_params_reqd[idx];
	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	vport_msg->vport_index = cpu_to_le16(idx);

	/* Splitq is the default; singleq only when requested and compiled in */
	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	/* NOTE(review): on this failure the cached vport_params_reqd[idx]
	 * buffer is deliberately kept for a later retry — confirm that is
	 * the intent vs. the freeing error path below.
	 */
	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
	if (err) {
		dev_err(&adapter->pdev->dev, "Enough queues are not available");

		return err;
	}

	/* Reply buffer is also cached and reused across calls */
	if (!adapter->vport_params_recvd[idx]) {
		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
							   GFP_KERNEL);
		if (!adapter->vport_params_recvd[idx]) {
			err = -ENOMEM;
			goto free_vport_params;
		}
	}

	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
	xn_params.send_buf.iov_base = vport_msg;
	xn_params.send_buf.iov_len = buf_size;
	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto free_vport_params;
	}

	return 0;

free_vport_params:
	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;

	return err;
}
1619 
1620 /**
1621  * idpf_check_supported_desc_ids - Verify we have required descriptor support
1622  * @vport: virtual port structure
1623  *
1624  * Return 0 on success, error on failure
1625  */
idpf_check_supported_desc_ids(struct idpf_vport * vport)1626 int idpf_check_supported_desc_ids(struct idpf_vport *vport)
1627 {
1628 	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
1629 	struct idpf_adapter *adapter = vport->adapter;
1630 	struct virtchnl2_create_vport *vport_msg;
1631 	u64 rx_desc_ids, tx_desc_ids;
1632 
1633 	vport_msg = adapter->vport_params_recvd[vport->idx];
1634 
1635 	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
1636 	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
1637 	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
1638 		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
1639 		return -EOPNOTSUPP;
1640 	}
1641 
1642 	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
1643 	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
1644 
1645 	if (idpf_is_queue_model_split(rsrc->rxq_model)) {
1646 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
1647 			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
1648 			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1649 		}
1650 	} else {
1651 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
1652 			rsrc->base_rxd = true;
1653 	}
1654 
1655 	if (!idpf_is_queue_model_split(rsrc->txq_model))
1656 		return 0;
1657 
1658 	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
1659 		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
1660 		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
1661 	}
1662 
1663 	return 0;
1664 }
1665 
1666 /**
1667  * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1668  * @adapter: adapter pointer used to send virtchnl message
1669  * @vport_id: vport identifier used while preparing the virtchnl message
1670  *
1671  * Return: 0 on success, negative on failure.
1672  */
idpf_send_destroy_vport_msg(struct idpf_adapter * adapter,u32 vport_id)1673 int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
1674 {
1675 	struct idpf_vc_xn_params xn_params = {};
1676 	struct virtchnl2_vport v_id;
1677 	ssize_t reply_sz;
1678 
1679 	v_id.vport_id = cpu_to_le32(vport_id);
1680 
1681 	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
1682 	xn_params.send_buf.iov_base = &v_id;
1683 	xn_params.send_buf.iov_len = sizeof(v_id);
1684 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1685 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1686 
1687 	return reply_sz < 0 ? reply_sz : 0;
1688 }
1689 
1690 /**
1691  * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1692  * @adapter: adapter pointer used to send virtchnl message
1693  * @vport_id: vport identifier used while preparing the virtchnl message
1694  *
1695  * Return: 0 on success, negative on failure.
1696  */
idpf_send_enable_vport_msg(struct idpf_adapter * adapter,u32 vport_id)1697 int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
1698 {
1699 	struct idpf_vc_xn_params xn_params = {};
1700 	struct virtchnl2_vport v_id;
1701 	ssize_t reply_sz;
1702 
1703 	v_id.vport_id = cpu_to_le32(vport_id);
1704 
1705 	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
1706 	xn_params.send_buf.iov_base = &v_id;
1707 	xn_params.send_buf.iov_len = sizeof(v_id);
1708 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1709 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1710 
1711 	return reply_sz < 0 ? reply_sz : 0;
1712 }
1713 
1714 /**
1715  * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1716  * @adapter: adapter pointer used to send virtchnl message
1717  * @vport_id: vport identifier used while preparing the virtchnl message
1718  *
1719  * Return: 0 on success, negative on failure.
1720  */
idpf_send_disable_vport_msg(struct idpf_adapter * adapter,u32 vport_id)1721 int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
1722 {
1723 	struct idpf_vc_xn_params xn_params = {};
1724 	struct virtchnl2_vport v_id;
1725 	ssize_t reply_sz;
1726 
1727 	v_id.vport_id = cpu_to_le32(vport_id);
1728 
1729 	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
1730 	xn_params.send_buf.iov_base = &v_id;
1731 	xn_params.send_buf.iov_len = sizeof(v_id);
1732 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1733 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1734 
1735 	return reply_sz < 0 ? reply_sz : 0;
1736 }
1737 
1738 /**
1739  * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
1740  * @rsrc: pointer to queue and vector resources
1741  * @q: Tx queue to be inserted into VC chunk
1742  * @qi: pointer to the buffer containing the VC chunk
1743  */
idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc * rsrc,const struct idpf_tx_queue * q,struct virtchnl2_txq_info * qi)1744 static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1745 				       const struct idpf_tx_queue *q,
1746 				       struct virtchnl2_txq_info *qi)
1747 {
1748 	u32 val;
1749 
1750 	qi->queue_id = cpu_to_le32(q->q_id);
1751 	qi->model = cpu_to_le16(rsrc->txq_model);
1752 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1753 	qi->ring_len = cpu_to_le16(q->desc_count);
1754 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1755 	qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
1756 
1757 	if (!idpf_is_queue_model_split(rsrc->txq_model)) {
1758 		qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1759 		return;
1760 	}
1761 
1762 	if (idpf_queue_has(XDP, q))
1763 		val = q->complq->q_id;
1764 	else
1765 		val = q->txq_grp->complq->q_id;
1766 
1767 	qi->tx_compl_queue_id = cpu_to_le16(val);
1768 
1769 	if (idpf_queue_has(FLOW_SCH_EN, q))
1770 		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1771 	else
1772 		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1773 
1774 	qi->sched_mode = cpu_to_le16(val);
1775 }
1776 
1777 /**
1778  * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
1779  * @rsrc: pointer to queue and vector resources
1780  * @q: completion queue to be inserted into VC chunk
1781  * @qi: pointer to the buffer containing the VC chunk
1782  */
idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc * rsrc,const struct idpf_compl_queue * q,struct virtchnl2_txq_info * qi)1783 static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1784 					  const struct idpf_compl_queue *q,
1785 					  struct virtchnl2_txq_info *qi)
1786 {
1787 	u32 val;
1788 
1789 	qi->queue_id = cpu_to_le32(q->q_id);
1790 	qi->model = cpu_to_le16(rsrc->txq_model);
1791 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1792 	qi->ring_len = cpu_to_le16(q->desc_count);
1793 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1794 
1795 	if (idpf_queue_has(FLOW_SCH_EN, q))
1796 		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1797 	else
1798 		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1799 
1800 	qi->sched_mode = cpu_to_le16(val);
1801 }
1802 
1803 /**
1804  * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
1805  * @vport_id: ID of virtual port queues are associated with
1806  * @buf: buffer containing the message
1807  * @pos: pointer to the first chunk describing the tx queue
1808  * @num_chunks: number of chunks in the message
1809  *
1810  * Helper function for preparing the message describing configuration of
1811  * Tx queues.
1812  *
1813  * Return: the total size of the prepared message.
1814  */
idpf_prepare_cfg_txqs_msg(u32 vport_id,void * buf,const void * pos,u32 num_chunks)1815 static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
1816 				     u32 num_chunks)
1817 {
1818 	struct virtchnl2_config_tx_queues *ctq = buf;
1819 
1820 	ctq->vport_id = cpu_to_le32(vport_id);
1821 	ctq->num_qinfo = cpu_to_le16(num_chunks);
1822 	memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
1823 
1824 	return struct_size(ctq, qinfo, num_chunks);
1825 }
1826 
1827 /**
1828  * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
1829  *				       message for selected queues
1830  * @qs: set of the Tx queues to configure
1831  *
1832  * Send config queues virtchnl message for queues contained in the @qs array.
1833  * The @qs array can contain Tx queues (or completion queues) only.
1834  *
1835  * Return: 0 on success, -errno on failure.
1836  */
idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set * qs)1837 static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
1838 {
1839 	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
1840 	struct idpf_chunked_msg_params params = {
1841 		.vport_id	= qs->vport_id,
1842 		.vc_op		= VIRTCHNL2_OP_CONFIG_TX_QUEUES,
1843 		.prepare_msg	= idpf_prepare_cfg_txqs_msg,
1844 		.config_sz	= sizeof(struct virtchnl2_config_tx_queues),
1845 		.chunk_sz	= sizeof(*qi),
1846 	};
1847 
1848 	qi = kzalloc_objs(*qi, qs->num);
1849 	if (!qi)
1850 		return -ENOMEM;
1851 
1852 	params.chunks = qi;
1853 
1854 	for (u32 i = 0; i < qs->num; i++) {
1855 		if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
1856 			idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
1857 						   &qi[params.num_chunks++]);
1858 		else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
1859 			idpf_fill_complq_config_chunk(qs->qv_rsrc,
1860 						      qs->qs[i].complq,
1861 						      &qi[params.num_chunks++]);
1862 	}
1863 
1864 	return idpf_send_chunked_msg(qs->adapter, &params);
1865 }
1866 
1867 /**
1868  * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
1869  * @adapter: adapter pointer used to send virtchnl message
1870  * @rsrc: pointer to queue and vector resources
1871  * @vport_id: vport identifier used while preparing the virtchnl message
1872  *
1873  * Return: 0 on success, -errno on failure.
1874  */
idpf_send_config_tx_queues_msg(struct idpf_adapter * adapter,struct idpf_q_vec_rsrc * rsrc,u32 vport_id)1875 static int idpf_send_config_tx_queues_msg(struct idpf_adapter *adapter,
1876 					  struct idpf_q_vec_rsrc *rsrc,
1877 					  u32 vport_id)
1878 {
1879 	struct idpf_queue_set *qs __free(kfree) = NULL;
1880 	u32 totqs = rsrc->num_txq + rsrc->num_complq;
1881 	u32 k = 0;
1882 
1883 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
1884 	if (!qs)
1885 		return -ENOMEM;
1886 
1887 	/* Populate the queue info buffer with all queue context info */
1888 	for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
1889 		const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
1890 
1891 		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
1892 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
1893 			qs->qs[k++].txq = tx_qgrp->txqs[j];
1894 		}
1895 
1896 		if (idpf_is_queue_model_split(rsrc->txq_model)) {
1897 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1898 			qs->qs[k++].complq = tx_qgrp->complq;
1899 		}
1900 	}
1901 
1902 	/* Make sure accounting agrees */
1903 	if (k != totqs)
1904 		return -EINVAL;
1905 
1906 	return idpf_send_config_tx_queue_set_msg(qs);
1907 }
1908 
/**
 * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
 * @rsrc: pointer to queue and vector resources
 * @q: Rx queue to be inserted into VC chunk
 * @qi: pointer to the buffer containing the VC chunk
 *
 * Note: in the split queue model, this also writes the buffer sizes taken
 * from the first associated buffer queue back into @q.
 */
static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
				       struct idpf_rx_queue *q,
				       struct virtchnl2_rxq_info *qi)
{
	const struct idpf_bufq_set *sets;

	qi->queue_id = cpu_to_le32(q->q_id);
	qi->model = cpu_to_le16(rsrc->rxq_model);
	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
	qi->ring_len = cpu_to_le16(q->desc_count);
	qi->dma_ring_addr = cpu_to_le64(q->dma);
	qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
	qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
	/* First flag write is an assignment; subsequent flags are OR-ed in. */
	qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
	if (idpf_queue_has(RSC_EN, q))
		qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);

	/* Singleq model: the queue owns its buffers, so the buffer size and
	 * descriptor IDs come straight from the queue itself.
	 */
	if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
		qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
		qi->desc_ids = cpu_to_le64(q->rxdids);

		return;
	}

	sets = q->bufq_sets;

	/*
	 * In splitq mode, RxQ buffer size should be set to that of the first
	 * buffer queue associated with this RxQ.
	 */
	q->rx_buf_size = sets[0].bufq.rx_buf_size;
	qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);

	qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
	/* Advertise the second buffer queue only when the group has one. */
	if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
		qi->bufq2_ena = IDPF_BUFQ2_ENA;
		qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
	}

	/* Mirror the header buffer size from the first buffer queue too. */
	q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;

	if (idpf_queue_has(HSPLIT_EN, q)) {
		qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
		qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
	}

	qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
1963 
1964 /**
1965  * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
1966  * @rsrc: pointer to queue and vector resources
1967  * @q: buffer queue to be inserted into VC chunk
1968  * @qi: pointer to the buffer containing the VC chunk
1969  */
idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc * rsrc,const struct idpf_buf_queue * q,struct virtchnl2_rxq_info * qi)1970 static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1971 					const struct idpf_buf_queue *q,
1972 					struct virtchnl2_rxq_info *qi)
1973 {
1974 	qi->queue_id = cpu_to_le32(q->q_id);
1975 	qi->model = cpu_to_le16(rsrc->rxq_model);
1976 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1977 	qi->ring_len = cpu_to_le16(q->desc_count);
1978 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1979 	qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
1980 	qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
1981 	qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1982 	qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1983 	if (idpf_queue_has(RSC_EN, q))
1984 		qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1985 
1986 	if (idpf_queue_has(HSPLIT_EN, q)) {
1987 		qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1988 		qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
1989 	}
1990 }
1991 
1992 /**
1993  * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
1994  * @vport_id: ID of virtual port queues are associated with
1995  * @buf: buffer containing the message
1996  * @pos: pointer to the first chunk describing the rx queue
1997  * @num_chunks: number of chunks in the message
1998  *
1999  * Helper function for preparing the message describing configuration of
2000  * Rx queues.
2001  *
2002  * Return: the total size of the prepared message.
2003  */
idpf_prepare_cfg_rxqs_msg(u32 vport_id,void * buf,const void * pos,u32 num_chunks)2004 static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos,
2005 				     u32 num_chunks)
2006 {
2007 	struct virtchnl2_config_rx_queues *crq = buf;
2008 
2009 	crq->vport_id = cpu_to_le32(vport_id);
2010 	crq->num_qinfo = cpu_to_le16(num_chunks);
2011 	memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
2012 
2013 	return struct_size(crq, qinfo, num_chunks);
2014 }
2015 
2016 /**
2017  * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
2018  *				       for selected queues.
2019  * @qs: set of the Rx queues to configure
2020  *
2021  * Send config queues virtchnl message for queues contained in the @qs array.
2022  * The @qs array can contain Rx queues (or buffer queues) only.
2023  *
2024  * Return: 0 on success, -errno on failure.
2025  */
idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set * qs)2026 static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
2027 {
2028 	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
2029 	struct idpf_chunked_msg_params params = {
2030 		.vport_id	= qs->vport_id,
2031 		.vc_op		= VIRTCHNL2_OP_CONFIG_RX_QUEUES,
2032 		.prepare_msg	= idpf_prepare_cfg_rxqs_msg,
2033 		.config_sz	= sizeof(struct virtchnl2_config_rx_queues),
2034 		.chunk_sz	= sizeof(*qi),
2035 	};
2036 
2037 	qi = kzalloc_objs(*qi, qs->num);
2038 	if (!qi)
2039 		return -ENOMEM;
2040 
2041 	params.chunks = qi;
2042 
2043 	for (u32 i = 0; i < qs->num; i++) {
2044 		if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
2045 			idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
2046 						   &qi[params.num_chunks++]);
2047 		else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
2048 			idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq,
2049 						    &qi[params.num_chunks++]);
2050 	}
2051 
2052 	return idpf_send_chunked_msg(qs->adapter, &params);
2053 }
2054 
2055 /**
2056  * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
2057  * @adapter: adapter pointer used to send virtchnl message
2058  * @rsrc: pointer to queue and vector resources
2059  * @vport_id: vport identifier used while preparing the virtchnl message
2060  *
2061  * Return: 0 on success, -errno on failure.
2062  */
idpf_send_config_rx_queues_msg(struct idpf_adapter * adapter,struct idpf_q_vec_rsrc * rsrc,u32 vport_id)2063 static int idpf_send_config_rx_queues_msg(struct idpf_adapter *adapter,
2064 					  struct idpf_q_vec_rsrc *rsrc,
2065 					  u32 vport_id)
2066 {
2067 	bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
2068 	struct idpf_queue_set *qs __free(kfree) = NULL;
2069 	u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
2070 	u32 k = 0;
2071 
2072 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
2073 	if (!qs)
2074 		return -ENOMEM;
2075 
2076 	/* Populate the queue info buffer with all queue context info */
2077 	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
2078 		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
2079 		u32 num_rxq;
2080 
2081 		if (!splitq) {
2082 			num_rxq = rx_qgrp->singleq.num_rxq;
2083 			goto rxq;
2084 		}
2085 
2086 		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
2087 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
2088 			qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
2089 		}
2090 
2091 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
2092 
2093 rxq:
2094 		for (u32 j = 0; j < num_rxq; j++) {
2095 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
2096 
2097 			if (splitq)
2098 				qs->qs[k++].rxq =
2099 					&rx_qgrp->splitq.rxq_sets[j]->rxq;
2100 			else
2101 				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
2102 		}
2103 	}
2104 
2105 	/* Make sure accounting agrees */
2106 	if (k != totqs)
2107 		return -EINVAL;
2108 
2109 	return idpf_send_config_rx_queue_set_msg(qs);
2110 }
2111 
2112 /**
2113  * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
2114  *				 queues
2115  * @vport_id: ID of virtual port queues are associated with
2116  * @buf: buffer containing the message
2117  * @pos: pointer to the first chunk describing the queue
2118  * @num_chunks: number of chunks in the message
2119  *
2120  * Helper function for preparing the message describing queues to be enabled
2121  * or disabled.
2122  *
2123  * Return: the total size of the prepared message.
2124  */
idpf_prepare_ena_dis_qs_msg(u32 vport_id,void * buf,const void * pos,u32 num_chunks)2125 static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos,
2126 				       u32 num_chunks)
2127 {
2128 	struct virtchnl2_del_ena_dis_queues *eq = buf;
2129 
2130 	eq->vport_id = cpu_to_le32(vport_id);
2131 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2132 	memcpy(eq->chunks.chunks, pos,
2133 	       num_chunks * sizeof(*eq->chunks.chunks));
2134 
2135 	return struct_size(eq, chunks.chunks, num_chunks);
2136 }
2137 
/**
 * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
 *				     message for selected queues
 * @qs: set of the queues to enable or disable
 * @en: whether to enable or disable queues
 *
 * Send enable or disable queues virtchnl message for queues contained
 * in the @qs array.
 * The @qs array can contain pointers to both Rx and Tx queues.
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
					   bool en)
{
	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
	struct idpf_chunked_msg_params params = {
		.vport_id	= qs->vport_id,
		.vc_op		= en ? VIRTCHNL2_OP_ENABLE_QUEUES :
				       VIRTCHNL2_OP_DISABLE_QUEUES,
		.prepare_msg	= idpf_prepare_ena_dis_qs_msg,
		.config_sz	= sizeof(struct virtchnl2_del_ena_dis_queues),
		.chunk_sz	= sizeof(*qc),
		.num_chunks	= qs->num,
	};

	qc = kzalloc_objs(*qc, qs->num);
	if (!qc)
		return -ENOMEM;

	params.chunks = qc;

	/* One chunk per queue: each chunk addresses exactly one queue ID. */
	for (u32 i = 0; i < qs->num; i++) {
		const struct idpf_queue_ptr *q = &qs->qs[i];
		u32 qid;

		qc[i].type = cpu_to_le32(q->type);
		qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);

		/* Pick the queue ID out of the union member that matches the
		 * queue type; anything unexpected aborts the whole message.
		 */
		switch (q->type) {
		case VIRTCHNL2_QUEUE_TYPE_RX:
			qid = q->rxq->q_id;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX:
			qid = q->txq->q_id;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			qid = q->bufq->q_id;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			qid = q->complq->q_id;
			break;
		default:
			return -EINVAL;
		}

		qc[i].start_queue_id = cpu_to_le32(qid);
	}

	return idpf_send_chunked_msg(qs->adapter, &params);
}
2199 
/**
 * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
 *				  message
 * @adapter: adapter pointer used to send virtchnl message
 * @rsrc: pointer to queue and vector resources
 * @vport_id: vport identifier used while preparing the virtchnl message
 * @en: whether to enable or disable queues
 *
 * Build a queue set covering every queue of the vport (Tx, completion, Rx
 * and buffer queues) and send the enable/disable message for it.
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_ena_dis_queues_msg(struct idpf_adapter *adapter,
					struct idpf_q_vec_rsrc *rsrc,
					u32 vport_id, bool en)
{
	struct idpf_queue_set *qs __free(kfree) = NULL;
	u32 num_txq, num_q, k = 0;
	bool split;

	num_txq = rsrc->num_txq + rsrc->num_complq;
	num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;

	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
	if (!qs)
		return -ENOMEM;

	split = idpf_is_queue_model_split(rsrc->txq_model);

	for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
		const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];

		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[k++].txq = tx_qgrp->txqs[j];
		}

		/* Completion queues exist only in the split Tx model. */
		if (!split)
			continue;

		qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
		qs->qs[k++].complq = tx_qgrp->complq;
	}

	/* Make sure Tx accounting agrees before moving on to Rx. */
	if (k != num_txq)
		return -EINVAL;

	split = idpf_is_queue_model_split(rsrc->rxq_model);

	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
		u32 num_rxq;

		if (split)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;

			if (split)
				qs->qs[k++].rxq =
					&rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
		}

		/* Buffer queues exist only in the split Rx model. */
		if (!split)
			continue;

		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
			qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
		}
	}

	/* Make sure overall accounting agrees */
	if (k != num_q)
		return -EINVAL;

	return idpf_send_ena_dis_queue_set_msg(qs, en);
}
2280 
2281 /**
2282  * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
2283  *					      queue set to the interrupt vector
2284  * @vport_id: ID of virtual port queues are associated with
2285  * @buf: buffer containing the message
2286  * @pos: pointer to the first chunk describing the vector mapping
2287  * @num_chunks: number of chunks in the message
2288  *
2289  * Helper function for preparing the message describing mapping queues to
2290  * q_vectors.
2291  *
2292  * Return: the total size of the prepared message.
2293  */
2294 static u32
idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id,void * buf,const void * pos,u32 num_chunks)2295 idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf,
2296 					 const void *pos, u32 num_chunks)
2297 {
2298 	struct virtchnl2_queue_vector_maps *vqvm = buf;
2299 
2300 	vqvm->vport_id = cpu_to_le32(vport_id);
2301 	vqvm->num_qv_maps = cpu_to_le16(num_chunks);
2302 	memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
2303 
2304 	return struct_size(vqvm, qv_maps, num_chunks);
2305 }
2306 
/**
 * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
 *					      queue set vector message
 * @qs: set of the queues to map or unmap
 * @map: true for map and false for unmap
 *
 * Build one queue-vector mapping chunk per queue in @qs and send the
 * (un)map message. Only Rx and Tx queues are valid here; other types
 * abort with -EINVAL.
 *
 * Return: 0 on success, -errno on failure.
 */
static int
idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
					 bool map)
{
	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
	struct idpf_chunked_msg_params params = {
		.vport_id	= qs->vport_id,
		.vc_op		= map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
					VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
		.prepare_msg	= idpf_prep_map_unmap_queue_set_vector_msg,
		.config_sz	= sizeof(struct virtchnl2_queue_vector_maps),
		.chunk_sz	= sizeof(*vqv),
		.num_chunks	= qs->num,
	};
	bool split;

	vqv = kzalloc_objs(*vqv, qs->num);
	if (!vqv)
		return -ENOMEM;

	params.chunks = vqv;

	split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model);

	for (u32 i = 0; i < qs->num; i++) {
		const struct idpf_queue_ptr *q = &qs->qs[i];
		const struct idpf_q_vector *vec;
		u32 qid, v_idx, itr_idx;

		vqv[i].queue_type = cpu_to_le32(q->type);

		switch (q->type) {
		case VIRTCHNL2_QUEUE_TYPE_RX:
			qid = q->rxq->q_id;

			/* NOIRQ queues have no vector of their own. */
			if (idpf_queue_has(NOIRQ, q->rxq))
				vec = NULL;
			else
				vec = q->rxq->q_vector;

			if (vec) {
				v_idx = vec->v_idx;
				itr_idx = vec->rx_itr_idx;
			} else {
				/* Fall back to the reserved no-IRQ vector. */
				v_idx = qs->qv_rsrc->noirq_v_idx;
				itr_idx = VIRTCHNL2_ITR_IDX_0;
			}
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX:
			qid = q->txq->q_id;

			/* Tx vector selection: NOIRQ queues get none; XDP
			 * queues use their completion queue's vector; split
			 * model uses the group completion queue's vector;
			 * otherwise the queue's own vector.
			 */
			if (idpf_queue_has(NOIRQ, q->txq))
				vec = NULL;
			else if (idpf_queue_has(XDP, q->txq))
				vec = q->txq->complq->q_vector;
			else if (split)
				vec = q->txq->txq_grp->complq->q_vector;
			else
				vec = q->txq->q_vector;

			if (vec) {
				v_idx = vec->v_idx;
				itr_idx = vec->tx_itr_idx;
			} else {
				v_idx = qs->qv_rsrc->noirq_v_idx;
				itr_idx = VIRTCHNL2_ITR_IDX_1;
			}
			break;
		default:
			return -EINVAL;
		}

		vqv[i].queue_id = cpu_to_le32(qid);
		vqv[i].vector_id = cpu_to_le16(v_idx);
		vqv[i].itr_idx = cpu_to_le32(itr_idx);
	}

	return idpf_send_chunked_msg(qs->adapter, &params);
}
2394 
2395 /**
2396  * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
2397  *					  vector message
2398  * @adapter: adapter pointer used to send virtchnl message
2399  * @rsrc: pointer to queue and vector resources
2400  * @vport_id: vport identifier used while preparing the virtchnl message
2401  * @map: true for map and false for unmap
2402  *
2403  * Return: 0 on success, -errno on failure.
2404  */
idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter * adapter,struct idpf_q_vec_rsrc * rsrc,u32 vport_id,bool map)2405 int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
2406 					 struct idpf_q_vec_rsrc *rsrc,
2407 					 u32 vport_id, bool map)
2408 {
2409 	struct idpf_queue_set *qs __free(kfree) = NULL;
2410 	u32 num_q = rsrc->num_txq + rsrc->num_rxq;
2411 	u32 k = 0;
2412 
2413 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
2414 	if (!qs)
2415 		return -ENOMEM;
2416 
2417 	for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
2418 		const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
2419 
2420 		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
2421 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
2422 			qs->qs[k++].txq = tx_qgrp->txqs[j];
2423 		}
2424 	}
2425 
2426 	if (k != rsrc->num_txq)
2427 		return -EINVAL;
2428 
2429 	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
2430 		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
2431 		u32 num_rxq;
2432 
2433 		if (idpf_is_queue_model_split(rsrc->rxq_model))
2434 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
2435 		else
2436 			num_rxq = rx_qgrp->singleq.num_rxq;
2437 
2438 		for (u32 j = 0; j < num_rxq; j++) {
2439 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
2440 
2441 			if (idpf_is_queue_model_split(rsrc->rxq_model))
2442 				qs->qs[k++].rxq =
2443 					&rx_qgrp->splitq.rxq_sets[j]->rxq;
2444 			else
2445 				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
2446 		}
2447 	}
2448 
2449 	if (k != num_q)
2450 		return -EINVAL;
2451 
2452 	return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
2453 }
2454 
2455 /**
2456  * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
2457  *				    selected queues
2458  * @qs: set of the queues
2459  *
2460  * Send enable queues virtchnl message for queues contained in the @qs array.
2461  *
2462  * Return: 0 on success, -errno on failure.
2463  */
idpf_send_enable_queue_set_msg(const struct idpf_queue_set * qs)2464 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs)
2465 {
2466 	return idpf_send_ena_dis_queue_set_msg(qs, true);
2467 }
2468 
2469 /**
2470  * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
2471  *				     selected queues
2472  * @qs: set of the queues
2473  *
2474  * Return: 0 on success, -errno on failure.
2475  */
idpf_send_disable_queue_set_msg(const struct idpf_queue_set * qs)2476 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
2477 {
2478 	int err;
2479 
2480 	err = idpf_send_ena_dis_queue_set_msg(qs, false);
2481 	if (err)
2482 		return err;
2483 
2484 	return idpf_wait_for_marker_event_set(qs);
2485 }
2486 
/**
 * idpf_send_config_queue_set_msg - send virtchnl config queues message for
 *				    selected queues
 * @qs: set of the queues
 *
 * Send config queues virtchnl message for queues contained in the @qs array:
 * Tx-side queues first, then Rx-side queues.
 * The @qs array can contain both Rx or Tx queues.
 *
 * Return: 0 on success, -errno on failure.
 */
int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
{
	int ret;

	ret = idpf_send_config_tx_queue_set_msg(qs);
	if (!ret)
		ret = idpf_send_config_rx_queue_set_msg(qs);

	return ret;
}
2507 
2508 /**
2509  * idpf_send_enable_queues_msg - send enable queues virtchnl message
2510  * @vport: Virtual port private data structure
2511  *
2512  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
2513  * failure.
2514  */
idpf_send_enable_queues_msg(struct idpf_vport * vport)2515 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
2516 {
2517 	return idpf_send_ena_dis_queues_msg(vport->adapter,
2518 					    &vport->dflt_qv_rsrc,
2519 					    vport->vport_id, true);
2520 }
2521 
2522 /**
2523  * idpf_send_disable_queues_msg - send disable queues virtchnl message
2524  * @vport: Virtual port private data structure
2525  *
2526  * Will send disable queues virtchnl message.  Returns 0 on success, negative
2527  * on failure.
2528  */
idpf_send_disable_queues_msg(struct idpf_vport * vport)2529 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
2530 {
2531 	int err;
2532 
2533 	err = idpf_send_ena_dis_queues_msg(vport->adapter,
2534 					   &vport->dflt_qv_rsrc,
2535 					   vport->vport_id, false);
2536 	if (err)
2537 		return err;
2538 
2539 	return idpf_wait_for_marker_event(vport);
2540 }
2541 
2542 /**
2543  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2544  * structure
2545  * @dchunks: Destination chunks to store data to
2546  * @schunks: Source chunks to copy data from
2547  * @num_chunks: number of chunks to copy
2548  */
idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk * dchunks,struct idpf_queue_id_reg_chunk * schunks,u16 num_chunks)2549 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
2550 					     struct idpf_queue_id_reg_chunk *schunks,
2551 					     u16 num_chunks)
2552 {
2553 	u16 i;
2554 
2555 	for (i = 0; i < num_chunks; i++) {
2556 		dchunks[i].type = cpu_to_le32(schunks[i].type);
2557 		dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id);
2558 		dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues);
2559 	}
2560 }
2561 
/**
 * idpf_send_delete_queues_msg - send delete queues virtchnl message
 * @adapter: adapter pointer used to send virtchnl message
 * @chunks: queue ids received over mailbox
 * @vport_id: vport identifier used while preparing the virtchnl message
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
				struct idpf_queue_id_reg_info *chunks,
				u32 vport_id)
{
	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;
	u16 num_chunks;
	int buf_size;

	num_chunks = chunks->num_chunks;
	buf_size = struct_size(eq, chunks.chunks, num_chunks);

	eq = kzalloc(buf_size, GFP_KERNEL);
	if (!eq)
		return -ENOMEM;

	eq->vport_id = cpu_to_le32(vport_id);
	eq->chunks.num_chunks = cpu_to_le16(num_chunks);

	/* Driver-side register chunks must be translated to the LE wire
	 * format expected by the control plane.
	 */
	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,
					 num_chunks);

	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = eq;
	xn_params.send_buf.iov_len = buf_size;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	/* Only the transaction status matters; no reply payload expected. */
	return reply_sz < 0 ? reply_sz : 0;
}
2601 
2602 /**
2603  * idpf_send_config_queues_msg - Send config queues virtchnl message
2604  * @adapter: adapter pointer used to send virtchnl message
2605  * @rsrc: pointer to queue and vector resources
2606  * @vport_id: vport identifier used while preparing the virtchnl message
2607  *
2608  * Return: 0 on success, negative on failure.
2609  */
idpf_send_config_queues_msg(struct idpf_adapter * adapter,struct idpf_q_vec_rsrc * rsrc,u32 vport_id)2610 int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
2611 				struct idpf_q_vec_rsrc *rsrc,
2612 				u32 vport_id)
2613 {
2614 	int err;
2615 
2616 	err = idpf_send_config_tx_queues_msg(adapter, rsrc, vport_id);
2617 	if (err)
2618 		return err;
2619 
2620 	return idpf_send_config_rx_queues_msg(adapter, rsrc, vport_id);
2621 }
2622 
/**
 * idpf_send_add_queues_msg - Send virtchnl add queues message
 * @adapter: adapter pointer used to send virtchnl message
 * @vport_config: vport persistent structure to store the queue chunk info
 * @rsrc: pointer to queue and vector resources
 * @vport_id: vport identifier used while preparing the virtchnl message
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
			     struct idpf_vport_config *vport_config,
			     struct idpf_q_vec_rsrc *rsrc,
			     u32 vport_id)
{
	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_add_queues aq = {};
	ssize_t reply_sz;
	int size;

	/* The reply carries a queue chunk list of unknown length, so reserve
	 * the maximum mailbox buffer for it.
	 */
	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!vc_msg)
		return -ENOMEM;

	aq.vport_id = cpu_to_le32(vport_id);
	aq.num_tx_q = cpu_to_le16(rsrc->num_txq);
	aq.num_tx_complq = cpu_to_le16(rsrc->num_complq);
	aq.num_rx_q = cpu_to_le16(rsrc->num_rxq);
	aq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq);

	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &aq;
	xn_params.send_buf.iov_len = sizeof(aq);
	xn_params.recv_buf.iov_base = vc_msg;
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;

	/* compare vc_msg num queues with vport num queues */
	if (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq ||
	    le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq ||
	    le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq ||
	    le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq)
		return -EINVAL;

	/* Reject a reply too short to hold the advertised chunk count. */
	size = struct_size(vc_msg, chunks.chunks,
			   le16_to_cpu(vc_msg->chunks.num_chunks));
	if (reply_sz < size)
		return -EIO;

	/* Persist the granted queue chunks for later register mapping. */
	return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);
}
2677 
/**
 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
 * @adapter: Driver specific private structure
 * @num_vectors: number of vectors to be allocated
 *
 * On success the granted vector chunk info is cached in
 * adapter->req_vec_chunks (any previous allocation is replaced).
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_alloc_vectors ac = {};
	ssize_t reply_sz;
	u16 num_vchunks;
	int size;

	ac.num_vectors = cpu_to_le16(num_vectors);

	/* The reply carries a vector chunk list of unknown length, so
	 * reserve the maximum mailbox buffer for it.
	 */
	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!rcvd_vec)
		return -ENOMEM;

	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
	xn_params.send_buf.iov_base = &ac;
	xn_params.send_buf.iov_len = sizeof(ac);
	xn_params.recv_buf.iov_base = rcvd_vec;
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;

	/* Sanity-check the reply: it must be large enough for the advertised
	 * chunk count, yet still fit in a mailbox buffer.
	 */
	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
	if (reply_sz < size)
		return -EIO;

	if (size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	/* Replace any previously cached vector chunk info. */
	kfree(adapter->req_vec_chunks);
	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
	if (!adapter->req_vec_chunks)
		return -ENOMEM;

	/* Fewer vectors than requested is treated as failure; drop the
	 * cached chunks so callers see a consistent state.
	 */
	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
		kfree(adapter->req_vec_chunks);
		adapter->req_vec_chunks = NULL;
		return -EINVAL;
	}

	return 0;
}
2731 
2732 /**
2733  * idpf_send_dealloc_vectors_msg - Send virtchnl de allocate vectors message
2734  * @adapter: Driver specific private structure
2735  *
2736  * Returns 0 on success, negative on failure.
2737  */
idpf_send_dealloc_vectors_msg(struct idpf_adapter * adapter)2738 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2739 {
2740 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2741 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2742 	struct idpf_vc_xn_params xn_params = {};
2743 	ssize_t reply_sz;
2744 	int buf_size;
2745 
2746 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2747 
2748 	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
2749 	xn_params.send_buf.iov_base = vcs;
2750 	xn_params.send_buf.iov_len = buf_size;
2751 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2752 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2753 	if (reply_sz < 0)
2754 		return reply_sz;
2755 
2756 	kfree(adapter->req_vec_chunks);
2757 	adapter->req_vec_chunks = NULL;
2758 
2759 	return 0;
2760 }
2761 
2762 /**
2763  * idpf_get_max_vfs - Get max number of vfs supported
2764  * @adapter: Driver specific private structure
2765  *
2766  * Returns max number of VFs
2767  */
static int idpf_get_max_vfs(struct idpf_adapter *adapter)
{
	/* max_sriov_vfs arrives little-endian in the negotiated caps */
	return le16_to_cpu(adapter->caps.max_sriov_vfs);
}
2772 
2773 /**
2774  * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2775  * @adapter: Driver specific private structure
2776  * @num_vfs: number of virtual functions to be created
2777  *
2778  * Returns 0 on success, negative on failure.
2779  */
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	struct virtchnl2_sriov_vfs_info vfs_info = {
		.num_vfs = cpu_to_le16(num_vfs),
	};
	ssize_t ret;

	xn_params.send_buf.iov_base = &vfs_info;
	xn_params.send_buf.iov_len = sizeof(vfs_info);

	/* No reply payload expected; only the completion status matters */
	ret = idpf_vc_xn_exec(adapter, &xn_params);

	return ret < 0 ? ret : 0;
}
2795 
2796 /**
2797  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2798  * @np: netdev private structure
2799  * @port_stats: structure to store the vport statistics
2800  *
2801  * Return: 0 on success, negative on failure.
2802  */
idpf_send_get_stats_msg(struct idpf_netdev_priv * np,struct idpf_port_stats * port_stats)2803 int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
2804 			    struct idpf_port_stats *port_stats)
2805 {
2806 	struct rtnl_link_stats64 *netstats = &np->netstats;
2807 	struct virtchnl2_vport_stats stats_msg = {};
2808 	struct idpf_vc_xn_params xn_params = {};
2809 	ssize_t reply_sz;
2810 
2811 
2812 	/* Don't send get_stats message if the link is down */
2813 	if (!test_bit(IDPF_VPORT_UP, np->state))
2814 		return 0;
2815 
2816 	stats_msg.vport_id = cpu_to_le32(np->vport_id);
2817 
2818 	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
2819 	xn_params.send_buf.iov_base = &stats_msg;
2820 	xn_params.send_buf.iov_len = sizeof(stats_msg);
2821 	xn_params.recv_buf = xn_params.send_buf;
2822 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2823 
2824 	reply_sz = idpf_vc_xn_exec(np->adapter, &xn_params);
2825 	if (reply_sz < 0)
2826 		return reply_sz;
2827 	if (reply_sz < sizeof(stats_msg))
2828 		return -EIO;
2829 
2830 	spin_lock_bh(&np->stats_lock);
2831 
2832 	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
2833 			       le64_to_cpu(stats_msg.rx_multicast) +
2834 			       le64_to_cpu(stats_msg.rx_broadcast);
2835 	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
2836 			       le64_to_cpu(stats_msg.tx_multicast) +
2837 			       le64_to_cpu(stats_msg.tx_broadcast);
2838 	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
2839 	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
2840 	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
2841 	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
2842 	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
2843 	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
2844 
2845 	port_stats->vport_stats = stats_msg;
2846 
2847 	spin_unlock_bh(&np->stats_lock);
2848 
2849 	return 0;
2850 }
2851 
2852 /**
2853  * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS lut message
2854  * @adapter: adapter pointer used to send virtchnl message
2855  * @rss_data: pointer to RSS key and lut info
2856  * @vport_id: vport identifier used while preparing the virtchnl message
2857  * @get: flag to set or get RSS look up table
2858  *
2859  * When rxhash is disabled, RSS LUT will be configured with zeros.  If rxhash
2860  * is enabled, the LUT values stored in driver's soft copy will be used to setup
2861  * the HW.
2862  *
2863  * Return: 0 on success, negative on failure.
2864  */
int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
				  struct idpf_rss_data *rss_data,
				  u32 vport_id, bool get)
{
	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	int buf_size, lut_buf_size;
	struct idpf_vport *vport;
	ssize_t reply_sz;
	bool rxhash_ena;
	int i;

	vport = idpf_vid_to_vport(adapter, vport_id);
	if (!vport)
		return -EINVAL;

	rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);

	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
	rl = kzalloc(buf_size, GFP_KERNEL);
	if (!rl)
		return -ENOMEM;

	rl->vport_id = cpu_to_le32(vport_id);

	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = rl;
	xn_params.send_buf.iov_len = buf_size;

	if (get) {
		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
		if (!recv_rl)
			return -ENOMEM;
		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
		xn_params.recv_buf.iov_base = recv_rl;
		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	} else {
		/* With rxhash disabled, program an all-zero LUT instead of
		 * the soft copy.
		 */
		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rl->lut[i] = rxhash_ena ?
				cpu_to_le32(rss_data->rss_lut[i]) : 0;

		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
	}
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (!get)
		return 0;
	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
		return -EIO;

	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
	if (reply_sz < lut_buf_size)
		return -EIO;

	/* size didn't change, we can reuse existing lut buf */
	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
		goto do_memcpy;

	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
	kfree(rss_data->rss_lut);

	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
	if (!rss_data->rss_lut) {
		rss_data->rss_lut_size = 0;
		return -ENOMEM;
	}

do_memcpy:
	/* rss_lut_size counts u32 entries, not bytes; copying only
	 * rss_lut_size bytes would truncate the table to a quarter of the
	 * entries, so copy the full lut_buf_size bytes.
	 */
	memcpy(rss_data->rss_lut, recv_rl->lut, lut_buf_size);

	return 0;
}
2940 
2941 /**
2942  * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
2943  * @adapter: adapter pointer used to send virtchnl message
2944  * @rss_data: pointer to RSS key and lut info
2945  * @vport_id: vport identifier used while preparing the virtchnl message
2946  * @get: flag to set or get RSS look up table
2947  *
2948  * Return: 0 on success, negative on failure
2949  */
int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
				  struct idpf_rss_data *rss_data,
				  u32 vport_id, bool get)
{
	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;
	int buf_size;
	u16 key_size;

	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
	rk = kzalloc(buf_size, GFP_KERNEL);
	if (!rk)
		return -ENOMEM;

	rk->vport_id = cpu_to_le32(vport_id);

	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = rk;
	xn_params.send_buf.iov_len = buf_size;

	if (get) {
		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
		if (!recv_rk)
			return -ENOMEM;

		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
		xn_params.recv_buf.iov_base = recv_rk;
		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	} else {
		/* Key bytes come verbatim from the driver's soft copy */
		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
		memcpy(rk->key_flex, rss_data->rss_key,
		       rss_data->rss_key_size);

		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
	}

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (!get)
		return 0;
	if (reply_sz < sizeof(struct virtchnl2_rss_key))
		return -EIO;

	/* Never store more than the netdev RSS key limit */
	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
			 le16_to_cpu(recv_rk->key_len));
	if (reply_sz < key_size)
		return -EIO;

	if (rss_data->rss_key_size != key_size) {
		/* Key length changed: reallocate the soft copy */
		rss_data->rss_key_size = key_size;
		kfree(rss_data->rss_key);
		rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
		if (!rss_data->rss_key) {
			rss_data->rss_key_size = 0;
			return -ENOMEM;
		}
	}

	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);

	return 0;
}
3016 
3017 /**
3018  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
3019  * @ptype: ptype lookup table
3020  * @pstate: state machine for ptype lookup table
3021  * @ipv4: ipv4 or ipv6
3022  * @frag: fragmentation allowed
3023  *
3024  */
static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
				   struct idpf_ptype_state *pstate,
				   bool ipv4, bool frag)
{
	if (pstate->outer_ip && pstate->outer_frag) {
		/* An outer IP header was already seen: this one is the inner
		 * header of an IP-in-IP tunnel.
		 */
		ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;

		ptype->tunnel_end_prot = ipv4 ? LIBETH_RX_PT_TUNNEL_END_IPV4 :
						LIBETH_RX_PT_TUNNEL_END_IPV6;

		if (frag)
			ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
	} else {
		/* First IP header seen: record it as the outer header */
		pstate->outer_ip = true;

		ptype->outer_ip = ipv4 ? LIBETH_RX_PT_OUTER_IPV4 :
					 LIBETH_RX_PT_OUTER_IPV6;

		if (frag) {
			ptype->outer_frag = LIBETH_RX_PT_FRAG;
			pstate->outer_frag = true;
		}
	}
}
3054 
idpf_finalize_ptype_lookup(struct libeth_rx_pt * ptype)3055 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
3056 {
3057 	if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
3058 	    ptype->inner_prot)
3059 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
3060 	else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
3061 		 ptype->outer_ip)
3062 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
3063 	else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
3064 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
3065 	else
3066 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
3067 
3068 	libeth_rx_pt_gen_hash_type(ptype);
3069 }
3070 
3071 /**
3072  * idpf_parse_protocol_ids - parse protocol IDs for a given packet type
3073  * @ptype: packet type to parse
3074  * @rx_pt: store the parsed packet type info into
3075  */
static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype,
				    struct libeth_rx_pt *rx_pt)
{
	struct idpf_ptype_state pstate = {};

	/* Walk the protocol ID stack in order and translate each header into
	 * the corresponding libeth rx_pt fields; pstate tracks tunnel
	 * progress across iterations.
	 */
	for (u32 j = 0; j < ptype->proto_id_count; j++) {
		u16 id = le16_to_cpu(ptype->proto_id[j]);

		switch (id) {
		case VIRTCHNL2_PROTO_HDR_GRE:
			/* GRE following an outer IP header: GRENAT tunnel */
			if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) {
				rx_pt->tunnel_type =
					LIBETH_RX_PT_TUNNEL_IP_GRENAT;
				pstate.tunnel_state |=
					IDPF_PTYPE_TUNNEL_IP_GRENAT;
			}
			break;
		case VIRTCHNL2_PROTO_HDR_MAC:
			rx_pt->outer_ip = LIBETH_RX_PT_OUTER_L2;
			/* Inner MAC after IP+GRE: GRENAT tunnel with inner
			 * MAC header (IDPF_TUN_IP_GRE presumably combines the
			 * IP and GRENAT state bits -- confirm in idpf.h)
			 */
			if (pstate.tunnel_state == IDPF_TUN_IP_GRE) {
				rx_pt->tunnel_type =
					LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
				pstate.tunnel_state |=
					IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
			}
			break;
		case VIRTCHNL2_PROTO_HDR_IPV4:
			idpf_fill_ptype_lookup(rx_pt, &pstate, true, false);
			break;
		case VIRTCHNL2_PROTO_HDR_IPV6:
			idpf_fill_ptype_lookup(rx_pt, &pstate, false, false);
			break;
		case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
			idpf_fill_ptype_lookup(rx_pt, &pstate, true, true);
			break;
		case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
			idpf_fill_ptype_lookup(rx_pt, &pstate, false, true);
			break;
		case VIRTCHNL2_PROTO_HDR_UDP:
			rx_pt->inner_prot = LIBETH_RX_PT_INNER_UDP;
			break;
		case VIRTCHNL2_PROTO_HDR_TCP:
			rx_pt->inner_prot = LIBETH_RX_PT_INNER_TCP;
			break;
		case VIRTCHNL2_PROTO_HDR_SCTP:
			rx_pt->inner_prot = LIBETH_RX_PT_INNER_SCTP;
			break;
		case VIRTCHNL2_PROTO_HDR_ICMP:
			rx_pt->inner_prot = LIBETH_RX_PT_INNER_ICMP;
			break;
		case VIRTCHNL2_PROTO_HDR_PAY:
			rx_pt->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
			break;
		/* Known protocol headers with no rx_pt mapping; listed
		 * explicitly (rather than folded into default) so new IDs
		 * stand out when added to virtchnl2.
		 */
		case VIRTCHNL2_PROTO_HDR_ICMPV6:
		case VIRTCHNL2_PROTO_HDR_IPV6_EH:
		case VIRTCHNL2_PROTO_HDR_PRE_MAC:
		case VIRTCHNL2_PROTO_HDR_POST_MAC:
		case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
		case VIRTCHNL2_PROTO_HDR_SVLAN:
		case VIRTCHNL2_PROTO_HDR_CVLAN:
		case VIRTCHNL2_PROTO_HDR_MPLS:
		case VIRTCHNL2_PROTO_HDR_MMPLS:
		case VIRTCHNL2_PROTO_HDR_PTP:
		case VIRTCHNL2_PROTO_HDR_CTRL:
		case VIRTCHNL2_PROTO_HDR_LLDP:
		case VIRTCHNL2_PROTO_HDR_ARP:
		case VIRTCHNL2_PROTO_HDR_ECP:
		case VIRTCHNL2_PROTO_HDR_EAPOL:
		case VIRTCHNL2_PROTO_HDR_PPPOD:
		case VIRTCHNL2_PROTO_HDR_PPPOE:
		case VIRTCHNL2_PROTO_HDR_IGMP:
		case VIRTCHNL2_PROTO_HDR_AH:
		case VIRTCHNL2_PROTO_HDR_ESP:
		case VIRTCHNL2_PROTO_HDR_IKE:
		case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
		case VIRTCHNL2_PROTO_HDR_L2TPV2:
		case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
		case VIRTCHNL2_PROTO_HDR_L2TPV3:
		case VIRTCHNL2_PROTO_HDR_GTP:
		case VIRTCHNL2_PROTO_HDR_GTP_EH:
		case VIRTCHNL2_PROTO_HDR_GTPCV2:
		case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
		case VIRTCHNL2_PROTO_HDR_GTPU:
		case VIRTCHNL2_PROTO_HDR_GTPU_UL:
		case VIRTCHNL2_PROTO_HDR_GTPU_DL:
		case VIRTCHNL2_PROTO_HDR_ECPRI:
		case VIRTCHNL2_PROTO_HDR_VRRP:
		case VIRTCHNL2_PROTO_HDR_OSPF:
		case VIRTCHNL2_PROTO_HDR_TUN:
		case VIRTCHNL2_PROTO_HDR_NVGRE:
		case VIRTCHNL2_PROTO_HDR_VXLAN:
		case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
		case VIRTCHNL2_PROTO_HDR_GENEVE:
		case VIRTCHNL2_PROTO_HDR_NSH:
		case VIRTCHNL2_PROTO_HDR_QUIC:
		case VIRTCHNL2_PROTO_HDR_PFCP:
		case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
		case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
		case VIRTCHNL2_PROTO_HDR_RTP:
		case VIRTCHNL2_PROTO_HDR_NO_PROTO:
			break;
		default:
			break;
		}
	}
}
3182 
3183 /**
3184  * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
3185  * @adapter: driver specific private structure
3186  *
3187  * Return: 0 on success, negative on failure.
3188  */
static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
	struct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL;
	struct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	int ptypes_recvd = 0, ptype_offset;
	u32 max_ptype = IDPF_RX_MAX_PTYPE;
	u16 next_ptype_id = 0;
	ssize_t reply_sz;

	/* Separate tables: singleq descriptors carry 8-bit ptype IDs,
	 * splitq descriptors carry 10-bit IDs.
	 */
	singleq_pt_lkup = kzalloc_objs(*singleq_pt_lkup, IDPF_RX_MAX_BASE_PTYPE);
	if (!singleq_pt_lkup)
		return -ENOMEM;

	splitq_pt_lkup = kzalloc_objs(*splitq_pt_lkup, max_ptype);
	if (!splitq_pt_lkup)
		return -ENOMEM;

	get_ptype_info = kzalloc_obj(*get_ptype_info);
	if (!get_ptype_info)
		return -ENOMEM;

	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!ptype_info)
		return -ENOMEM;

	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
	xn_params.send_buf.iov_base = get_ptype_info;
	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
	xn_params.recv_buf.iov_base = ptype_info;
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	/* The full table doesn't fit in one mailbox buffer: request it in
	 * windows of at most IDPF_RX_MAX_PTYPES_PER_BUF ptypes.
	 */
	while (next_ptype_id < max_ptype) {
		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);

		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
			get_ptype_info->num_ptypes =
				cpu_to_le16(max_ptype - next_ptype_id);
		else
			get_ptype_info->num_ptypes =
				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);

		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		/* Sanity check: device must not report more ptypes than the
		 * table can hold.
		 */
		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
		if (ptypes_recvd > max_ptype)
			return -EINVAL;

		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
				le16_to_cpu(get_ptype_info->num_ptypes);

		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;

		/* Each reply packs variable-size ptype entries back to back
		 * after the header; walk them by their self-reported size.
		 */
		for (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
			struct libeth_rx_pt rx_pt = {};
			struct virtchnl2_ptype *ptype;
			u16 pt_10, pt_8;

			ptype = (struct virtchnl2_ptype *)
					((u8 *)ptype_info + ptype_offset);

			pt_10 = le16_to_cpu(ptype->ptype_id_10);
			pt_8 = ptype->ptype_id_8;

			/* Reject entries that would run past the reply buffer */
			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
				return -EINVAL;

			/* 0xFFFF indicates end of ptypes */
			if (pt_10 == IDPF_INVALID_PTYPE_ID)
				goto out;
			if (pt_10 >= max_ptype)
				return -EINVAL;

			idpf_parse_protocol_ids(ptype, &rx_pt);
			idpf_finalize_ptype_lookup(&rx_pt);

			/* For a given protocol ID stack, the ptype value might
			 * vary between ptype_id_10 and ptype_id_8. So store
			 * them separately for splitq and singleq. Also skip
			 * the repeated ptypes in case of singleq.
			 */
			splitq_pt_lkup[pt_10] = rx_pt;
			if (!singleq_pt_lkup[pt_8].outer_ip)
				singleq_pt_lkup[pt_8] = rx_pt;
		}
	}

out:
	/* Transfer ownership to the adapter; disarm the __free cleanups */
	adapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup);
	adapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup);

	return 0;
}
3288 
3289 /**
3290  * idpf_rel_rx_pt_lkup - release RX ptype lookup table
3291  * @adapter: adapter pointer to get the lookup table
3292  */
idpf_rel_rx_pt_lkup(struct idpf_adapter * adapter)3293 static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter)
3294 {
3295 	kfree(adapter->splitq_pt_lkup);
3296 	adapter->splitq_pt_lkup = NULL;
3297 
3298 	kfree(adapter->singleq_pt_lkup);
3299 	adapter->singleq_pt_lkup = NULL;
3300 }
3301 
3302 /**
3303  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
3304  *				    message
3305  * @adapter: adapter pointer used to send virtchnl message
3306  * @vport_id: vport identifier used while preparing the virtchnl message
3307  * @loopback_ena: flag to enable or disable loopback
3308  *
3309  * Return: 0 on success, negative on failure.
3310  */
int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
				   bool loopback_ena)
{
	struct idpf_vc_xn_params xn_params = {};
	/* Zero-initialize so padding/reserved bytes of the message are not
	 * sent as uninitialized stack data (matches the {} style used by
	 * the other message senders in this file).
	 */
	struct virtchnl2_loopback loopback = {};
	ssize_t reply_sz;

	loopback.vport_id = cpu_to_le32(vport_id);
	loopback.enable = loopback_ena;

	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &loopback;
	xn_params.send_buf.iov_len = sizeof(loopback);
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
3329 
3330 /**
3331  * idpf_find_ctlq - Given a type and id, find ctlq info
3332  * @hw: hardware struct
3333  * @type: type of ctrlq to find
3334  * @id: ctlq id to find
3335  *
3336  * Returns pointer to found ctlq info struct, NULL otherwise.
3337  */
idpf_find_ctlq(struct idpf_hw * hw,enum idpf_ctlq_type type,int id)3338 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
3339 					     enum idpf_ctlq_type type, int id)
3340 {
3341 	struct idpf_ctlq_info *cq, *tmp;
3342 
3343 	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
3344 		if (cq->q_id == id && cq->cq_type == type)
3345 			return cq;
3346 
3347 	return NULL;
3348 }
3349 
3350 /**
3351  * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
3352  * @adapter: adapter info struct
3353  *
3354  * Returns 0 on success, negative otherwise
3355  */
idpf_init_dflt_mbx(struct idpf_adapter * adapter)3356 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
3357 {
3358 	struct idpf_ctlq_create_info ctlq_info[] = {
3359 		{
3360 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
3361 			.id = IDPF_DFLT_MBX_ID,
3362 			.len = IDPF_DFLT_MBX_Q_LEN,
3363 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
3364 		},
3365 		{
3366 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
3367 			.id = IDPF_DFLT_MBX_ID,
3368 			.len = IDPF_DFLT_MBX_Q_LEN,
3369 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
3370 		}
3371 	};
3372 	struct idpf_hw *hw = &adapter->hw;
3373 	int err;
3374 
3375 	adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
3376 
3377 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
3378 	if (err)
3379 		return err;
3380 
3381 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
3382 				 IDPF_DFLT_MBX_ID);
3383 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
3384 				 IDPF_DFLT_MBX_ID);
3385 
3386 	if (!hw->asq || !hw->arq) {
3387 		idpf_ctlq_deinit(hw);
3388 
3389 		return -ENOENT;
3390 	}
3391 
3392 	adapter->state = __IDPF_VER_CHECK;
3393 
3394 	return 0;
3395 }
3396 
3397 /**
3398  * idpf_deinit_dflt_mbx - Free up ctlqs setup
3399  * @adapter: Driver specific private data structure
3400  */
idpf_deinit_dflt_mbx(struct idpf_adapter * adapter)3401 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
3402 {
3403 	if (adapter->hw.arq && adapter->hw.asq) {
3404 		idpf_mb_clean(adapter, adapter->hw.asq);
3405 		idpf_ctlq_deinit(&adapter->hw);
3406 	}
3407 	adapter->hw.arq = NULL;
3408 	adapter->hw.asq = NULL;
3409 }
3410 
3411 /**
3412  * idpf_vport_params_buf_rel - Release memory for MailBox resources
3413  * @adapter: Driver specific private data structure
3414  *
3415  * Will release memory to hold the vport parameters received on MailBox
3416  */
idpf_vport_params_buf_rel(struct idpf_adapter * adapter)3417 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
3418 {
3419 	kfree(adapter->vport_params_recvd);
3420 	adapter->vport_params_recvd = NULL;
3421 	kfree(adapter->vport_params_reqd);
3422 	adapter->vport_params_reqd = NULL;
3423 	kfree(adapter->vport_ids);
3424 	adapter->vport_ids = NULL;
3425 }
3426 
3427 /**
3428  * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
3429  * @adapter: Driver specific private data structure
3430  *
3431  * Will alloc memory to hold the vport parameters received on MailBox
3432  */
idpf_vport_params_buf_alloc(struct idpf_adapter * adapter)3433 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
3434 {
3435 	u16 num_max_vports = idpf_get_max_vports(adapter);
3436 
3437 	adapter->vport_params_reqd = kzalloc_objs(*adapter->vport_params_reqd,
3438 						  num_max_vports);
3439 	if (!adapter->vport_params_reqd)
3440 		return -ENOMEM;
3441 
3442 	adapter->vport_params_recvd = kzalloc_objs(*adapter->vport_params_recvd,
3443 						   num_max_vports);
3444 	if (!adapter->vport_params_recvd)
3445 		goto err_mem;
3446 
3447 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
3448 	if (!adapter->vport_ids)
3449 		goto err_mem;
3450 
3451 	if (adapter->vport_config)
3452 		return 0;
3453 
3454 	adapter->vport_config = kzalloc_objs(*adapter->vport_config,
3455 					     num_max_vports);
3456 	if (!adapter->vport_config)
3457 		goto err_mem;
3458 
3459 	return 0;
3460 
3461 err_mem:
3462 	idpf_vport_params_buf_rel(adapter);
3463 
3464 	return -ENOMEM;
3465 }
3466 
3467 /**
3468  * idpf_vc_core_init - Initialize state machine and get driver specific
3469  * resources
3470  * @adapter: Driver specific private structure
3471  *
3472  * This function will initialize the state machine and request all necessary
3473  * resources required by the device driver. Once the state machine is
3474  * initialized, allocate memory to store vport specific information and also
3475  * requests required interrupts.
3476  *
3477  * Returns 0 on success, -EAGAIN function will get called again,
3478  * otherwise negative on failure.
3479  */
int idpf_vc_core_init(struct idpf_adapter *adapter)
{
	int task_delay = 30;
	u16 num_max_vports;
	int err = 0;

	/* Transaction manager survives soft resets; allocate once */
	if (!adapter->vcxn_mngr) {
		adapter->vcxn_mngr = kzalloc_obj(*adapter->vcxn_mngr);
		if (!adapter->vcxn_mngr) {
			err = -ENOMEM;
			goto init_failed;
		}
	}
	idpf_vc_xn_init(adapter->vcxn_mngr);

	/* Drive the init state machine: VER_CHECK -> GET_CAPS -> INIT_SW */
	while (adapter->state != __IDPF_INIT_SW) {
		switch (adapter->state) {
		case __IDPF_VER_CHECK:
			err = idpf_send_ver_msg(adapter);
			switch (err) {
			case 0:
				/* success, move state machine forward */
				adapter->state = __IDPF_GET_CAPS;
				fallthrough;
			case -EAGAIN:
				goto restart;
			default:
				/* Something bad happened, try again but only a
				 * few times.
				 */
				goto init_failed;
			}
		case __IDPF_GET_CAPS:
			err = idpf_send_get_caps_msg(adapter);
			if (err)
				goto init_failed;
			adapter->state = __IDPF_INIT_SW;
			break;
		default:
			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
				adapter->state);
			err = -EINVAL;
			goto init_failed;
		}
		break;
restart:
		/* Give enough time before proceeding further with
		 * state machine
		 */
		msleep(task_delay);
	}

	/* Use device-reported LAN memory regions when the cap is negotiated */
	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
		err = idpf_send_get_lan_memory_regions(adapter);
		if (err) {
			dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
				err);
			return -EINVAL;
		}
	} else {
		/* Fallback to mapping the remaining regions of the entire BAR */
		err = idpf_calc_remaining_mmio_regs(adapter);
		if (err) {
			dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
				err);
			return -ENOMEM;
		}
	}

	err = idpf_map_lan_mmio_regs(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
			err);
		return -ENOMEM;
	}

	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
	num_max_vports = idpf_get_max_vports(adapter);
	adapter->max_vports = num_max_vports;
	adapter->vports = kzalloc_objs(*adapter->vports, num_max_vports);
	if (!adapter->vports)
		return -ENOMEM;

	/* netdevs may persist from a previous init; allocate only once */
	if (!adapter->netdevs) {
		adapter->netdevs = kzalloc_objs(struct net_device *,
						num_max_vports);
		if (!adapter->netdevs) {
			err = -ENOMEM;
			goto err_netdev_alloc;
		}
	}

	err = idpf_vport_params_buf_alloc(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
			err);
		goto err_netdev_alloc;
	}

	/* Start the mailbox task before requesting vectors. This will ensure
	 * vector information response from mailbox is handled
	 */
	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);

	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));

	err = idpf_intr_req(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
			err);
		goto err_intr_req;
	}

	err = idpf_send_get_rx_ptype_msg(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "failed to get RX ptypes: %d\n",
			err);
		goto intr_rel;
	}

	/* PTP init failure is non-fatal: log and continue without it */
	err = idpf_ptp_init(adapter);
	if (err)
		pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
			ERR_PTR(err));

	idpf_init_avail_queues(adapter);

	/* Skew the delay for init tasks for each function based on fn number
	 * to prevent every function from making the same call simultaneously.
	 */
	queue_delayed_work(adapter->init_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));

	set_bit(IDPF_VC_CORE_INIT, adapter->flags);

	return 0;

intr_rel:
	idpf_intr_rel(adapter);
err_intr_req:
	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->mbx_task);
	idpf_vport_params_buf_rel(adapter);
err_netdev_alloc:
	kfree(adapter->vports);
	adapter->vports = NULL;
	return err;

init_failed:
	/* Don't retry if we're trying to go down, just bail. */
	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
		return err;

	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");

		return -EFAULT;
	}
	/* If it reached here, it is possible that mailbox queue initialization
	 * register writes might not have taken effect. Retry to initialize
	 * the mailbox again
	 */
	adapter->state = __IDPF_VER_CHECK;
	if (adapter->vcxn_mngr)
		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
			   msecs_to_jiffies(task_delay));

	return -EAGAIN;
}
3652 
3653 /**
3654  * idpf_vc_core_deinit - Device deinit routine
3655  * @adapter: Driver specific private structure
3656  *
3657  */
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;
	bool remove_in_prog;

	/* Nothing to do if init never completed (or deinit already ran) */
	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
		return;

	/* Avoid transaction timeouts when called during reset */
	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
	if (!remove_in_prog)
		idpf_vc_xn_shutdown(adapter->vcxn_mngr);

	idpf_ptp_release(adapter);
	idpf_deinit_task(adapter);
	idpf_idc_deinit_core_aux_device(adapter);
	idpf_rel_rx_pt_lkup(adapter);
	idpf_intr_rel(adapter);

	/* NOTE(review): on remove, the xn shutdown is deferred until after
	 * the teardown calls above -- presumably so their outstanding
	 * virtchnl messages can still complete; confirm before reordering.
	 */
	if (remove_in_prog)
		idpf_vc_xn_shutdown(adapter->vcxn_mngr);

	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->mbx_task);

	idpf_vport_params_buf_rel(adapter);

	kfree(hw->lan_regs);
	hw->lan_regs = NULL;

	kfree(adapter->vports);
	adapter->vports = NULL;

	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
3693 
3694 /**
3695  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3696  * @vport: virtual port data struct
3697  * @rsrc: pointer to queue and vector resources
3698  *
3699  * This function requests the vector information required for the vport and
3700  * stores the vector indexes received from the 'global vector distribution'
3701  * in the vport's queue vectors array.
3702  *
3703  * Return: 0 on success, error on failure
3704  */
idpf_vport_alloc_vec_indexes(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)3705 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
3706 				 struct idpf_q_vec_rsrc *rsrc)
3707 {
3708 	struct idpf_vector_info vec_info;
3709 	int num_alloc_vecs;
3710 	u32 req;
3711 
3712 	vec_info.num_curr_vecs = rsrc->num_q_vectors;
3713 	if (vec_info.num_curr_vecs)
3714 		vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
3715 
3716 	/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
3717 	req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) +
3718 	      IDPF_RESERVED_VECS;
3719 	vec_info.num_req_vecs = req;
3720 
3721 	vec_info.default_vport = vport->default_vport;
3722 	vec_info.index = vport->idx;
3723 
3724 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3725 						     rsrc->q_vector_idxs,
3726 						     &vec_info);
3727 	if (num_alloc_vecs <= 0) {
3728 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3729 			num_alloc_vecs);
3730 		return -EINVAL;
3731 	}
3732 
3733 	rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
3734 
3735 	return 0;
3736 }
3737 
3738 /**
3739  * idpf_vport_init - Initialize virtual port
3740  * @vport: virtual port to be initialized
3741  * @max_q: vport max queue info
3742  *
3743  * Will initialize vport with the info received through MB earlier
3744  *
3745  * Return: 0 on success, negative on failure.
3746  */
idpf_vport_init(struct idpf_vport * vport,struct idpf_vport_max_q * max_q)3747 int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3748 {
3749 	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
3750 	struct idpf_adapter *adapter = vport->adapter;
3751 	struct virtchnl2_create_vport *vport_msg;
3752 	struct idpf_vport_config *vport_config;
3753 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3754 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3755 	struct idpf_rss_data *rss_data;
3756 	u16 idx = vport->idx;
3757 	int err;
3758 
3759 	vport_config = adapter->vport_config[idx];
3760 	rss_data = &vport_config->user_config.rss_data;
3761 	vport_msg = adapter->vport_params_recvd[idx];
3762 
3763 	err = idpf_vport_init_queue_reg_chunks(vport_config,
3764 					       &vport_msg->chunks);
3765 	if (err)
3766 		return err;
3767 
3768 	vport_config->max_q.max_txq = max_q->max_txq;
3769 	vport_config->max_q.max_rxq = max_q->max_rxq;
3770 	vport_config->max_q.max_complq = max_q->max_complq;
3771 	vport_config->max_q.max_bufq = max_q->max_bufq;
3772 
3773 	rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
3774 	rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3775 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3776 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3777 
3778 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3779 				       le16_to_cpu(vport_msg->rss_key_size));
3780 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3781 
3782 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3783 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3784 
3785 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3786 	memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
3787 	memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
3788 
3789 	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3790 
3791 	idpf_vport_init_num_qs(vport, vport_msg, rsrc);
3792 	idpf_vport_calc_num_q_desc(vport, rsrc);
3793 	idpf_vport_calc_num_q_groups(rsrc);
3794 	idpf_vport_alloc_vec_indexes(vport, rsrc);
3795 
3796 	vport->crc_enable = adapter->crc_enable;
3797 
3798 	if (!(vport_msg->vport_flags &
3799 	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
3800 		return 0;
3801 
3802 	err = idpf_ptp_get_vport_tstamps_caps(vport);
3803 	if (err) {
3804 		/* Do not error on timestamp failure */
3805 		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
3806 		return 0;
3807 	}
3808 
3809 	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
3810 
3811 	return 0;
3812 }
3813 
3814 /**
3815  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3816  * @adapter: adapter structure to get the mailbox vector id
3817  * @vecids: Array of vector ids
3818  * @num_vecids: number of vector ids
3819  * @chunks: vector ids received over mailbox
3820  *
3821  * Will initialize the mailbox vector id which is received from the
3822  * get capabilities and data queue vector ids with ids received as
3823  * mailbox parameters.
3824  * Returns number of ids filled
3825  */
idpf_get_vec_ids(struct idpf_adapter * adapter,u16 * vecids,int num_vecids,struct virtchnl2_vector_chunks * chunks)3826 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3827 		     u16 *vecids, int num_vecids,
3828 		     struct virtchnl2_vector_chunks *chunks)
3829 {
3830 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3831 	int num_vecid_filled = 0;
3832 	int i, j;
3833 
3834 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3835 	num_vecid_filled++;
3836 
3837 	for (j = 0; j < num_chunks; j++) {
3838 		struct virtchnl2_vector_chunk *chunk;
3839 		u16 start_vecid, num_vec;
3840 
3841 		chunk = &chunks->vchunks[j];
3842 		num_vec = le16_to_cpu(chunk->num_vectors);
3843 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3844 
3845 		for (i = 0; i < num_vec; i++) {
3846 			if ((num_vecid_filled + i) < num_vecids) {
3847 				vecids[num_vecid_filled + i] = start_vecid;
3848 				start_vecid++;
3849 			} else {
3850 				break;
3851 			}
3852 		}
3853 		num_vecid_filled = num_vecid_filled + i;
3854 	}
3855 
3856 	return num_vecid_filled;
3857 }
3858 
3859 /**
3860  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3861  * @qids: Array of queue ids
3862  * @num_qids: number of queue ids
3863  * @q_type: queue model
3864  * @chunks: queue ids received over mailbox
3865  *
3866  * Will initialize all queue ids with ids received as mailbox parameters
3867  * Returns number of ids filled
3868  */
idpf_vport_get_queue_ids(u32 * qids,int num_qids,u16 q_type,struct idpf_queue_id_reg_info * chunks)3869 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3870 				    struct idpf_queue_id_reg_info *chunks)
3871 {
3872 	u16 num_chunks = chunks->num_chunks;
3873 	u32 num_q_id_filled = 0, i;
3874 	u32 start_q_id, num_q;
3875 
3876 	while (num_chunks--) {
3877 		struct idpf_queue_id_reg_chunk *chunk;
3878 
3879 		chunk = &chunks->queue_chunks[num_chunks];
3880 		if (chunk->type != q_type)
3881 			continue;
3882 
3883 		num_q = chunk->num_queues;
3884 		start_q_id = chunk->start_queue_id;
3885 
3886 		for (i = 0; i < num_q; i++) {
3887 			if ((num_q_id_filled + i) < num_qids) {
3888 				qids[num_q_id_filled + i] = start_q_id;
3889 				start_q_id++;
3890 			} else {
3891 				break;
3892 			}
3893 		}
3894 		num_q_id_filled = num_q_id_filled + i;
3895 	}
3896 
3897 	return num_q_id_filled;
3898 }
3899 
3900 /**
3901  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3902  * @vport: virtual port for which the queues ids are initialized
3903  * @rsrc: pointer to queue and vector resources
3904  * @qids: queue ids
3905  * @num_qids: number of queue ids
3906  * @q_type: type of queue
3907  *
3908  * Will initialize all queue ids with ids received as mailbox
3909  * parameters. Returns number of queue ids initialized.
3910  */
__idpf_vport_queue_ids_init(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc,const u32 * qids,int num_qids,u32 q_type)3911 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3912 				       struct idpf_q_vec_rsrc *rsrc,
3913 				       const u32 *qids,
3914 				       int num_qids,
3915 				       u32 q_type)
3916 {
3917 	int i, j, k = 0;
3918 
3919 	switch (q_type) {
3920 	case VIRTCHNL2_QUEUE_TYPE_TX:
3921 		for (i = 0; i < rsrc->num_txq_grp; i++) {
3922 			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
3923 
3924 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3925 				tx_qgrp->txqs[j]->q_id = qids[k];
3926 		}
3927 		break;
3928 	case VIRTCHNL2_QUEUE_TYPE_RX:
3929 		for (i = 0; i < rsrc->num_rxq_grp; i++) {
3930 			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
3931 			u16 num_rxq;
3932 
3933 			if (idpf_is_queue_model_split(rsrc->rxq_model))
3934 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3935 			else
3936 				num_rxq = rx_qgrp->singleq.num_rxq;
3937 
3938 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3939 				struct idpf_rx_queue *q;
3940 
3941 				if (idpf_is_queue_model_split(rsrc->rxq_model))
3942 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3943 				else
3944 					q = rx_qgrp->singleq.rxqs[j];
3945 				q->q_id = qids[k];
3946 			}
3947 		}
3948 		break;
3949 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3950 		for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
3951 			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
3952 
3953 			tx_qgrp->complq->q_id = qids[k];
3954 		}
3955 		break;
3956 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3957 		for (i = 0; i < rsrc->num_rxq_grp; i++) {
3958 			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
3959 			u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
3960 
3961 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3962 				struct idpf_buf_queue *q;
3963 
3964 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3965 				q->q_id = qids[k];
3966 			}
3967 		}
3968 		break;
3969 	default:
3970 		break;
3971 	}
3972 
3973 	return k;
3974 }
3975 
3976 /**
3977  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3978  * @vport: virtual port for which the queues ids are initialized
3979  * @rsrc: pointer to queue and vector resources
3980  * @chunks: queue ids received over mailbox
3981  *
3982  * Will initialize all queue ids with ids received as mailbox parameters.
3983  *
3984  * Return: 0 on success, negative if all the queues are not initialized.
3985  */
idpf_vport_queue_ids_init(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc,struct idpf_queue_id_reg_info * chunks)3986 int idpf_vport_queue_ids_init(struct idpf_vport *vport,
3987 			      struct idpf_q_vec_rsrc *rsrc,
3988 			      struct idpf_queue_id_reg_info *chunks)
3989 {
3990 	int num_ids, err = 0;
3991 	u16 q_type;
3992 	u32 *qids;
3993 
3994 	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
3995 	if (!qids)
3996 		return -ENOMEM;
3997 
3998 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3999 					   VIRTCHNL2_QUEUE_TYPE_TX,
4000 					   chunks);
4001 	if (num_ids < rsrc->num_txq) {
4002 		err = -EINVAL;
4003 		goto mem_rel;
4004 	}
4005 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
4006 					      VIRTCHNL2_QUEUE_TYPE_TX);
4007 	if (num_ids < rsrc->num_txq) {
4008 		err = -EINVAL;
4009 		goto mem_rel;
4010 	}
4011 
4012 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
4013 					   VIRTCHNL2_QUEUE_TYPE_RX,
4014 					   chunks);
4015 	if (num_ids < rsrc->num_rxq) {
4016 		err = -EINVAL;
4017 		goto mem_rel;
4018 	}
4019 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
4020 					      VIRTCHNL2_QUEUE_TYPE_RX);
4021 	if (num_ids < rsrc->num_rxq) {
4022 		err = -EINVAL;
4023 		goto mem_rel;
4024 	}
4025 
4026 	if (!idpf_is_queue_model_split(rsrc->txq_model))
4027 		goto check_rxq;
4028 
4029 	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
4030 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
4031 	if (num_ids < rsrc->num_complq) {
4032 		err = -EINVAL;
4033 		goto mem_rel;
4034 	}
4035 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
4036 					      num_ids, q_type);
4037 	if (num_ids < rsrc->num_complq) {
4038 		err = -EINVAL;
4039 		goto mem_rel;
4040 	}
4041 
4042 check_rxq:
4043 	if (!idpf_is_queue_model_split(rsrc->rxq_model))
4044 		goto mem_rel;
4045 
4046 	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
4047 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
4048 	if (num_ids < rsrc->num_bufq) {
4049 		err = -EINVAL;
4050 		goto mem_rel;
4051 	}
4052 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
4053 					      num_ids, q_type);
4054 	if (num_ids < rsrc->num_bufq)
4055 		err = -EINVAL;
4056 
4057 mem_rel:
4058 	kfree(qids);
4059 
4060 	return err;
4061 }
4062 
4063 /**
4064  * idpf_vport_adjust_qs - Adjust to new requested queues
4065  * @vport: virtual port data struct
4066  * @rsrc: pointer to queue and vector resources
4067  *
4068  * Renegotiate queues.  Returns 0 on success, negative on failure.
4069  */
idpf_vport_adjust_qs(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4070 int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
4071 {
4072 	struct virtchnl2_create_vport vport_msg;
4073 	int err;
4074 
4075 	vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
4076 	vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
4077 	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
4078 				       NULL);
4079 	if (err)
4080 		return err;
4081 
4082 	idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
4083 	idpf_vport_calc_num_q_groups(rsrc);
4084 
4085 	return 0;
4086 }
4087 
4088 /**
4089  * idpf_is_capability_ena - Default implementation of capability checking
4090  * @adapter: Private data struct
4091  * @all: all or one flag
4092  * @field: caps field to check for flags
4093  * @flag: flag to check
4094  *
4095  * Return true if all capabilities are supported, false otherwise
4096  */
idpf_is_capability_ena(struct idpf_adapter * adapter,bool all,enum idpf_cap_field field,u64 flag)4097 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
4098 			    enum idpf_cap_field field, u64 flag)
4099 {
4100 	u8 *caps = (u8 *)&adapter->caps;
4101 	u32 *cap_field;
4102 
4103 	if (!caps)
4104 		return false;
4105 
4106 	if (field == IDPF_BASE_CAPS)
4107 		return false;
4108 
4109 	cap_field = (u32 *)(caps + field);
4110 
4111 	if (all)
4112 		return (*cap_field & flag) == flag;
4113 	else
4114 		return !!(*cap_field & flag);
4115 }
4116 
4117 /**
4118  * idpf_vport_is_cap_ena - Check if vport capability is enabled
4119  * @vport: Private data struct
4120  * @flag: flag(s) to check
4121  *
4122  * Return: true if the capability is supported, false otherwise
4123  */
idpf_vport_is_cap_ena(struct idpf_vport * vport,u16 flag)4124 bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
4125 {
4126 	struct virtchnl2_create_vport *vport_msg;
4127 
4128 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4129 
4130 	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
4131 }
4132 
4133 /**
4134  * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
4135  * @vport: Private data struct
4136  * @flow_type: flow type to check (from ethtool.h)
4137  *
4138  * Return: true if sideband filters are allowed for @flow_type, false otherwise
4139  */
idpf_sideband_flow_type_ena(struct idpf_vport * vport,u32 flow_type)4140 bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
4141 {
4142 	struct virtchnl2_create_vport *vport_msg;
4143 	__le64 caps;
4144 
4145 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4146 	caps = vport_msg->sideband_flow_caps;
4147 
4148 	switch (flow_type) {
4149 	case TCP_V4_FLOW:
4150 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
4151 	case UDP_V4_FLOW:
4152 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
4153 	default:
4154 		return false;
4155 	}
4156 }
4157 
4158 /**
4159  * idpf_sideband_action_ena - Check if steering is enabled for action
4160  * @vport: Private data struct
4161  * @fsp: flow spec
4162  *
4163  * Return: true if sideband filters are allowed for @fsp, false otherwise
4164  */
idpf_sideband_action_ena(struct idpf_vport * vport,struct ethtool_rx_flow_spec * fsp)4165 bool idpf_sideband_action_ena(struct idpf_vport *vport,
4166 			      struct ethtool_rx_flow_spec *fsp)
4167 {
4168 	struct virtchnl2_create_vport *vport_msg;
4169 	unsigned int supp_actions;
4170 
4171 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4172 	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
4173 
4174 	/* Actions Drop/Wake are not supported */
4175 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
4176 	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
4177 		return false;
4178 
4179 	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
4180 }
4181 
/**
 * idpf_fsteer_max_rules - Get maximum number of flow steering rules
 * @vport: virtual port structure
 *
 * Return: the flow_steer_max_rules value reported by the device in the
 * create-vport reply for this vport.
 */
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
	return le32_to_cpu(vport_msg->flow_steer_max_rules);
}
4189 
4190 /**
4191  * idpf_get_vport_id: Get vport id
4192  * @vport: virtual port structure
4193  *
4194  * Return vport id from the adapter persistent data
4195  */
idpf_get_vport_id(struct idpf_vport * vport)4196 u32 idpf_get_vport_id(struct idpf_vport *vport)
4197 {
4198 	struct virtchnl2_create_vport *vport_msg;
4199 
4200 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4201 
4202 	return le32_to_cpu(vport_msg->vport_id);
4203 }
4204 
/**
 * idpf_set_mac_type - Classify a MAC entry as primary or extra
 * @default_mac_addr: the vport's default (primary) MAC address
 * @mac_addr: virtchnl MAC address entry to classify
 *
 * Marks @mac_addr as VIRTCHNL2_MAC_ADDR_PRIMARY when it matches
 * @default_mac_addr, otherwise as VIRTCHNL2_MAC_ADDR_EXTRA.
 */
static void idpf_set_mac_type(const u8 *default_mac_addr,
			      struct virtchnl2_mac_addr *mac_addr)
{
	bool is_primary;

	is_primary = ether_addr_equal(default_mac_addr, mac_addr->addr);
	mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
				      VIRTCHNL2_MAC_ADDR_EXTRA;
}
4214 
4215 /**
4216  * idpf_mac_filter_async_handler - Async callback for mac filters
4217  * @adapter: private data struct
4218  * @xn: transaction for message
4219  * @ctlq_msg: received message
4220  *
4221  * In some scenarios driver can't sleep and wait for a reply (e.g.: stack is
4222  * holding rtnl_lock) when adding a new mac filter. It puts us in a difficult
4223  * situation to deal with errors returned on the reply. The best we can
4224  * ultimately do is remove it from our list of mac filters and report the
4225  * error.
4226  */
idpf_mac_filter_async_handler(struct idpf_adapter * adapter,struct idpf_vc_xn * xn,const struct idpf_ctlq_msg * ctlq_msg)4227 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
4228 					 struct idpf_vc_xn *xn,
4229 					 const struct idpf_ctlq_msg *ctlq_msg)
4230 {
4231 	struct virtchnl2_mac_addr_list *ma_list;
4232 	struct idpf_vport_config *vport_config;
4233 	struct virtchnl2_mac_addr *mac_addr;
4234 	struct idpf_mac_filter *f, *tmp;
4235 	struct list_head *ma_list_head;
4236 	struct idpf_vport *vport;
4237 	u16 num_entries;
4238 	int i;
4239 
4240 	/* if success we're done, we're only here if something bad happened */
4241 	if (!ctlq_msg->cookie.mbx.chnl_retval)
4242 		return 0;
4243 
4244 	/* make sure at least struct is there */
4245 	if (xn->reply_sz < sizeof(*ma_list))
4246 		goto invalid_payload;
4247 
4248 	ma_list = ctlq_msg->ctx.indirect.payload->va;
4249 	mac_addr = ma_list->mac_addr_list;
4250 	num_entries = le16_to_cpu(ma_list->num_mac_addr);
4251 	/* we should have received a buffer at least this big */
4252 	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
4253 		goto invalid_payload;
4254 
4255 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
4256 	if (!vport)
4257 		goto invalid_payload;
4258 
4259 	vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
4260 	ma_list_head = &vport_config->user_config.mac_filter_list;
4261 
4262 	/* We can't do much to reconcile bad filters at this point, however we
4263 	 * should at least remove them from our list one way or the other so we
4264 	 * have some idea what good filters we have.
4265 	 */
4266 	spin_lock_bh(&vport_config->mac_filter_list_lock);
4267 	list_for_each_entry_safe(f, tmp, ma_list_head, list)
4268 		for (i = 0; i < num_entries; i++)
4269 			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
4270 				list_del(&f->list);
4271 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
4272 	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
4273 			    xn->vc_op);
4274 
4275 	return 0;
4276 
4277 invalid_payload:
4278 	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
4279 			    xn->vc_op, xn->reply_sz);
4280 
4281 	return -EINVAL;
4282 }
4283 
4284 /**
4285  * idpf_add_del_mac_filters - Add/del mac filters
4286  * @adapter: adapter pointer used to send virtchnl message
4287  * @vport_config: persistent vport structure to get the MAC filter list
4288  * @default_mac_addr: default MAC address to compare with
4289  * @vport_id: vport identifier used while preparing the virtchnl message
4290  * @add: Add or delete flag
4291  * @async: Don't wait for return message
4292  *
4293  * Return: 0 on success, error on failure.
4294  **/
idpf_add_del_mac_filters(struct idpf_adapter * adapter,struct idpf_vport_config * vport_config,const u8 * default_mac_addr,u32 vport_id,bool add,bool async)4295 int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
4296 			     struct idpf_vport_config *vport_config,
4297 			     const u8 *default_mac_addr, u32 vport_id,
4298 			     bool add, bool async)
4299 {
4300 	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
4301 	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
4302 	struct idpf_vc_xn_params xn_params = {};
4303 	u32 num_msgs, total_filters = 0;
4304 	struct idpf_mac_filter *f;
4305 	ssize_t reply_sz;
4306 	int i = 0, k;
4307 
4308 	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
4309 				VIRTCHNL2_OP_DEL_MAC_ADDR;
4310 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4311 	xn_params.async = async;
4312 	xn_params.async_handler = idpf_mac_filter_async_handler;
4313 
4314 	spin_lock_bh(&vport_config->mac_filter_list_lock);
4315 
4316 	/* Find the number of newly added filters */
4317 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
4318 			    list) {
4319 		if (add && f->add)
4320 			total_filters++;
4321 		else if (!add && f->remove)
4322 			total_filters++;
4323 	}
4324 
4325 	if (!total_filters) {
4326 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
4327 
4328 		return 0;
4329 	}
4330 
4331 	/* Fill all the new filters into virtchannel message */
4332 	mac_addr = kzalloc_objs(struct virtchnl2_mac_addr, total_filters,
4333 				GFP_ATOMIC);
4334 	if (!mac_addr) {
4335 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
4336 
4337 		return -ENOMEM;
4338 	}
4339 
4340 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
4341 			    list) {
4342 		if (add && f->add) {
4343 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
4344 			idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
4345 			i++;
4346 			f->add = false;
4347 			if (i == total_filters)
4348 				break;
4349 		}
4350 		if (!add && f->remove) {
4351 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
4352 			idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
4353 			i++;
4354 			f->remove = false;
4355 			if (i == total_filters)
4356 				break;
4357 		}
4358 	}
4359 
4360 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
4361 
4362 	/* Chunk up the filters into multiple messages to avoid
4363 	 * sending a control queue message buffer that is too large
4364 	 */
4365 	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
4366 
4367 	for (i = 0, k = 0; i < num_msgs; i++) {
4368 		u32 entries_size, buf_size, num_entries;
4369 
4370 		num_entries = min_t(u32, total_filters,
4371 				    IDPF_NUM_FILTERS_PER_MSG);
4372 		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
4373 		buf_size = struct_size(ma_list, mac_addr_list, num_entries);
4374 
4375 		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
4376 			kfree(ma_list);
4377 			ma_list = kzalloc(buf_size, GFP_ATOMIC);
4378 			if (!ma_list)
4379 				return -ENOMEM;
4380 		} else {
4381 			memset(ma_list, 0, buf_size);
4382 		}
4383 
4384 		ma_list->vport_id = cpu_to_le32(vport_id);
4385 		ma_list->num_mac_addr = cpu_to_le16(num_entries);
4386 		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
4387 
4388 		xn_params.send_buf.iov_base = ma_list;
4389 		xn_params.send_buf.iov_len = buf_size;
4390 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4391 		if (reply_sz < 0)
4392 			return reply_sz;
4393 
4394 		k += num_entries;
4395 		total_filters -= num_entries;
4396 	}
4397 
4398 	return 0;
4399 }
4400 
4401 /**
4402  * idpf_set_promiscuous - set promiscuous and send message to mailbox
4403  * @adapter: Driver specific private structure
4404  * @config_data: Vport specific config data
4405  * @vport_id: Vport identifier
4406  *
4407  * Request to enable promiscuous mode for the vport. Message is sent
4408  * asynchronously and won't wait for response.  Returns 0 on success, negative
4409  * on failure;
4410  */
idpf_set_promiscuous(struct idpf_adapter * adapter,struct idpf_vport_user_config_data * config_data,u32 vport_id)4411 int idpf_set_promiscuous(struct idpf_adapter *adapter,
4412 			 struct idpf_vport_user_config_data *config_data,
4413 			 u32 vport_id)
4414 {
4415 	struct idpf_vc_xn_params xn_params = {};
4416 	struct virtchnl2_promisc_info vpi;
4417 	ssize_t reply_sz;
4418 	u16 flags = 0;
4419 
4420 	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
4421 		flags |= VIRTCHNL2_UNICAST_PROMISC;
4422 	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
4423 		flags |= VIRTCHNL2_MULTICAST_PROMISC;
4424 
4425 	vpi.vport_id = cpu_to_le32(vport_id);
4426 	vpi.flags = cpu_to_le16(flags);
4427 
4428 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
4429 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4430 	xn_params.send_buf.iov_base = &vpi;
4431 	xn_params.send_buf.iov_len = sizeof(vpi);
4432 	/* setting promiscuous is only ever done asynchronously */
4433 	xn_params.async = true;
4434 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4435 
4436 	return reply_sz < 0 ? reply_sz : 0;
4437 }
4438 
4439 /**
4440  * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
4441  * @cdev_info: IDC core device info pointer
4442  * @send_msg: message to send
4443  * @msg_size: size of message to send
4444  * @recv_msg: message to populate on reception of response
4445  * @recv_len: length of message copied into recv_msg or 0 on error
4446  *
4447  * Return: 0 on success or error code on failure.
4448  */
idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info * cdev_info,u8 * send_msg,u16 msg_size,u8 * recv_msg,u16 * recv_len)4449 int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
4450 			       u8 *send_msg, u16 msg_size,
4451 			       u8 *recv_msg, u16 *recv_len)
4452 {
4453 	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
4454 	struct idpf_vc_xn_params xn_params = { };
4455 	ssize_t reply_sz;
4456 	u16 recv_size;
4457 
4458 	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
4459 		return -EINVAL;
4460 
4461 	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
4462 	*recv_len = 0;
4463 	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
4464 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4465 	xn_params.send_buf.iov_base = send_msg;
4466 	xn_params.send_buf.iov_len = msg_size;
4467 	xn_params.recv_buf.iov_base = recv_msg;
4468 	xn_params.recv_buf.iov_len = recv_size;
4469 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4470 	if (reply_sz < 0)
4471 		return reply_sz;
4472 	*recv_len = reply_sz;
4473 
4474 	return 0;
4475 }
4476 EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
4477