// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2021, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

/* The budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we
 * update the ci before we have polled all the entries in the EQ.
 * MLX5_NUM_SPARE_EQE is added on top of the requested number of entries when
 * the EQ size is set, so the budget is also smaller than the EQ size.
 */
enum {
	MLX5_EQ_POLLING_BUDGET	= 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
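
/* A worked sizing example (illustrative; assumes MLX5_NUM_SPARE_EQE is
 * 0x80 (128) as defined in lib/eq.h): an EQ requested with nent = 256 is
 * sized to order_base_2(256 + 128) = 512 EQEs in create_map_eq() below, so
 * polling at most 128 entries per pass always updates the ci while spare
 * room is left for the hardware to keep writing new EQEs.
 */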

struct mlx5_eq_table {
	struct xarray           comp_eqs;
	struct mlx5_eq_async    pages_eq;
	struct mlx5_eq_async    cmd_eq;
	struct mlx5_eq_async    async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* CQ error events are reported on the async_eq */
	struct mlx5_nb          cq_err_nb;

	struct mutex            lock; /* sync async eqs creations */
	struct mutex            comp_lock; /* sync comp eqs creations */
	int			curr_comp_eqs;
	int			max_comp_eqs;
	struct mlx5_irq_table	*irq_table;
	struct xarray           comp_irqs;
	struct mlx5_irq         *ctrl_irq;
	struct cpu_rmap		*rmap;
	struct cpumask          used_cpus;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}
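
/* A minimal usage sketch (illustrative only): every successful
 * mlx5_eq_cq_get() must be balanced by mlx5_cq_put(), otherwise the CQ
 * reference count never drops back and the CQ cannot be freed:
 *
 *	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
 *
 *	if (cq) {
 *		... use cq under the held reference ...
 *		mlx5_cq_put(cq);
 *	}
 */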

static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;

	while ((eqe = next_eqe_sw(eq))) {
		struct mlx5_core_cq *cq;
		u32 cqn;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

		if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
			break;
	}

	eq_update_ci(eq, 1);

	return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. Using it is not recommended unless it is a last resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}
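
/* Usage sketch (an assumption based on the warning above, not a statement
 * about every caller): recovery paths that suspect a lost completion
 * interrupt can drain pending EQEs synchronously and learn how many were
 * handled:
 *
 *	u32 handled = mlx5_eq_poll_irq_disabled(eq);
 *
 *	if (handled)
 *		mlx5_core_dbg(eq->core.dev, "recovered %u EQEs\n", handled);
 */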

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

		if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
			break;
	}

	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}
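
/* A minimal consumer sketch (illustrative, not part of this file): async
 * EQEs are fanned out through the per-type atomic notifier chains above, so
 * a listener registers an mlx5_nb for the event type it cares about. The
 * handler and embedding struct below are hypothetical; MLX5_NB_INIT and
 * mlx5_eq_notifier_register() are the same helpers this file uses for
 * cq_err_nb:
 *
 *	static int port_event(struct notifier_block *nb, unsigned long type,
 *			      void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *		...
 *		return NOTIFY_OK;
 *	}
 *
 *	MLX5_NB_INIT(&priv->port_nb, port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &priv->port_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &priv->port_nb);
 */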

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
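
/* Why the init value is 1 (an explanatory sketch inferred from the ownership
 * test in mlx5_eq_get_eqe() below): software considers an entry valid when
 * the owner bit's LSB equals the parity of cons_index / eq size. Pass 0 has
 * parity 0, so initializing every owner byte to 1 keeps all entries
 * hardware-owned until the device writes them with the current pass's owner
 * bit.
 */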

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;
	__be64 *pas;
	u16 vecidx;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->cons_index = 0;

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
	init_eq_buf(eq);

	eq->irq = param->irq;
	vecidx = mlx5_irq_get_index(eq->irq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to enable
 * @nb  : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	int err;

	err = mlx5_irq_attach_nb(eq->irq, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to disable
 * @nb  : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);

	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      eq->eqn, cq->cqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      eq->eqn, cq->cqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
				 dev->priv.numa_node);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	cpumask_clear(&eq_table->used_cpus);
	xa_init(&eq_table->comp_eqs);
	xa_init(&eq_table->comp_irqs);
	mutex_init(&eq_table->comp_lock);
	eq_table->curr_comp_eqs = 0;
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mlx5_eq_debugfs_cleanup(dev);
	xa_destroy(&table->comp_irqs);
	xa_destroy(&table->comp_eqs);
	kvfree(table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = create_map_eq(dev, eq, param);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_pcie_cong_event_supported(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_NUM_ASYNC_EQE;
}
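
/* Configuration sketch (assuming the generic devlink param names
 * "event_eq_size" and "io_eq_size", and a placeholder PCI address): the
 * depths fetched here and in comp_eq_depth_devlink_param_get() below are
 * driverinit params, so a new value takes effect only after a devlink
 * reload, e.g.:
 *
 *	devlink dev param set pci/0000:00:00.0 name event_eq_size \
 *		value 4096 cmode driverinit
 *	devlink dev reload pci/0000:00:00.0
 */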

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	/* All the async_eqs use a single IRQ; request one IRQ and share its
	 * index among all the async_eqs of this device.
	 */
	table->ctrl_irq = mlx5_ctrl_irq_request(dev);
	if (IS_ERR(table->ctrl_irq))
		return PTR_ERR(table->ctrl_irq);

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = async_eq_depth_devlink_param_get(dev),
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	/* Skip page EQ creation when the device does not issue page requests */
	if (MLX5_CAP_GEN(dev, page_request_disable)) {
		mlx5_core_dbg(dev, "Skip page EQ creation\n");
		return 0;
	}

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	if (!MLX5_CAP_GEN(dev, page_request_disable))
		cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
					   dev->priv.numa_node);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	param->irq = dev->priv.eq_table->ctrl_irq;
	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
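
/* A minimal consumer lifecycle sketch for the generic EQ API above
 * (illustrative; the event mask and the notifier block "my_nb" are
 * hypothetical):
 *
 *	struct mlx5_eq_param param = {
 *		.nent = 64,
 *		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *	};
 *	struct mlx5_eq *eq;
 *	int err;
 *
 *	eq = mlx5_eq_create_generic(dev, &param);
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 *	err = mlx5_eq_enable(dev, eq, &my_nb);
 *	...
 *	mlx5_eq_disable(dev, eq, &my_nb);
 *	mlx5_eq_destroy_generic(dev, eq);
 */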

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	eq->cons_index += cc;
	eq_update_ci(eq, arm);
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
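
/* Polling sketch for the two exported helpers above (illustrative): a
 * generic EQ consumer walks valid EQEs with a local count, then publishes
 * the new ci and optionally re-arms the EQ in one step:
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		... handle eqe ...
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);
 */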
8117701707cSSaeed Mahameed 
comp_irq_release_pci(struct mlx5_core_dev * dev,u16 vecidx)81254b2cf41SMaher Sanalla static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
81379b60ca8SShay Drory {
81479b60ca8SShay Drory 	struct mlx5_eq_table *table = dev->priv.eq_table;
815c8a0245cSMaher Sanalla 	struct mlx5_irq *irq;
81679b60ca8SShay Drory 
81754b2cf41SMaher Sanalla 	irq = xa_load(&table->comp_irqs, vecidx);
81854b2cf41SMaher Sanalla 	if (!irq)
81954b2cf41SMaher Sanalla 		return;
82054b2cf41SMaher Sanalla 
82154b2cf41SMaher Sanalla 	xa_erase(&table->comp_irqs, vecidx);
822c8a0245cSMaher Sanalla 	mlx5_irq_release_vector(irq);
823c8a0245cSMaher Sanalla }
82479b60ca8SShay Drory 
mlx5_cpumask_default_spread(struct mlx5_core_dev * dev,int index)825e5efc231SErwan Velu static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
82679b60ca8SShay Drory {
827e5efc231SErwan Velu 	return cpumask_local_spread(index, dev->priv.numa_node);
828ddd2c79dSMaher Sanalla }
829ddd2c79dSMaher Sanalla 
mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev * dev)83054c52978SMaher Sanalla static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
83154c52978SMaher Sanalla {
83254c52978SMaher Sanalla #ifdef CONFIG_RFS_ACCEL
83354c52978SMaher Sanalla #ifdef CONFIG_MLX5_SF
83454c52978SMaher Sanalla 	if (mlx5_core_is_sf(dev))
83554c52978SMaher Sanalla 		return dev->priv.parent_mdev->priv.eq_table->rmap;
83654c52978SMaher Sanalla #endif
83754c52978SMaher Sanalla 	return dev->priv.eq_table->rmap;
83854c52978SMaher Sanalla #else
83954c52978SMaher Sanalla 	return NULL;
84054c52978SMaher Sanalla #endif
84154c52978SMaher Sanalla }
84254c52978SMaher Sanalla 
comp_irq_request_pci(struct mlx5_core_dev * dev,u16 vecidx)843ddd2c79dSMaher Sanalla static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
844ddd2c79dSMaher Sanalla {
845ddd2c79dSMaher Sanalla 	struct mlx5_eq_table *table = dev->priv.eq_table;
84654c52978SMaher Sanalla 	struct cpu_rmap *rmap;
847ddd2c79dSMaher Sanalla 	struct mlx5_irq *irq;
848ddd2c79dSMaher Sanalla 	int cpu;
849ddd2c79dSMaher Sanalla 
85054c52978SMaher Sanalla 	rmap = mlx5_eq_table_get_pci_rmap(dev);
851e5efc231SErwan Velu 	cpu = mlx5_cpumask_default_spread(dev, vecidx);
85254c52978SMaher Sanalla 	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
853a1772de7SMaher Sanalla 	if (IS_ERR(irq))
85454b2cf41SMaher Sanalla 		return PTR_ERR(irq);
855a1772de7SMaher Sanalla 
85654b2cf41SMaher Sanalla 	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
857a1772de7SMaher Sanalla }
858a1772de7SMaher Sanalla 
comp_irq_release_sf(struct mlx5_core_dev * dev,u16 vecidx)85954b2cf41SMaher Sanalla static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
860b48a0f72SEli Cohen {
861b48a0f72SEli Cohen 	struct mlx5_eq_table *table = dev->priv.eq_table;
862c8a0245cSMaher Sanalla 	struct mlx5_irq *irq;
8637d2f74d1SMaher Sanalla 	int cpu;
864b48a0f72SEli Cohen 
86554b2cf41SMaher Sanalla 	irq = xa_load(&table->comp_irqs, vecidx);
86654b2cf41SMaher Sanalla 	if (!irq)
86754b2cf41SMaher Sanalla 		return;
86854b2cf41SMaher Sanalla 
8697d2f74d1SMaher Sanalla 	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
8707d2f74d1SMaher Sanalla 	cpumask_clear_cpu(cpu, &table->used_cpus);
87154b2cf41SMaher Sanalla 	xa_erase(&table->comp_irqs, vecidx);
872c8a0245cSMaher Sanalla 	mlx5_irq_affinity_irq_release(dev, irq);
873c8a0245cSMaher Sanalla }
874b48a0f72SEli Cohen 
comp_irq_request_sf(struct mlx5_core_dev * dev,u16 vecidx)87554b2cf41SMaher Sanalla static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
876b48a0f72SEli Cohen {
87732d2724dSShay Drory 	struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
878b48a0f72SEli Cohen 	struct mlx5_eq_table *table = dev->priv.eq_table;
879*43350127SZhu Yanjun 	struct irq_affinity_desc *af_desc;
880a1772de7SMaher Sanalla 	struct mlx5_irq *irq;
881b48a0f72SEli Cohen 
88254c52978SMaher Sanalla 	/* In case SF irq pool does not exist, fallback to the PF irqs */
8837d2f74d1SMaher Sanalla 	if (!mlx5_irq_pool_is_sf_pool(pool))
88454c52978SMaher Sanalla 		return comp_irq_request_pci(dev, vecidx);
88554c52978SMaher Sanalla 
886*43350127SZhu Yanjun 	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
887*43350127SZhu Yanjun 	if (!af_desc)
888*43350127SZhu Yanjun 		return -ENOMEM;
889*43350127SZhu Yanjun 
890*43350127SZhu Yanjun 	af_desc->is_managed = false;
891*43350127SZhu Yanjun 	cpumask_copy(&af_desc->mask, cpu_online_mask);
892*43350127SZhu Yanjun 	cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
893*43350127SZhu Yanjun 	irq = mlx5_irq_affinity_request(dev, pool, af_desc);
894*43350127SZhu Yanjun 	if (IS_ERR(irq)) {
895*43350127SZhu Yanjun 		kvfree(af_desc);
89654b2cf41SMaher Sanalla 		return PTR_ERR(irq);
897*43350127SZhu Yanjun 	}
8987d2f74d1SMaher Sanalla 
8997d2f74d1SMaher Sanalla 	cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
9007d2f74d1SMaher Sanalla 	mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
9017d2f74d1SMaher Sanalla 		      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
9027d2f74d1SMaher Sanalla 		      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
9037d2f74d1SMaher Sanalla 		      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
904a1772de7SMaher Sanalla 
905*43350127SZhu Yanjun 	kvfree(af_desc);
906*43350127SZhu Yanjun 
90754b2cf41SMaher Sanalla 	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
908a1772de7SMaher Sanalla }
909a1772de7SMaher Sanalla 
comp_irq_release(struct mlx5_core_dev * dev,u16 vecidx)91054b2cf41SMaher Sanalla static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
911b48a0f72SEli Cohen {
91254b2cf41SMaher Sanalla 	mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
91354b2cf41SMaher Sanalla 			       comp_irq_release_pci(dev, vecidx);
914b48a0f72SEli Cohen }
915b48a0f72SEli Cohen 
comp_irq_request(struct mlx5_core_dev * dev,u16 vecidx)91654b2cf41SMaher Sanalla static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
917b48a0f72SEli Cohen {
91854b2cf41SMaher Sanalla 	return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
91954b2cf41SMaher Sanalla 				      comp_irq_request_pci(dev, vecidx);
92079b60ca8SShay Drory }
92179b60ca8SShay Drory 
9223354822cSEli Cohen #ifdef CONFIG_RFS_ACCEL
alloc_rmap(struct mlx5_core_dev * mdev)9233354822cSEli Cohen static int alloc_rmap(struct mlx5_core_dev *mdev)
9243354822cSEli Cohen {
9253354822cSEli Cohen 	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
9263354822cSEli Cohen 
9273354822cSEli Cohen 	/* rmap is a mapping between irq number and queue number.
9283354822cSEli Cohen 	 * Each irq can be assigned only to a single rmap.
9293354822cSEli Cohen 	 * Since SFs share IRQs, rmap mapping cannot function correctly
9303354822cSEli Cohen 	 * for irqs that are shared between different core/netdev RX rings.
9313354822cSEli Cohen 	 * Hence we don't allow netdev rmap for SFs.
9323354822cSEli Cohen 	 */
9333354822cSEli Cohen 	if (mlx5_core_is_sf(mdev))
9343354822cSEli Cohen 		return 0;
9353354822cSEli Cohen 
93618cf3d31SMaher Sanalla 	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
9373354822cSEli Cohen 	if (!eq_table->rmap)
9383354822cSEli Cohen 		return -ENOMEM;
9393354822cSEli Cohen 	return 0;
9403354822cSEli Cohen }
9413354822cSEli Cohen 
free_rmap(struct mlx5_core_dev * mdev)9423354822cSEli Cohen static void free_rmap(struct mlx5_core_dev *mdev)
9433354822cSEli Cohen {
9443354822cSEli Cohen 	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
9453354822cSEli Cohen 
9463354822cSEli Cohen 	if (eq_table->rmap) {
9473354822cSEli Cohen 		free_irq_cpu_rmap(eq_table->rmap);
9483354822cSEli Cohen 		eq_table->rmap = NULL;
9493354822cSEli Cohen 	}
9503354822cSEli Cohen }
9513354822cSEli Cohen #else
alloc_rmap(struct mlx5_core_dev * mdev)9523354822cSEli Cohen static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
free_rmap(struct mlx5_core_dev * mdev)9533354822cSEli Cohen static void free_rmap(struct mlx5_core_dev *mdev) {}
9543354822cSEli Cohen #endif
9553354822cSEli Cohen 
destroy_comp_eq(struct mlx5_core_dev * dev,struct mlx5_eq_comp * eq,u16 vecidx)956e3e56775SMaher Sanalla static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
957ca828cb4SSaeed Mahameed {
958f2f3df55SSaeed Mahameed 	struct mlx5_eq_table *table = dev->priv.eq_table;
959ca828cb4SSaeed Mahameed 
960e3e56775SMaher Sanalla 	xa_erase(&table->comp_eqs, vecidx);
9611f8a7beeSYuval Avnery 	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
9627701707cSSaeed Mahameed 	if (destroy_unmap_eq(dev, &eq->core))
96316d76083SSaeed Mahameed 		mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
96416d76083SSaeed Mahameed 			       eq->core.eqn);
96516d76083SSaeed Mahameed 	tasklet_disable(&eq->tasklet_ctx.task);
966ca828cb4SSaeed Mahameed 	kfree(eq);
967e3e56775SMaher Sanalla 	comp_irq_release(dev, vecidx);
96818cf3d31SMaher Sanalla 	table->curr_comp_eqs--;
969ca828cb4SSaeed Mahameed }
97054b2cf41SMaher Sanalla 
comp_eq_depth_devlink_param_get(struct mlx5_core_dev * dev)9710844fa5fSShay Drory static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
9720844fa5fSShay Drory {
9730844fa5fSShay Drory 	struct devlink *devlink = priv_to_devlink(dev);
9740844fa5fSShay Drory 	union devlink_param_value val;
9750844fa5fSShay Drory 	int err;
9760844fa5fSShay Drory 
977075935f0SJiri Pirko 	err = devl_param_driverinit_value_get(devlink,
9780844fa5fSShay Drory 					      DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
9790844fa5fSShay Drory 					      &val);
9800844fa5fSShay Drory 	if (!err)
9810844fa5fSShay Drory 		return val.vu32;
9820844fa5fSShay Drory 	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
9830844fa5fSShay Drory 	return MLX5_COMP_EQ_SIZE;
9840844fa5fSShay Drory }
9850844fa5fSShay Drory 
986f14c1a14SMaher Sanalla /* Must be called with EQ table comp_lock held */
create_comp_eq(struct mlx5_core_dev * dev,u16 vecidx)987e3e56775SMaher Sanalla static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
988ca828cb4SSaeed Mahameed {
989f2f3df55SSaeed Mahameed 	struct mlx5_eq_table *table = dev->priv.eq_table;
990e3e56775SMaher Sanalla 	struct mlx5_eq_param param = {};
99116d76083SSaeed Mahameed 	struct mlx5_eq_comp *eq;
992c8a0245cSMaher Sanalla 	struct mlx5_irq *irq;
993ca828cb4SSaeed Mahameed 	int nent;
994ca828cb4SSaeed Mahameed 	int err;
995ca828cb4SSaeed Mahameed 
996f14c1a14SMaher Sanalla 	lockdep_assert_held(&table->comp_lock);
997f14c1a14SMaher Sanalla 	if (table->curr_comp_eqs == table->max_comp_eqs) {
998f14c1a14SMaher Sanalla 		mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
999f14c1a14SMaher Sanalla 			      table->max_comp_eqs);
1000f14c1a14SMaher Sanalla 		return -ENOMEM;
1001f14c1a14SMaher Sanalla 	}
1002f14c1a14SMaher Sanalla 
1003e3e56775SMaher Sanalla 	err = comp_irq_request(dev, vecidx);
10043354822cSEli Cohen 	if (err)
10053354822cSEli Cohen 		return err;
10063354822cSEli Cohen 
10070844fa5fSShay Drory 	nent = comp_eq_depth_devlink_param_get(dev);
10087f880719STariq Toukan 
10097f880719STariq Toukan 	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
1010ca828cb4SSaeed Mahameed 	if (!eq) {
1011ca828cb4SSaeed Mahameed 		err = -ENOMEM;
1012e3e56775SMaher Sanalla 		goto clean_irq;
1013ca828cb4SSaeed Mahameed 	}
1014ca828cb4SSaeed Mahameed 
101516d76083SSaeed Mahameed 	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
101616d76083SSaeed Mahameed 	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
101716d76083SSaeed Mahameed 	spin_lock_init(&eq->tasklet_ctx.lock);
1018a1be161aSAllen Pais 	tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
101916d76083SSaeed Mahameed 
1020e3e56775SMaher Sanalla 	irq = xa_load(&table->comp_irqs, vecidx);
1021ca390799SYuval Avnery 	eq->irq_nb.notifier_call = mlx5_eq_comp_int;
10227701707cSSaeed Mahameed 	param = (struct mlx5_eq_param) {
1023c8a0245cSMaher Sanalla 		.irq = irq,
10247701707cSSaeed Mahameed 		.nent = nent,
10257701707cSSaeed Mahameed 	};
1026e4e3f24bSLeon Romanovsky 
1027e4e3f24bSLeon Romanovsky 	err = create_map_eq(dev, &eq->core, &param);
1028e4e3f24bSLeon Romanovsky 	if (err)
1029e4e3f24bSLeon Romanovsky 		goto clean_eq;
10301f8a7beeSYuval Avnery 	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
10311f8a7beeSYuval Avnery 	if (err) {
10321f8a7beeSYuval Avnery 		destroy_unmap_eq(dev, &eq->core);
1033e4e3f24bSLeon Romanovsky 		goto clean_eq;
10341f8a7beeSYuval Avnery 	}
10351f8a7beeSYuval Avnery 
103616d76083SSaeed Mahameed 	mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
1037e3e56775SMaher Sanalla 	err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
1038273c697fSMaher Sanalla 	if (err)
1039273c697fSMaher Sanalla 		goto disable_eq;
1040ca828cb4SSaeed Mahameed 
1041e3e56775SMaher Sanalla 	table->curr_comp_eqs++;
1042f14c1a14SMaher Sanalla 	return eq->core.eqn;
104379b60ca8SShay Drory 
1044273c697fSMaher Sanalla disable_eq:
1045273c697fSMaher Sanalla 	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
1046e4e3f24bSLeon Romanovsky clean_eq:
1047e4e3f24bSLeon Romanovsky 	kfree(eq);
1048e3e56775SMaher Sanalla clean_irq:
1049e3e56775SMaher Sanalla 	comp_irq_release(dev, vecidx);
1050ca828cb4SSaeed Mahameed 	return err;
1051ca828cb4SSaeed Mahameed }
1052ca828cb4SSaeed Mahameed 
mlx5_comp_eqn_get(struct mlx5_core_dev * dev,u16 vecidx,int * eqn)1053f14c1a14SMaher Sanalla int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
1054ca828cb4SSaeed Mahameed {
1055f2f3df55SSaeed Mahameed 	struct mlx5_eq_table *table = dev->priv.eq_table;
10563ac0b6aaSParav Pandit 	struct mlx5_eq_comp *eq;
1057f14c1a14SMaher Sanalla 	int ret = 0;
1058ca828cb4SSaeed Mahameed 
1059d4f25be2SMaher Sanalla 	if (vecidx >= table->max_comp_eqs) {
1060d4f25be2SMaher Sanalla 		mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
1061d4f25be2SMaher Sanalla 			      vecidx, table->max_comp_eqs);
1062d4f25be2SMaher Sanalla 		return -EINVAL;
1063d4f25be2SMaher Sanalla 	}
1064d4f25be2SMaher Sanalla 
1065f14c1a14SMaher Sanalla 	mutex_lock(&table->comp_lock);
1066f14c1a14SMaher Sanalla 	eq = xa_load(&table->comp_eqs, vecidx);
1067f14c1a14SMaher Sanalla 	if (eq) {
1068563476aeSShay Drory 		*eqn = eq->core.eqn;
1069f14c1a14SMaher Sanalla 		goto out;
1070f14c1a14SMaher Sanalla 	}
1071f14c1a14SMaher Sanalla 
1072f14c1a14SMaher Sanalla 	ret = create_comp_eq(dev, vecidx);
1073f14c1a14SMaher Sanalla 	if (ret < 0) {
1074f14c1a14SMaher Sanalla 		mutex_unlock(&table->comp_lock);
1075f14c1a14SMaher Sanalla 		return ret;
1076f14c1a14SMaher Sanalla 	}
1077f14c1a14SMaher Sanalla 
1078f14c1a14SMaher Sanalla 	*eqn = ret;
1079f14c1a14SMaher Sanalla out:
1080f14c1a14SMaher Sanalla 	mutex_unlock(&table->comp_lock);
1081273c697fSMaher Sanalla 	return 0;
1082ca828cb4SSaeed Mahameed }
1083f14c1a14SMaher Sanalla EXPORT_SYMBOL(mlx5_comp_eqn_get);
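
/*
 * Usage sketch (hedged, not part of this file): a consumer such as an
 * RDMA or vDPA driver resolving an EQN before creating a CQ on a given
 * completion vector. "mdev", "vecidx" and the cqc context setup are
 * assumed for illustration.
 *
 *	int eqn, err;
 *
 *	err = mlx5_comp_eqn_get(mdev, vecidx, &eqn);
 *	if (err)
 *		return err;
 *	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
 */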
1084563476aeSShay Drory 
1085f14c1a14SMaher Sanalla int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
1086563476aeSShay Drory {
1087f14c1a14SMaher Sanalla 	struct mlx5_eq_table *table = dev->priv.eq_table;
1088f14c1a14SMaher Sanalla 	struct mlx5_eq_comp *eq;
1089f14c1a14SMaher Sanalla 	int eqn;
1090f14c1a14SMaher Sanalla 	int err;
1091ca828cb4SSaeed Mahameed 
1092f14c1a14SMaher Sanalla 	/* Allocate the EQ if not allocated yet */
1093f14c1a14SMaher Sanalla 	err = mlx5_comp_eqn_get(dev, vector, &eqn);
1094f14c1a14SMaher Sanalla 	if (err)
1095f14c1a14SMaher Sanalla 		return err;
1096f14c1a14SMaher Sanalla 
1097f14c1a14SMaher Sanalla 	eq = xa_load(&table->comp_eqs, vector);
1098f14c1a14SMaher Sanalla 	*irqn = eq->core.irqn;
1099f14c1a14SMaher Sanalla 	return 0;
1100563476aeSShay Drory }
1101563476aeSShay Drory 
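/*
 * Usage sketch (hedged): resolving the Linux IRQ number behind a
 * completion vector, e.g. for per-channel bookkeeping. "mdev" and the
 * channel index "ix" are assumed for illustration.
 *
 *	unsigned int irqn;
 *	int err;
 *
 *	err = mlx5_comp_irqn_get(mdev, ix, &irqn);
 *	if (err)
 *		return err;
 */
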
1102674dd4e2SMaher Sanalla unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
1103f2f3df55SSaeed Mahameed {
110418cf3d31SMaher Sanalla 	return dev->priv.eq_table->max_comp_eqs;
1105f2f3df55SSaeed Mahameed }
1106674dd4e2SMaher Sanalla EXPORT_SYMBOL(mlx5_comp_vectors_max);
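
/*
 * Usage sketch (hedged): bounding the number of channels a consumer
 * creates by the available completion vectors; the surrounding context
 * is assumed.
 *
 *	int nch = min_t(int, netif_get_num_default_rss_queues(),
 *			mlx5_comp_vectors_max(mdev));
 */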
1107f2f3df55SSaeed Mahameed 
1108f3147015SMaher Sanalla static struct cpumask *
1109f2f3df55SSaeed Mahameed mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
1110f2f3df55SSaeed Mahameed {
1111fc63dd2aSShay Drory 	struct mlx5_eq_table *table = dev->priv.eq_table;
11123ac0b6aaSParav Pandit 	struct mlx5_eq_comp *eq;
1113561aa15aSYuval Avnery 
1114273c697fSMaher Sanalla 	eq = xa_load(&table->comp_eqs, vector);
1115273c697fSMaher Sanalla 	if (eq)
111661718206SJakub Kicinski 		return mlx5_irq_get_affinity_mask(eq->core.irq);
1117fc63dd2aSShay Drory 
111861718206SJakub Kicinski 	return NULL;
1119f2f3df55SSaeed Mahameed }
1120f3147015SMaher Sanalla 
1121f3147015SMaher Sanalla int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
1122f3147015SMaher Sanalla {
1123f3147015SMaher Sanalla 	struct cpumask *mask;
1124f3147015SMaher Sanalla 	int cpu;
1125f3147015SMaher Sanalla 
1126f3147015SMaher Sanalla 	mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
1127f3147015SMaher Sanalla 	if (mask)
1128f3147015SMaher Sanalla 		cpu = cpumask_first(mask);
1129f3147015SMaher Sanalla 	else
1130e5efc231SErwan Velu 		cpu = mlx5_cpumask_default_spread(dev, vector);
1131f3147015SMaher Sanalla 
1132f3147015SMaher Sanalla 	return cpu;
1133f3147015SMaher Sanalla }
1134f3147015SMaher Sanalla EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
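
/*
 * Usage sketch (hedged): steering per-channel transmit work to the CPU
 * serviced by the vector's IRQ. "netdev" and the channel index "ix" are
 * assumed for illustration.
 *
 *	int cpu = mlx5_comp_vector_get_cpu(mdev, ix);
 *
 *	netif_set_xps_queue(netdev, cpumask_of(cpu), ix);
 */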
1135f2f3df55SSaeed Mahameed 
1136092ead48SSaeed Mahameed #ifdef CONFIG_RFS_ACCEL
1137f2f3df55SSaeed Mahameed struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
1138f2f3df55SSaeed Mahameed {
11392d74524cSShay Drory 	return dev->priv.eq_table->rmap;
1140f2f3df55SSaeed Mahameed }
1141092ead48SSaeed Mahameed #endif
1142f2f3df55SSaeed Mahameed 
114316d76083SSaeed Mahameed struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
1144ca828cb4SSaeed Mahameed {
1145f2f3df55SSaeed Mahameed 	struct mlx5_eq_table *table = dev->priv.eq_table;
114616d76083SSaeed Mahameed 	struct mlx5_eq_comp *eq;
1147273c697fSMaher Sanalla 	unsigned long index;
1148ca828cb4SSaeed Mahameed 
1149f14c1a14SMaher Sanalla 	xa_for_each(&table->comp_eqs, index, eq)
115016d76083SSaeed Mahameed 		if (eq->core.eqn == eqn)
1151ca828cb4SSaeed Mahameed 			return eq;
1152ca828cb4SSaeed Mahameed 
1153ca828cb4SSaeed Mahameed 	return ERR_PTR(-ENOENT);
1154ca828cb4SSaeed Mahameed }
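
/* Caller sketch (hedged): the return value is never NULL, so check it
 * with IS_ERR():
 *
 *	struct mlx5_eq_comp *eq = mlx5_eqn2comp_eq(dev, eqn);
 *
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 */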
1155ca828cb4SSaeed Mahameed 
11561ef903bfSDaniel Jurgens /* This function should only be called after mlx5_cmd_force_teardown_hca */
11571ef903bfSDaniel Jurgens void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
11581ef903bfSDaniel Jurgens {
11599c2d0801SShay Drory 	mlx5_irq_table_free_irqs(dev);
11601ef903bfSDaniel Jurgens }
1161c8e21b3bSSaeed Mahameed 
1162a7b76002SDaniel Jurgens #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1163a7b76002SDaniel Jurgens #define MLX5_MAX_ASYNC_EQS 4
1164a7b76002SDaniel Jurgens #else
1165a7b76002SDaniel Jurgens #define MLX5_MAX_ASYNC_EQS 3
1166a7b76002SDaniel Jurgens #endif
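
/* The reserved slots cover the cmd, async and pages EQs; the extra slot
 * under CONFIG_INFINIBAND_ON_DEMAND_PAGING is assumed to account for the
 * page-fault EQ created when ODP is in use.
 */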
1167a7b76002SDaniel Jurgens 
11681dc85133SEli Cohen static int get_num_eqs(struct mlx5_core_dev *dev)
1169c8e21b3bSSaeed Mahameed {
1170561aa15aSYuval Avnery 	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
11711dc85133SEli Cohen 	int max_dev_eqs;
11721dc85133SEli Cohen 	int num_eqs;
11731dc85133SEli Cohen 
1174b637ac5dSEli Cohen 	/* If ethernet is disabled we use just a single completion vector to
1175b637ac5dSEli Cohen 	 * leave the other vectors available for other drivers using mlx5_core,
1176b637ac5dSEli Cohen 	 * for example mlx5_vdpa.
1177b637ac5dSEli Cohen 	 */
1178b637ac5dSEli Cohen 	if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
1179b637ac5dSEli Cohen 		return 1;
1180b637ac5dSEli Cohen 
118129c6a562SDaniel Jurgens 	max_dev_eqs = mlx5_max_eq_cap_get(dev);
1182c8e21b3bSSaeed Mahameed 
11831dc85133SEli Cohen 	num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
11841dc85133SEli Cohen 			max_dev_eqs - MLX5_MAX_ASYNC_EQS);
1185c36326d3SShay Drory 	if (mlx5_core_is_sf(dev)) {
11864b66be76SDaniel Jurgens 		int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
11874b66be76SDaniel Jurgens 				 MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
11884b66be76SDaniel Jurgens 				 MLX5_COMP_EQS_PER_SF;
11894b66be76SDaniel Jurgens 
11904b66be76SDaniel Jurgens 		max_eqs_sf = min_t(int, max_eqs_sf,
1191c36326d3SShay Drory 				   mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
11921dc85133SEli Cohen 		num_eqs = min_t(int, num_eqs, max_eqs_sf);
1193c36326d3SShay Drory 	}
1194561aa15aSYuval Avnery 
11951dc85133SEli Cohen 	return num_eqs;
11961dc85133SEli Cohen }
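
/* Worked example (hedged, illustrative numbers): with 64 completion IRQs
 * in the IRQ table and a device cap of 256 EQs, a PF gets
 * min(64, 256 - MLX5_MAX_ASYNC_EQS) = 64 completion EQs; an SF is further
 * clamped by its own EQ capability and vector budget.
 */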
11971dc85133SEli Cohen 
11981dc85133SEli Cohen int mlx5_eq_table_create(struct mlx5_core_dev *dev)
11991dc85133SEli Cohen {
12001dc85133SEli Cohen 	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
12011dc85133SEli Cohen 	int err;
12021dc85133SEli Cohen 
120318cf3d31SMaher Sanalla 	eq_table->max_comp_eqs = get_num_eqs(dev);
1204c8e21b3bSSaeed Mahameed 	err = create_async_eqs(dev);
1205c8e21b3bSSaeed Mahameed 	if (err) {
1206c8e21b3bSSaeed Mahameed 		mlx5_core_err(dev, "Failed to create async EQs\n");
1207c8e21b3bSSaeed Mahameed 		goto err_async_eqs;
1208c8e21b3bSSaeed Mahameed 	}
1209c8e21b3bSSaeed Mahameed 
1210e3e56775SMaher Sanalla 	err = alloc_rmap(dev);
1211c8e21b3bSSaeed Mahameed 	if (err) {
1212e3e56775SMaher Sanalla 		mlx5_core_err(dev, "Failed to allocate rmap\n");
1213e3e56775SMaher Sanalla 		goto err_rmap;
1214e3e56775SMaher Sanalla 	}
1215e3e56775SMaher Sanalla 
1216c8e21b3bSSaeed Mahameed 	return 0;
12173354822cSEli Cohen 
1218e3e56775SMaher Sanalla err_rmap:
1219c8e21b3bSSaeed Mahameed 	destroy_async_eqs(dev);
1220c8e21b3bSSaeed Mahameed err_async_eqs:
1221c8e21b3bSSaeed Mahameed 	return err;
1222c8e21b3bSSaeed Mahameed }
1223c8e21b3bSSaeed Mahameed 
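/* Completion EQs are created lazily by mlx5_comp_eqn_get(), so teardown
 * walks the comp_eqs xarray and destroys whatever was instantiated before
 * releasing the rmap and the async EQs.
 */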
1224c8e21b3bSSaeed Mahameed void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
1225c8e21b3bSSaeed Mahameed {
1226e3e56775SMaher Sanalla 	struct mlx5_eq_table *table = dev->priv.eq_table;
1227e3e56775SMaher Sanalla 	struct mlx5_eq_comp *eq;
1228e3e56775SMaher Sanalla 	unsigned long index;
1229e3e56775SMaher Sanalla 
1230e3e56775SMaher Sanalla 	xa_for_each(&table->comp_eqs, index, eq)
1231e3e56775SMaher Sanalla 		destroy_comp_eq(dev, eq, index);
1232e3e56775SMaher Sanalla 
1233e3e56775SMaher Sanalla 	free_rmap(dev);
1234c8e21b3bSSaeed Mahameed 	destroy_async_eqs(dev);
1235c8e21b3bSSaeed Mahameed }
12360f597ed4SSaeed Mahameed 
12370f597ed4SSaeed Mahameed int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
12380f597ed4SSaeed Mahameed {
12390f597ed4SSaeed Mahameed 	struct mlx5_eq_table *eqt = dev->priv.eq_table;
12400f597ed4SSaeed Mahameed 
12410f597ed4SSaeed Mahameed 	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
12420f597ed4SSaeed Mahameed }
1243c0670781SYishai Hadas EXPORT_SYMBOL(mlx5_eq_notifier_register);
12440f597ed4SSaeed Mahameed 
12450f597ed4SSaeed Mahameed int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
12460f597ed4SSaeed Mahameed {
12470f597ed4SSaeed Mahameed 	struct mlx5_eq_table *eqt = dev->priv.eq_table;
12480f597ed4SSaeed Mahameed 
12490f597ed4SSaeed Mahameed 	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
12500f597ed4SSaeed Mahameed }
1251c0670781SYishai Hadas EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
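
/*
 * Usage sketch (hedged): subscribing to an async event type.
 * MLX5_NB_INIT() from linux/mlx5/driver.h binds a handler to an event
 * type; "my_port_event" and "priv" are assumed for illustration.
 *
 *	static int my_port_event(struct notifier_block *nb,
 *				 unsigned long type, void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		...handle eqe->sub_type, eqe->data.port...
 *		return NOTIFY_OK;
 *	}
 *
 *	MLX5_NB_INIT(&priv->port_nb, my_port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(mdev, &priv->port_nb);
 *	...
 *	mlx5_eq_notifier_unregister(mdev, &priv->port_nb);
 */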
1252