// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"

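/* Owner value stamped into every EQE by init_eq_buf(): with the consumer
 * index at 0, all entries initially parse as hardware-owned (see the
 * ownership check in mlx5_eq_get_eqe() below).
 */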
enum {
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED = 0x9,
	MLX5_EQ_STATE_FIRED = 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};

/* The polling budget must not exceed MLX5_NUM_SPARE_EQE, the slack added on
 * top of the requested number of entries when sizing an EQ. This guarantees
 * we update the ci before we have polled all the entries in the EQ, so the
 * budget is always smaller than the EQ size.
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);

struct mlx5_eq_table {
	struct xarray comp_eqs;
	struct mlx5_eq_async pages_eq;
	struct mlx5_eq_async cmd_eq;
	struct mlx5_eq_async async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* CQ error events are delivered on the async EQ; this nb handles them */
	struct mlx5_nb cq_err_nb;

	struct mutex lock; /* sync async eqs creations */
	struct mutex comp_lock; /* sync comp eqs creations */
	int curr_comp_eqs;
	int max_comp_eqs;
	struct mlx5_irq_table *irq_table;
	struct xarray comp_irqs;
	struct mlx5_irq *ctrl_irq;
	struct cpu_rmap *rmap;
	struct cpumask used_cpus;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) |	      \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST) |	      \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) |	      \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR) |	      \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |     \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) |    \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) |    \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) |	      \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) |    \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) |	      \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}

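/* Completion EQ interrupt handler: dispatch each polled EQE to the CQ it
 * names (looked up in the per-EQ radix tree), bounded by
 * MLX5_EQ_POLLING_BUDGET, then publish the consumer index and re-arm.
 */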
static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;

	while ((eqe = next_eqe_sw(eq))) {
		struct mlx5_core_cq *cq;
		u32 cqn;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

		if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
			break;
	}

	eq_update_ci(eq, 1);

	return 0;
}

/* Some architectures don't latch interrupts while they are disabled, so
 * mlx5_eq_poll_irq_disabled() can itself lose interrupts while trying to
 * avoid losing them. Do not use it unless it is the last resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

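/* Async EQ handler: fan each polled event out through its per-type atomic
 * notifier chain and then through the MLX5_EVENT_TYPE_NOTIFY_ANY chain.
 * With action == ASYNC_EQ_RECOVER it is called from process context with
 * IRQs saved and returns the number of EQEs polled (otherwise 0).
 */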
static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

		if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
			break;
	}

	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

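/* Create an EQ in the device and map it to an IRQ vector: allocate the
 * fragmented queue buffer, mark all entries HW-owned, issue CREATE_EQ with
 * the page address array, event bitmask and EQ context, then register the
 * EQ in debugfs.
 */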
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;
	__be64 *pas;
	u16 vecidx;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->cons_index = 0;

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
	init_eq_buf(eq);

	eq->irq = param->irq;
	vecidx = mlx5_irq_get_index(eq->irq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to enable
 * @nb : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	int err;

	err = mlx5_irq_attach_nb(eq->irq, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to disable
 * @nb : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);

	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
				 dev->priv.numa_node);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	cpumask_clear(&eq_table->used_cpus);
	xa_init(&eq_table->comp_eqs);
	xa_init(&eq_table->comp_irqs);
	mutex_init(&eq_table->comp_lock);
	eq_table->curr_comp_eqs = 0;
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mlx5_eq_debugfs_cleanup(dev);
	xa_destroy(&table->comp_irqs);
	xa_destroy(&table->comp_eqs);
	kvfree(table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = create_map_eq(dev, eq, param);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_pcie_cong_event_supported(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_NUM_ASYNC_EQE;
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	/* All async EQs share a single IRQ: request one IRQ and share its
	 * index among all the async EQs of this device.
	 */
	table->ctrl_irq = mlx5_ctrl_irq_request(dev);
	if (IS_ERR(table->ctrl_irq))
		return PTR_ERR(table->ctrl_irq);

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = async_eq_depth_devlink_param_get(dev),
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	/* Skip pages EQ creation when the device does not issue page requests */
	if (MLX5_CAP_GEN(dev, page_request_disable)) {
		mlx5_core_dbg(dev, "Skip page EQ creation\n");
		return 0;
	}

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	if (!MLX5_CAP_GEN(dev, page_request_disable))
		cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers.
 * Needed for the RDMA ODP EQ for now.
 */
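/* Typical consumer flow (a sketch: error handling elided; the mask and
 * depth below are illustrative, not prescribed by this API):
 *
 *	struct mlx5_eq_param param = {
 *		.nent = 64,
 *		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *	};
 *	struct mlx5_eq *eq = mlx5_eq_create_generic(dev, &param);
 *
 *	... poll with mlx5_eq_get_eqe()/mlx5_eq_update_ci() ...
 *
 *	mlx5_eq_destroy_generic(dev, eq);
 */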
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
					   dev->priv.numa_node);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	param->irq = dev->priv.eq_table->ctrl_irq;
	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

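/* Peek at the EQE @cc entries beyond the current consumer index, without
 * consuming it; returns NULL while that entry is still owned by HW. The
 * validity test is a parity check: the expected owner bit flips on every
 * pass through the power-of-two sized queue, so
 * ((eqe->owner & 1) ^ !!(ci & nent)) == 0 marks a software-owned entry.
 */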
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

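/* Advance the consumer index by @cc polled entries and ring the doorbell;
 * @arm requests another interrupt. A consumer polling loop then looks
 * roughly like this (a sketch; process() is a hypothetical handler):
 *
 *	int cc = 0;
 *	struct mlx5_eqe *eqe;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		process(eqe);
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);
 */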
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	eq->cons_index += cc;
	eq_update_ci(eq, arm);
}
EXPORT_SYMBOL(mlx5_eq_update_ci);

static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_irq *irq;

	irq = xa_load(&table->comp_irqs, vecidx);
	if (!irq)
		return;

	xa_erase(&table->comp_irqs, vecidx);
	mlx5_irq_release_vector(irq);
}

static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
{
	return cpumask_local_spread(index, dev->priv.numa_node);
}

static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->priv.eq_table->rmap;
#endif
	return dev->priv.eq_table->rmap;
#else
	return NULL;
#endif
}

static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct cpu_rmap *rmap;
	struct mlx5_irq *irq;
	int cpu;

	rmap = mlx5_eq_table_get_pci_rmap(dev);
	cpu = mlx5_cpumask_default_spread(dev, vecidx);
	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
	if (IS_ERR(irq))
		return PTR_ERR(irq);

	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}

static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_irq *irq;
	int cpu;

	irq = xa_load(&table->comp_irqs, vecidx);
	if (!irq)
		return;

	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
	cpumask_clear_cpu(cpu, &table->used_cpus);
	xa_erase(&table->comp_irqs, vecidx);
	mlx5_irq_affinity_irq_release(dev, irq);
}

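/* SFs draw completion IRQs from a shared pool. CPUs that already serve an
 * SF completion IRQ are tracked in @used_cpus and avoided when building the
 * affinity hint, so SF EQs spread across cores instead of piling onto the
 * same CPU.
 */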
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct irq_affinity_desc *af_desc;
	struct mlx5_irq *irq;

	/* In case SF irq pool does not exist, fallback to the PF irqs */
	if (!mlx5_irq_pool_is_sf_pool(pool))
		return comp_irq_request_pci(dev, vecidx);

	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
	if (!af_desc)
		return -ENOMEM;

	af_desc->is_managed = false;
	cpumask_copy(&af_desc->mask, cpu_online_mask);
	cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
	irq = mlx5_irq_affinity_request(dev, pool, af_desc);
	if (IS_ERR(irq)) {
		kvfree(af_desc);
		return PTR_ERR(irq);
	}

	cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
	mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
		      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
		      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
		      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);

	kvfree(af_desc);

	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}

static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
{
	mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
			       comp_irq_release_pci(dev, vecidx);
}

static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
{
	return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
				      comp_irq_request_pci(dev, vecidx);
}

#ifdef CONFIG_RFS_ACCEL
static int alloc_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	/* rmap is a mapping between irq number and queue number.
	 * Each irq can be assigned only to a single rmap.
	 * Since SFs share IRQs, rmap mapping cannot function correctly
	 * for irqs that are shared between different core/netdev RX rings.
	 * Hence we don't allow netdev rmap for SFs.
	 */
	if (mlx5_core_is_sf(mdev))
		return 0;

	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
	if (!eq_table->rmap)
		return -ENOMEM;
	return 0;
}

static void free_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	if (eq_table->rmap) {
		free_irq_cpu_rmap(eq_table->rmap);
		eq_table->rmap = NULL;
	}
}
#else
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif

static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	xa_erase(&table->comp_eqs, vecidx);
	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	if (destroy_unmap_eq(dev, &eq->core))
		mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
			       eq->core.eqn);
	tasklet_disable(&eq->tasklet_ctx.task);
	kfree(eq);
	comp_irq_release(dev, vecidx);
	table->curr_comp_eqs--;
}

static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_COMP_EQ_SIZE;
}

/* Must be called with EQ table comp_lock held */
static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	struct mlx5_eq_comp *eq;
	struct mlx5_irq *irq;
	int nent;
	int err;

	lockdep_assert_held(&table->comp_lock);
	if (table->curr_comp_eqs == table->max_comp_eqs) {
		mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
			      table->max_comp_eqs);
		return -ENOMEM;
	}

	err = comp_irq_request(dev, vecidx);
	if (err)
		return err;

	nent = comp_eq_depth_devlink_param_get(dev);

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
	if (!eq) {
		err = -ENOMEM;
		goto clean_irq;
	}

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

	irq = xa_load(&table->comp_irqs, vecidx);
	eq->irq_nb.notifier_call = mlx5_eq_comp_int;
	param = (struct mlx5_eq_param) {
		.irq = irq,
		.nent = nent,
	};

	err = create_map_eq(dev, &eq->core, &param);
	if (err)
		goto clean_eq;
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		destroy_unmap_eq(dev, &eq->core);
		goto clean_eq;
	}

	mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
	err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
	if (err)
		goto disable_eq;

	table->curr_comp_eqs++;
	return eq->core.eqn;

disable_eq:
	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
clean_eq:
	kfree(eq);
clean_irq:
	comp_irq_release(dev, vecidx);
	return err;
}

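/* Return the EQN for completion vector @vecidx, creating the EQ (and
 * requesting its IRQ) on first use; creation is serialized by comp_lock.
 */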
int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ret = 0;

	if (vecidx >= table->max_comp_eqs) {
		mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
			      vecidx, table->max_comp_eqs);
		return -EINVAL;
	}

	mutex_lock(&table->comp_lock);
	eq = xa_load(&table->comp_eqs, vecidx);
	if (eq) {
		*eqn = eq->core.eqn;
		goto out;
	}

	ret = create_comp_eq(dev, vecidx);
	if (ret < 0) {
		mutex_unlock(&table->comp_lock);
		return ret;
	}

	*eqn = ret;
out:
	mutex_unlock(&table->comp_lock);
	return 0;
}
EXPORT_SYMBOL(mlx5_comp_eqn_get);

int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int eqn;
	int err;

	/* Allocate the EQ if not allocated yet */
	err = mlx5_comp_eqn_get(dev, vector, &eqn);
	if (err)
		return err;

	eq = xa_load(&table->comp_eqs, vector);
	*irqn = eq->core.irqn;
	return 0;
}

unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->max_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_max);

static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	eq = xa_load(&table->comp_eqs, vector);
	if (eq)
		return mlx5_irq_get_affinity_mask(eq->core.irq);

	return NULL;
}

int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
{
	struct cpumask *mask;
	int cpu;

	mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
	if (mask)
		cpu = cpumask_first(mask);
	else
		cpu = mlx5_cpumask_default_spread(dev, vector);

	return cpu;
}
EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->rmap;
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	unsigned long index;

	xa_for_each(&table->comp_eqs, index, eq)
		if (eq->core.eqn == eqn)
			return eq;

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	mlx5_irq_table_free_irqs(dev);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif
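/* The extra async EQ reserved under on-demand paging is assumed to account
 * for the page-fault EQ that the RDMA driver creates through the generic
 * EQ API above.
 */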

static int get_num_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int max_dev_eqs;
	int num_eqs;

	/* If ethernet is disabled we use just a single completion vector to
	 * keep the other vectors available for other drivers using mlx5_core,
	 * e.g. mlx5_vdpa.
	 */
	if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
		return 1;

	max_dev_eqs = mlx5_max_eq_cap_get(dev);

	num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
			max_dev_eqs - MLX5_MAX_ASYNC_EQS);
	if (mlx5_core_is_sf(dev)) {
		int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
				 MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
				 MLX5_COMP_EQS_PER_SF;

		max_eqs_sf = min_t(int, max_eqs_sf,
				   mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
		num_eqs = min_t(int, num_eqs, max_eqs_sf);
	}

	return num_eqs;
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	eq_table->max_comp_eqs = get_num_eqs(dev);
	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = alloc_rmap(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to allocate rmap\n");
		goto err_rmap;
	}

	return 0;

err_rmap:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	unsigned long index;

	xa_for_each(&table->comp_eqs, index, eq)
		destroy_comp_eq(dev, eq, index);

	free_rmap(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);