1 /*
2 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34 #include <linux/dcbnl.h>
35 #include <linux/math64.h>
36
37 #include "mlx4_en.h"
38 #include "fw_qos.h"
39
/* Operational state of CEE DCB, as reported/accepted through the
 * dcbnl getstate/setstate callbacks.
 */
enum {
	MLX4_CEE_STATE_DOWN = 0,
	MLX4_CEE_STATE_UP = 1,
};
44
45 /* Definitions for QCN
46 */
47
/* Firmware mailbox layout for 802.1Qau (QCN) reaction-point parameters.
 * All fields are big-endian as required by the command interface; the
 * reserved arrays pad the structure to the size the FW expects.
 * NOTE(review): field semantics follow the device's congestion-control
 * command format - confirm against the PRM before changing the layout.
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
	__be32 modify_enable_high;	/* "modify enable" bits for the upper params */
	__be32 modify_enable_low;	/* "modify enable" bits for the lower params */
	__be32 reserved1;
	__be32 extended_enable;		/* holds RPG enable / CN tag bits (see RPG_ENABLE_BIT, CN_TAG_BIT) */
	__be32 rppp_max_rps;
	__be32 rpg_time_reset;
	__be32 rpg_byte_reset;
	__be32 rpg_threshold;
	__be32 rpg_max_rate;
	__be32 rpg_ai_rate;
	__be32 rpg_hai_rate;
	__be32 rpg_gd;
	__be32 rpg_min_dec_fac;
	__be32 rpg_min_rate;
	__be32 max_time_rise;
	__be32 max_byte_rise;
	__be32 max_qdelta;
	__be32 min_qoffset;
	__be32 gd_coefficient;
	__be32 reserved2[5];
	__be32 cp_sample_base;
	__be32 reserved3[39];
};
72
/* Firmware mailbox layout for 802.1Qau (QCN) reaction-point statistics.
 * Big-endian, matching the FW command interface; only a subset is
 * surfaced to dcbnl (see mlx4_en_dcbnl_ieee_getqcnstats()).
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
	__be64 rppp_rp_centiseconds;
	__be32 reserved1;
	__be32 ignored_cnm;
	__be32 rppp_created_rps;
	__be32 estimated_total_rate;
	__be32 max_active_rate_limiter_index;
	__be32 dropped_cnms_busy_fw;
	__be32 reserved2;
	__be32 cnms_handled_successfully;
	__be32 min_total_limiters_rate;
	__be32 max_total_limiters_rate;
	__be32 reserved3[4];
};
87
/* dcbnl getcap: report a single DCB capability attribute.
 * PFC is always supported; DCBX reports the cached mode flags; PFC_TCS
 * reports the supported TC count as a bitmap. Always returns 0.
 */
static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);

	if (capid == DCB_CAP_ATTR_PFC)
		*cap = true;
	else if (capid == DCB_CAP_ATTR_DCBX)
		*cap = en_priv->dcbx_cap;
	else if (capid == DCB_CAP_ATTR_PFC_TCS)
		*cap = 1 << mlx4_max_tc(en_priv->mdev->dev);
	else
		*cap = false;

	return 0;
}
109
mlx4_en_dcbnl_getpfcstate(struct net_device * netdev)110 static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
111 {
112 struct mlx4_en_priv *priv = netdev_priv(netdev);
113
114 return priv->cee_config.pfc_state;
115 }
116
/* dcbnl setpfcstate: cache the CEE PFC enable state; it is applied to
 * HW later by the setall callback.
 */
static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);

	en_priv->cee_config.pfc_state = state;
}
123
/* dcbnl getpfccfg: report the cached CEE PFC setting of one priority. */
static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
				      u8 *setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);

	*setting = en_priv->cee_config.dcb_pfc[priority];
}
131
/* dcbnl setpfccfg: cache one priority's CEE PFC setting and mark PFC as
 * enabled; HW is reprogrammed later by the setall callback.
 */
static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
				      u8 setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);

	en_priv->cee_config.dcb_pfc[priority] = setting;
	en_priv->cee_config.pfc_state = true;
}
140
/* dcbnl getnumtcs: report the number of traffic classes for PFC,
 * or 0 for any other attribute. Fails unless DCB is enabled.
 */
static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);

	if (!(en_priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
		return -EINVAL;

	*num = (tcid == DCB_NUMTCS_ATTR_PFC) ?
		mlx4_max_tc(en_priv->mdev->dev) : 0;

	return 0;
}
155
mlx4_en_dcbnl_set_all(struct net_device * netdev)156 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
157 {
158 struct mlx4_en_priv *priv = netdev_priv(netdev);
159 struct mlx4_en_port_profile *prof = priv->prof;
160 struct mlx4_en_dev *mdev = priv->mdev;
161 u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
162
163 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
164 return 1;
165
166 if (priv->cee_config.pfc_state) {
167 int tc;
168 rx_ppp = prof->rx_ppp;
169 tx_ppp = prof->tx_ppp;
170
171 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
172 u8 tc_mask = 1 << tc;
173
174 switch (priv->cee_config.dcb_pfc[tc]) {
175 case pfc_disabled:
176 tx_ppp &= ~tc_mask;
177 rx_ppp &= ~tc_mask;
178 break;
179 case pfc_enabled_full:
180 tx_ppp |= tc_mask;
181 rx_ppp |= tc_mask;
182 break;
183 case pfc_enabled_tx:
184 tx_ppp |= tc_mask;
185 rx_ppp &= ~tc_mask;
186 break;
187 case pfc_enabled_rx:
188 tx_ppp &= ~tc_mask;
189 rx_ppp |= tc_mask;
190 break;
191 default:
192 break;
193 }
194 }
195 rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
196 tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
197 } else {
198 rx_ppp = 0;
199 tx_ppp = 0;
200 rx_pause = prof->rx_pause;
201 tx_pause = prof->tx_pause;
202 }
203
204 if (mlx4_SET_PORT_general(mdev->dev, priv->port,
205 priv->rx_skb_size + ETH_FCS_LEN,
206 tx_pause, tx_ppp, rx_pause, rx_ppp)) {
207 en_err(priv, "Failed setting pause params\n");
208 return 1;
209 }
210
211 prof->tx_ppp = tx_ppp;
212 prof->rx_ppp = rx_ppp;
213 prof->tx_pause = tx_pause;
214 prof->rx_pause = rx_pause;
215
216 return 0;
217 }
218
mlx4_en_dcbnl_get_state(struct net_device * dev)219 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
220 {
221 struct mlx4_en_priv *priv = netdev_priv(dev);
222
223 if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
224 return MLX4_CEE_STATE_UP;
225
226 return MLX4_CEE_STATE_DOWN;
227 }
228
/* dcbnl setstate: enable/disable DCB and resize TX queues accordingly
 * (one set of queues per TC when enabled, none when disabled).
 * Returns 0 on success or no-op, 1 on failure (dcbnl u8 convention).
 */
static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool enable = !!state;
	int num_tcs;

	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	/* Nothing to do if the requested state is already in effect. */
	if (enable == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
		return 0;

	if (enable) {
		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		num_tcs = IEEE_8021QAZ_MAX_TCS;
	} else {
		priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
		num_tcs = 0;
	}

	return mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs) ? 1 : 0;
}
252
253 /* On success returns a non-zero 802.1p user priority bitmap
254 * otherwise returns 0 as the invalid user priority bitmap to
255 * indicate an error.
256 */
mlx4_en_dcbnl_getapp(struct net_device * netdev,u8 idtype,u16 id)257 static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
258 {
259 struct mlx4_en_priv *priv = netdev_priv(netdev);
260 struct dcb_app app = {
261 .selector = idtype,
262 .protocol = id,
263 };
264 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
265 return 0;
266
267 return dcb_getapp(netdev, &app);
268 }
269
/* dcbnl setapp: store an APP table entry (selector/protocol -> priority)
 * via the dcbnl core. Only valid in CEE mode.
 */
static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
				u16 id, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	struct dcb_app app = {
		.selector = idtype,
		.protocol = id,
		.priority = up,
	};

	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return -EINVAL;

	return dcb_setapp(netdev, &app);
}
286
mlx4_en_dcbnl_ieee_getets(struct net_device * dev,struct ieee_ets * ets)287 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
288 struct ieee_ets *ets)
289 {
290 struct mlx4_en_priv *priv = netdev_priv(dev);
291 struct ieee_ets *my_ets = &priv->ets;
292
293 ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
294 ets->cbs = my_ets->cbs;
295 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
296 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
297 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
298
299 return 0;
300 }
301
mlx4_en_ets_validate(struct mlx4_en_priv * priv,struct ieee_ets * ets)302 static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
303 {
304 int i;
305 int total_ets_bw = 0;
306 int has_ets_tc = 0;
307
308 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
309 if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
310 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
311 i, ets->prio_tc[i]);
312 return -EINVAL;
313 }
314
315 switch (ets->tc_tsa[i]) {
316 case IEEE_8021QAZ_TSA_VENDOR:
317 case IEEE_8021QAZ_TSA_STRICT:
318 break;
319 case IEEE_8021QAZ_TSA_ETS:
320 has_ets_tc = 1;
321 total_ets_bw += ets->tc_tx_bw[i];
322 break;
323 default:
324 en_err(priv, "TC[%d]: Not supported TSA: %d\n",
325 i, ets->tc_tsa[i]);
326 return -EOPNOTSUPP;
327 }
328 }
329
330 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
331 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
332 total_ets_bw);
333 return -EINVAL;
334 }
335
336 return 0;
337 }
338
/* Program the port TX scheduler from an ETS config and rate limits.
 * NULL ets/ratelimit means "reuse the cached values" (priv->ets /
 * priv->maxrate). The loop MUST walk TCs from high to low: strict TCs
 * are numbered in that order so a higher TC gets a lower (= higher
 * priority) pg value.
 */
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
					 struct ieee_ets *ets, u16 *ratelimit)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
	int num_strict = 0;
	int tc;

	if (!ets)
		ets = &priv->ets;
	if (!ratelimit)
		ratelimit = priv->maxrate;

	/* higher TC means higher priority => lower pg */
	for (tc = IEEE_8021QAZ_MAX_TCS - 1; tc >= 0; tc--) {
		if (ets->tc_tsa[tc] == IEEE_8021QAZ_TSA_VENDOR) {
			pg[tc] = MLX4_EN_TC_VENDOR;
			tc_tx_bw[tc] = MLX4_EN_BW_MAX;
		} else if (ets->tc_tsa[tc] == IEEE_8021QAZ_TSA_STRICT) {
			pg[tc] = num_strict++;
			tc_tx_bw[tc] = MLX4_EN_BW_MAX;
		} else if (ets->tc_tsa[tc] == IEEE_8021QAZ_TSA_ETS) {
			pg[tc] = MLX4_EN_TC_ETS;
			/* zero BW is not programmable; clamp to the minimum */
			tc_tx_bw[tc] = ets->tc_tx_bw[tc] ?: MLX4_EN_BW_MIN;
		}
	}

	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
				       ratelimit);
}
372
373 static int
mlx4_en_dcbnl_ieee_setets(struct net_device * dev,struct ieee_ets * ets)374 mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
375 {
376 struct mlx4_en_priv *priv = netdev_priv(dev);
377 struct mlx4_en_dev *mdev = priv->mdev;
378 int err;
379
380 err = mlx4_en_ets_validate(priv, ets);
381 if (err)
382 return err;
383
384 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
385 if (err)
386 return err;
387
388 err = mlx4_en_config_port_scheduler(priv, ets, NULL);
389 if (err)
390 return err;
391
392 memcpy(&priv->ets, ets, sizeof(priv->ets));
393
394 return 0;
395 }
396
mlx4_en_dcbnl_ieee_getpfc(struct net_device * dev,struct ieee_pfc * pfc)397 static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
398 struct ieee_pfc *pfc)
399 {
400 struct mlx4_en_priv *priv = netdev_priv(dev);
401
402 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
403 pfc->pfc_en = priv->prof->tx_ppp;
404
405 return 0;
406 }
407
mlx4_en_dcbnl_ieee_setpfc(struct net_device * dev,struct ieee_pfc * pfc)408 static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
409 struct ieee_pfc *pfc)
410 {
411 struct mlx4_en_priv *priv = netdev_priv(dev);
412 struct mlx4_en_port_profile *prof = priv->prof;
413 struct mlx4_en_dev *mdev = priv->mdev;
414 u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
415 int err;
416
417 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
418 pfc->pfc_cap,
419 pfc->pfc_en,
420 pfc->mbc,
421 pfc->delay);
422
423 rx_pause = prof->rx_pause && !pfc->pfc_en;
424 tx_pause = prof->tx_pause && !pfc->pfc_en;
425 rx_ppp = pfc->pfc_en;
426 tx_ppp = pfc->pfc_en;
427
428 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
429 priv->rx_skb_size + ETH_FCS_LEN,
430 tx_pause, tx_ppp, rx_pause, rx_ppp);
431 if (err) {
432 en_err(priv, "Failed setting pause params\n");
433 return err;
434 }
435
436 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
437 rx_ppp, rx_pause, tx_ppp, tx_pause);
438
439 prof->tx_ppp = tx_ppp;
440 prof->rx_ppp = rx_ppp;
441 prof->rx_pause = rx_pause;
442 prof->tx_pause = tx_pause;
443
444 return err;
445 }
446
mlx4_en_dcbnl_getdcbx(struct net_device * dev)447 static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
448 {
449 struct mlx4_en_priv *priv = netdev_priv(dev);
450
451 return priv->dcbx_cap;
452 }
453
/* dcbnl setdcbx: switch DCBX mode. Only host-managed modes with at most
 * one of IEEE/CEE set are accepted. Switching resets the configuration:
 * IEEE mode applies empty ETS/PFC defaults, CEE mode re-applies the
 * cached CEE config, and "neither" additionally tears down per-TC TX
 * queues. Returns 0 on success, 1 on failure (dcbnl u8 convention).
 */
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ieee_ets ets = {0};
	struct ieee_pfc pfc = {0};

	if (mode == priv->dcbx_cap)
		return 0;

	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    ((mode & DCB_CAP_DCBX_VER_IEEE) &&
	     (mode & DCB_CAP_DCBX_VER_CEE)) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	priv->dcbx_cap = mode;

	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;

	if (mode & DCB_CAP_DCBX_VER_IEEE) {
		if (mlx4_en_dcbnl_ieee_setets(dev, &ets) ||
		    mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
			return 1;
		return 0;
	}

	if (mode & DCB_CAP_DCBX_VER_CEE)
		return mlx4_en_dcbnl_set_all(dev) ? 1 : 0;

	if (mlx4_en_dcbnl_ieee_setets(dev, &ets) ||
	    mlx4_en_dcbnl_ieee_setpfc(dev, &pfc) ||
	    mlx4_en_alloc_tx_queue_per_tc(dev, 0))
		return 1;

	return 0;
}
495
496 #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
mlx4_en_dcbnl_ieee_getmaxrate(struct net_device * dev,struct ieee_maxrate * maxrate)497 static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
498 struct ieee_maxrate *maxrate)
499 {
500 struct mlx4_en_priv *priv = netdev_priv(dev);
501 int i;
502
503 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
504 maxrate->tc_maxrate[i] =
505 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
506
507 return 0;
508 }
509
mlx4_en_dcbnl_ieee_setmaxrate(struct net_device * dev,struct ieee_maxrate * maxrate)510 static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
511 struct ieee_maxrate *maxrate)
512 {
513 struct mlx4_en_priv *priv = netdev_priv(dev);
514 u16 tmp[IEEE_8021QAZ_MAX_TCS];
515 int i, err;
516
517 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
518 /* Convert from Kbps into HW units, rounding result up.
519 * Setting to 0, means unlimited BW.
520 */
521 tmp[i] = div_u64(maxrate->tc_maxrate[i] +
522 MLX4_RATELIMIT_UNITS_IN_KB - 1,
523 MLX4_RATELIMIT_UNITS_IN_KB);
524 }
525
526 err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
527 if (err)
528 return err;
529
530 memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
531
532 return 0;
533 }
534
535 #define RPG_ENABLE_BIT 31
536 #define CN_TAG_BIT 30
537
mlx4_en_dcbnl_ieee_getqcn(struct net_device * dev,struct ieee_qcn * qcn)538 static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
539 struct ieee_qcn *qcn)
540 {
541 struct mlx4_en_priv *priv = netdev_priv(dev);
542 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
543 struct mlx4_cmd_mailbox *mailbox_out = NULL;
544 u64 mailbox_in_dma = 0;
545 u32 inmod = 0;
546 int i, err;
547
548 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
549 return -EOPNOTSUPP;
550
551 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
552 if (IS_ERR(mailbox_out))
553 return -ENOMEM;
554 hw_qcn =
555 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
556 mailbox_out->buf;
557
558 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
559 inmod = priv->port | ((1 << i) << 8) |
560 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
561 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
562 mailbox_out->dma,
563 inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
564 MLX4_CMD_CONGESTION_CTRL_OPCODE,
565 MLX4_CMD_TIME_CLASS_C,
566 MLX4_CMD_NATIVE);
567 if (err) {
568 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
569 return err;
570 }
571
572 qcn->rpg_enable[i] =
573 be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
574 qcn->rppp_max_rps[i] =
575 be32_to_cpu(hw_qcn->rppp_max_rps);
576 qcn->rpg_time_reset[i] =
577 be32_to_cpu(hw_qcn->rpg_time_reset);
578 qcn->rpg_byte_reset[i] =
579 be32_to_cpu(hw_qcn->rpg_byte_reset);
580 qcn->rpg_threshold[i] =
581 be32_to_cpu(hw_qcn->rpg_threshold);
582 qcn->rpg_max_rate[i] =
583 be32_to_cpu(hw_qcn->rpg_max_rate);
584 qcn->rpg_ai_rate[i] =
585 be32_to_cpu(hw_qcn->rpg_ai_rate);
586 qcn->rpg_hai_rate[i] =
587 be32_to_cpu(hw_qcn->rpg_hai_rate);
588 qcn->rpg_gd[i] =
589 be32_to_cpu(hw_qcn->rpg_gd);
590 qcn->rpg_min_dec_fac[i] =
591 be32_to_cpu(hw_qcn->rpg_min_dec_fac);
592 qcn->rpg_min_rate[i] =
593 be32_to_cpu(hw_qcn->rpg_min_rate);
594 qcn->cndd_state_machine[i] =
595 priv->cndd_state[i];
596 }
597 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
598 return 0;
599 }
600
mlx4_en_dcbnl_ieee_setqcn(struct net_device * dev,struct ieee_qcn * qcn)601 static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
602 struct ieee_qcn *qcn)
603 {
604 struct mlx4_en_priv *priv = netdev_priv(dev);
605 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
606 struct mlx4_cmd_mailbox *mailbox_in = NULL;
607 u64 mailbox_in_dma = 0;
608 u32 inmod = 0;
609 int i, err;
610 #define MODIFY_ENABLE_HIGH_MASK 0xc0000000
611 #define MODIFY_ENABLE_LOW_MASK 0xffc00000
612
613 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
614 return -EOPNOTSUPP;
615
616 mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
617 if (IS_ERR(mailbox_in))
618 return -ENOMEM;
619
620 mailbox_in_dma = mailbox_in->dma;
621 hw_qcn =
622 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
623 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
624 inmod = priv->port | ((1 << i) << 8) |
625 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
626
627 /* Before updating QCN parameter,
628 * need to set it's modify enable bit to 1
629 */
630
631 hw_qcn->modify_enable_high = cpu_to_be32(
632 MODIFY_ENABLE_HIGH_MASK);
633 hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
634
635 hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
636 hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
637 hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
638 hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
639 hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
640 hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
641 hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
642 hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
643 hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
644 hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
645 hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
646 priv->cndd_state[i] = qcn->cndd_state_machine[i];
647 if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
648 hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
649
650 err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
651 MLX4_CONGESTION_CONTROL_SET_PARAMS,
652 MLX4_CMD_CONGESTION_CTRL_OPCODE,
653 MLX4_CMD_TIME_CLASS_C,
654 MLX4_CMD_NATIVE);
655 if (err) {
656 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
657 return err;
658 }
659 }
660 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
661 return 0;
662 }
663
mlx4_en_dcbnl_ieee_getqcnstats(struct net_device * dev,struct ieee_qcn_stats * qcn_stats)664 static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
665 struct ieee_qcn_stats *qcn_stats)
666 {
667 struct mlx4_en_priv *priv = netdev_priv(dev);
668 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
669 struct mlx4_cmd_mailbox *mailbox_out = NULL;
670 u64 mailbox_in_dma = 0;
671 u32 inmod = 0;
672 int i, err;
673
674 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
675 return -EOPNOTSUPP;
676
677 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
678 if (IS_ERR(mailbox_out))
679 return -ENOMEM;
680
681 hw_qcn_stats =
682 (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
683 mailbox_out->buf;
684
685 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
686 inmod = priv->port | ((1 << i) << 8) |
687 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
688 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
689 mailbox_out->dma, inmod,
690 MLX4_CONGESTION_CONTROL_GET_STATISTICS,
691 MLX4_CMD_CONGESTION_CTRL_OPCODE,
692 MLX4_CMD_TIME_CLASS_C,
693 MLX4_CMD_NATIVE);
694 if (err) {
695 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
696 return err;
697 }
698 qcn_stats->rppp_rp_centiseconds[i] =
699 be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
700 qcn_stats->rppp_created_rps[i] =
701 be32_to_cpu(hw_qcn_stats->rppp_created_rps);
702 }
703 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
704 return 0;
705 }
706
/* dcbnl callback table for ports with the full DCB feature set:
 * IEEE 802.1Qaz (ETS, PFC, maxrate, QCN) plus the legacy CEE interface
 * and DCBX mode selection.
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets	= mlx4_en_dcbnl_ieee_getets,
	.ieee_setets	= mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getqcn	= mlx4_en_dcbnl_ieee_getqcn,
	.ieee_setqcn	= mlx4_en_dcbnl_ieee_setqcn,
	.ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	/* legacy CEE entry points */
	.getstate	= mlx4_en_dcbnl_get_state,
	.setstate	= mlx4_en_dcbnl_set_state,
	.getpfccfg	= mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg	= mlx4_en_dcbnl_set_pfc_cfg,
	.setall		= mlx4_en_dcbnl_set_all,
	.getcap		= mlx4_en_dcbnl_getcap,
	.getnumtcs	= mlx4_en_dcbnl_getnumtcs,
	.getpfcstate	= mlx4_en_dcbnl_getpfcstate,
	.setpfcstate	= mlx4_en_dcbnl_setpfcstate,
	.getapp		= mlx4_en_dcbnl_getapp,
	.setapp		= mlx4_en_dcbnl_setapp,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
733
/* Reduced dcbnl callback table: PFC-related and CEE entry points only
 * (no ETS/maxrate/QCN callbacks, and no getstate/getcap).
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	.setstate	= mlx4_en_dcbnl_set_state,
	.getpfccfg	= mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg	= mlx4_en_dcbnl_set_pfc_cfg,
	.setall		= mlx4_en_dcbnl_set_all,
	.getnumtcs	= mlx4_en_dcbnl_getnumtcs,
	.getpfcstate	= mlx4_en_dcbnl_getpfcstate,
	.setpfcstate	= mlx4_en_dcbnl_setpfcstate,
	.getapp		= mlx4_en_dcbnl_getapp,
	.setapp		= mlx4_en_dcbnl_setapp,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
751