1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2024, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "ice_sched.h"
33
34 /**
35 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
36 * @pi: port information structure
37 * @info: Scheduler element information from firmware
38 *
39 * This function inserts the root node of the scheduling tree topology
40 * to the SW DB.
41 */
42 static int
ice_sched_add_root_node(struct ice_port_info * pi,struct ice_aqc_txsched_elem_data * info)43 ice_sched_add_root_node(struct ice_port_info *pi,
44 struct ice_aqc_txsched_elem_data *info)
45 {
46 struct ice_sched_node *root;
47 struct ice_hw *hw;
48
49 if (!pi)
50 return ICE_ERR_PARAM;
51
52 hw = pi->hw;
53
54 root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
55 if (!root)
56 return ICE_ERR_NO_MEMORY;
57
58 root->children = (struct ice_sched_node **)
59 ice_calloc(hw, hw->max_children[0], sizeof(*root->children));
60 if (!root->children) {
61 ice_free(hw, root);
62 return ICE_ERR_NO_MEMORY;
63 }
64
65 ice_memcpy(&root->info, info, sizeof(*info), ICE_NONDMA_TO_NONDMA);
66 pi->root = root;
67 return 0;
68 }
69
70 /**
71 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
72 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
73 * @teid: node TEID to search
74 *
75 * This function searches for a node matching the TEID in the scheduling tree
76 * from the SW DB. The search is recursive and is restricted by the number of
77 * layers it has searched through; stopping at the max supported layer.
78 *
79 * This function needs to be called when holding the port_info->sched_lock
80 */
81 struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node * start_node,u32 teid)82 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
83 {
84 u16 i;
85
86 if (!start_node)
87 return NULL;
88
89 /* The TEID is same as that of the start_node */
90 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
91 return start_node;
92
93 /* The node has no children or is at the max layer */
94 if (!start_node->num_children ||
95 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
96 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
97 return NULL;
98
99 /* Check if TEID matches to any of the children nodes */
100 for (i = 0; i < start_node->num_children; i++)
101 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
102 return start_node->children[i];
103
104 /* Search within each child's sub-tree */
105 for (i = 0; i < start_node->num_children; i++) {
106 struct ice_sched_node *tmp;
107
108 tmp = ice_sched_find_node_by_teid(start_node->children[i],
109 teid);
110 if (tmp)
111 return tmp;
112 }
113
114 return NULL;
115 }
116
117 /**
118 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
119 * @hw: pointer to the HW struct
120 * @cmd_opc: cmd opcode
121 * @elems_req: number of elements to request
122 * @buf: pointer to buffer
123 * @buf_size: buffer size in bytes
124 * @elems_resp: returns total number of elements response
125 * @cd: pointer to command details structure or NULL
126 *
127 * This function sends a scheduling elements cmd (cmd_opc)
128 */
129 static int
ice_aqc_send_sched_elem_cmd(struct ice_hw * hw,enum ice_adminq_opc cmd_opc,u16 elems_req,void * buf,u16 buf_size,u16 * elems_resp,struct ice_sq_cd * cd)130 ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
131 u16 elems_req, void *buf, u16 buf_size,
132 u16 *elems_resp, struct ice_sq_cd *cd)
133 {
134 struct ice_aqc_sched_elem_cmd *cmd;
135 struct ice_aq_desc desc;
136 int status;
137
138 cmd = &desc.params.sched_elem_cmd;
139 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
140 cmd->num_elem_req = CPU_TO_LE16(elems_req);
141 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
142 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
143 if (!status && elems_resp)
144 *elems_resp = LE16_TO_CPU(cmd->num_elem_resp);
145
146 return status;
147 }
148
149 /**
150 * ice_aq_query_sched_elems - query scheduler elements
151 * @hw: pointer to the HW struct
152 * @elems_req: number of elements to query
153 * @buf: pointer to buffer
154 * @buf_size: buffer size in bytes
155 * @elems_ret: returns total number of elements returned
156 * @cd: pointer to command details structure or NULL
157 *
158 * Query scheduling elements (0x0404)
159 */
160 int
ice_aq_query_sched_elems(struct ice_hw * hw,u16 elems_req,struct ice_aqc_txsched_elem_data * buf,u16 buf_size,u16 * elems_ret,struct ice_sq_cd * cd)161 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
162 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
163 u16 *elems_ret, struct ice_sq_cd *cd)
164 {
165 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
166 elems_req, (void *)buf, buf_size,
167 elems_ret, cd);
168 }
169
170 /**
171 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
172 * @pi: port information structure
173 * @layer: Scheduler layer of the node
174 * @info: Scheduler element information from firmware
175 * @prealloc_node: preallocated ice_sched_node struct for SW DB
176 *
177 * This function inserts a scheduler node to the SW DB.
178 */
179 int
ice_sched_add_node(struct ice_port_info * pi,u8 layer,struct ice_aqc_txsched_elem_data * info,struct ice_sched_node * prealloc_node)180 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
181 struct ice_aqc_txsched_elem_data *info,
182 struct ice_sched_node *prealloc_node)
183 {
184 struct ice_aqc_txsched_elem_data elem;
185 struct ice_sched_node *parent;
186 struct ice_sched_node *node;
187 struct ice_hw *hw;
188 int status;
189
190 if (!pi)
191 return ICE_ERR_PARAM;
192
193 hw = pi->hw;
194
195 /* A valid parent node should be there */
196 parent = ice_sched_find_node_by_teid(pi->root,
197 LE32_TO_CPU(info->parent_teid));
198 if (!parent) {
199 ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
200 LE32_TO_CPU(info->parent_teid));
201 return ICE_ERR_PARAM;
202 }
203
204 /* query the current node information from FW before adding it
205 * to the SW DB
206 */
207 status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
208 if (status)
209 return status;
210
211 if (prealloc_node)
212 node = prealloc_node;
213 else
214 node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
215 if (!node)
216 return ICE_ERR_NO_MEMORY;
217 if (hw->max_children[layer]) {
218 node->children = (struct ice_sched_node **)
219 ice_calloc(hw, hw->max_children[layer],
220 sizeof(*node->children));
221 if (!node->children) {
222 ice_free(hw, node);
223 return ICE_ERR_NO_MEMORY;
224 }
225 }
226
227 node->in_use = true;
228 node->parent = parent;
229 node->tx_sched_layer = layer;
230 parent->children[parent->num_children++] = node;
231 node->info = elem;
232 return 0;
233 }
234
235 /**
236 * ice_aq_delete_sched_elems - delete scheduler elements
237 * @hw: pointer to the HW struct
238 * @grps_req: number of groups to delete
239 * @buf: pointer to buffer
240 * @buf_size: buffer size in bytes
241 * @grps_del: returns total number of elements deleted
242 * @cd: pointer to command details structure or NULL
243 *
244 * Delete scheduling elements (0x040F)
245 */
246 static int
ice_aq_delete_sched_elems(struct ice_hw * hw,u16 grps_req,struct ice_aqc_delete_elem * buf,u16 buf_size,u16 * grps_del,struct ice_sq_cd * cd)247 ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
248 struct ice_aqc_delete_elem *buf, u16 buf_size,
249 u16 *grps_del, struct ice_sq_cd *cd)
250 {
251 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
252 grps_req, (void *)buf, buf_size,
253 grps_del, cd);
254 }
255
256 /**
257 * ice_sched_remove_elems - remove nodes from HW
258 * @hw: pointer to the HW struct
259 * @parent: pointer to the parent node
260 * @num_nodes: number of nodes
261 * @node_teids: array of node teids to be deleted
262 *
263 * This function remove nodes from HW
264 */
265 static int
ice_sched_remove_elems(struct ice_hw * hw,struct ice_sched_node * parent,u16 num_nodes,u32 * node_teids)266 ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
267 u16 num_nodes, u32 *node_teids)
268 {
269 struct ice_aqc_delete_elem *buf;
270 u16 i, num_groups_removed = 0;
271 u16 buf_size;
272 int status;
273
274 buf_size = ice_struct_size(buf, teid, num_nodes);
275 buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
276 if (!buf)
277 return ICE_ERR_NO_MEMORY;
278
279 buf->hdr.parent_teid = parent->info.node_teid;
280 buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
281 for (i = 0; i < num_nodes; i++)
282 buf->teid[i] = CPU_TO_LE32(node_teids[i]);
283
284 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
285 &num_groups_removed, NULL);
286 if (status || num_groups_removed != 1)
287 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
288 hw->adminq.sq_last_status);
289
290 ice_free(hw, buf);
291 return status;
292 }
293
294 /**
295 * ice_sched_get_first_node - get the first node of the given layer
296 * @pi: port information structure
297 * @parent: pointer the base node of the subtree
298 * @layer: layer number
299 *
300 * This function retrieves the first node of the given layer from the subtree
301 */
302 static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info * pi,struct ice_sched_node * parent,u8 layer)303 ice_sched_get_first_node(struct ice_port_info *pi,
304 struct ice_sched_node *parent, u8 layer)
305 {
306 return pi->sib_head[parent->tc_num][layer];
307 }
308
309 /**
310 * ice_sched_get_tc_node - get pointer to TC node
311 * @pi: port information structure
312 * @tc: TC number
313 *
314 * This function returns the TC node pointer
315 */
ice_sched_get_tc_node(struct ice_port_info * pi,u8 tc)316 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
317 {
318 u8 i;
319
320 if (!pi || !pi->root)
321 return NULL;
322 for (i = 0; i < pi->root->num_children; i++)
323 if (pi->root->children[i]->tc_num == tc)
324 return pi->root->children[i];
325 return NULL;
326 }
327
328 /**
329 * ice_free_sched_node - Free a Tx scheduler node from SW DB
330 * @pi: port information structure
331 * @node: pointer to the ice_sched_node struct
332 *
333 * This function frees up a node from SW DB as well as from HW
334 *
335 * This function needs to be called with the port_info->sched_lock held
336 */
ice_free_sched_node(struct ice_port_info * pi,struct ice_sched_node * node)337 void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
338 {
339 struct ice_sched_node *parent;
340 struct ice_hw *hw = pi->hw;
341 u8 i, j;
342
343 /* Free the children before freeing up the parent node
344 * The parent array is updated below and that shifts the nodes
345 * in the array. So always pick the first child if num children > 0
346 */
347 while (node->num_children)
348 ice_free_sched_node(pi, node->children[0]);
349
350 /* Leaf, TC and root nodes can't be deleted by SW */
351 if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
352 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
353 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
354 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
355 u32 teid = LE32_TO_CPU(node->info.node_teid);
356
357 ice_sched_remove_elems(hw, node->parent, 1, &teid);
358 }
359 parent = node->parent;
360 /* root has no parent */
361 if (parent) {
362 struct ice_sched_node *p;
363
364 /* update the parent */
365 for (i = 0; i < parent->num_children; i++)
366 if (parent->children[i] == node) {
367 for (j = i + 1; j < parent->num_children; j++)
368 parent->children[j - 1] =
369 parent->children[j];
370 parent->num_children--;
371 break;
372 }
373
374 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
375 while (p) {
376 if (p->sibling == node) {
377 p->sibling = node->sibling;
378 break;
379 }
380 p = p->sibling;
381 }
382
383 /* update the sibling head if head is getting removed */
384 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
385 pi->sib_head[node->tc_num][node->tx_sched_layer] =
386 node->sibling;
387 }
388
389 /* leaf nodes have no children */
390 if (node->children)
391 ice_free(hw, node->children);
392 ice_free(hw, node);
393 }
394
395 /**
396 * ice_aq_get_dflt_topo - gets default scheduler topology
397 * @hw: pointer to the HW struct
398 * @lport: logical port number
399 * @buf: pointer to buffer
400 * @buf_size: buffer size in bytes
401 * @num_branches: returns total number of queue to port branches
402 * @cd: pointer to command details structure or NULL
403 *
404 * Get default scheduler topology (0x400)
405 */
406 static int
ice_aq_get_dflt_topo(struct ice_hw * hw,u8 lport,struct ice_aqc_get_topo_elem * buf,u16 buf_size,u8 * num_branches,struct ice_sq_cd * cd)407 ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
408 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
409 u8 *num_branches, struct ice_sq_cd *cd)
410 {
411 struct ice_aqc_get_topo *cmd;
412 struct ice_aq_desc desc;
413 int status;
414
415 cmd = &desc.params.get_topo;
416 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
417 cmd->port_num = lport;
418 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
419 if (!status && num_branches)
420 *num_branches = cmd->num_branches;
421
422 return status;
423 }
424
425 /**
426 * ice_aq_add_sched_elems - adds scheduling element
427 * @hw: pointer to the HW struct
428 * @grps_req: the number of groups that are requested to be added
429 * @buf: pointer to buffer
430 * @buf_size: buffer size in bytes
431 * @grps_added: returns total number of groups added
432 * @cd: pointer to command details structure or NULL
433 *
434 * Add scheduling elements (0x0401)
435 */
436 static int
ice_aq_add_sched_elems(struct ice_hw * hw,u16 grps_req,struct ice_aqc_add_elem * buf,u16 buf_size,u16 * grps_added,struct ice_sq_cd * cd)437 ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
438 struct ice_aqc_add_elem *buf, u16 buf_size,
439 u16 *grps_added, struct ice_sq_cd *cd)
440 {
441 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
442 grps_req, (void *)buf, buf_size,
443 grps_added, cd);
444 }
445
446 /**
447 * ice_aq_cfg_sched_elems - configures scheduler elements
448 * @hw: pointer to the HW struct
449 * @elems_req: number of elements to configure
450 * @buf: pointer to buffer
451 * @buf_size: buffer size in bytes
452 * @elems_cfgd: returns total number of elements configured
453 * @cd: pointer to command details structure or NULL
454 *
455 * Configure scheduling elements (0x0403)
456 */
457 static int
ice_aq_cfg_sched_elems(struct ice_hw * hw,u16 elems_req,struct ice_aqc_txsched_elem_data * buf,u16 buf_size,u16 * elems_cfgd,struct ice_sq_cd * cd)458 ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
459 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
460 u16 *elems_cfgd, struct ice_sq_cd *cd)
461 {
462 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
463 elems_req, (void *)buf, buf_size,
464 elems_cfgd, cd);
465 }
466
467 /**
468 * ice_aq_move_sched_elems - move scheduler elements
469 * @hw: pointer to the HW struct
470 * @grps_req: number of groups to move
471 * @buf: pointer to buffer
472 * @buf_size: buffer size in bytes
473 * @grps_movd: returns total number of groups moved
474 * @cd: pointer to command details structure or NULL
475 *
476 * Move scheduling elements (0x0408)
477 */
478 int
ice_aq_move_sched_elems(struct ice_hw * hw,u16 grps_req,struct ice_aqc_move_elem * buf,u16 buf_size,u16 * grps_movd,struct ice_sq_cd * cd)479 ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
480 struct ice_aqc_move_elem *buf, u16 buf_size,
481 u16 *grps_movd, struct ice_sq_cd *cd)
482 {
483 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
484 grps_req, (void *)buf, buf_size,
485 grps_movd, cd);
486 }
487
488 /**
489 * ice_aq_suspend_sched_elems - suspend scheduler elements
490 * @hw: pointer to the HW struct
491 * @elems_req: number of elements to suspend
492 * @buf: pointer to buffer
493 * @buf_size: buffer size in bytes
494 * @elems_ret: returns total number of elements suspended
495 * @cd: pointer to command details structure or NULL
496 *
497 * Suspend scheduling elements (0x0409)
498 */
499 static int
ice_aq_suspend_sched_elems(struct ice_hw * hw,u16 elems_req,__le32 * buf,u16 buf_size,u16 * elems_ret,struct ice_sq_cd * cd)500 ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
501 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
502 {
503 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
504 elems_req, (void *)buf, buf_size,
505 elems_ret, cd);
506 }
507
508 /**
509 * ice_aq_resume_sched_elems - resume scheduler elements
510 * @hw: pointer to the HW struct
511 * @elems_req: number of elements to resume
512 * @buf: pointer to buffer
513 * @buf_size: buffer size in bytes
514 * @elems_ret: returns total number of elements resumed
515 * @cd: pointer to command details structure or NULL
516 *
517 * resume scheduling elements (0x040A)
518 */
519 static int
ice_aq_resume_sched_elems(struct ice_hw * hw,u16 elems_req,__le32 * buf,u16 buf_size,u16 * elems_ret,struct ice_sq_cd * cd)520 ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
521 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
522 {
523 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
524 elems_req, (void *)buf, buf_size,
525 elems_ret, cd);
526 }
527
528 /**
529 * ice_aq_query_sched_res - query scheduler resource
530 * @hw: pointer to the HW struct
531 * @buf_size: buffer size in bytes
532 * @buf: pointer to buffer
533 * @cd: pointer to command details structure or NULL
534 *
535 * Query scheduler resource allocation (0x0412)
536 */
537 static int
ice_aq_query_sched_res(struct ice_hw * hw,u16 buf_size,struct ice_aqc_query_txsched_res_resp * buf,struct ice_sq_cd * cd)538 ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
539 struct ice_aqc_query_txsched_res_resp *buf,
540 struct ice_sq_cd *cd)
541 {
542 struct ice_aq_desc desc;
543
544 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
545 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
546 }
547
548 /**
549 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
550 * @hw: pointer to the HW struct
551 * @num_nodes: number of nodes
552 * @node_teids: array of node teids to be suspended or resumed
553 * @suspend: true means suspend / false means resume
554 *
555 * This function suspends or resumes HW nodes
556 */
557 static int
ice_sched_suspend_resume_elems(struct ice_hw * hw,u8 num_nodes,u32 * node_teids,bool suspend)558 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
559 bool suspend)
560 {
561 u16 i, buf_size, num_elem_ret = 0;
562 __le32 *buf;
563 int status;
564
565 buf_size = sizeof(*buf) * num_nodes;
566 buf = (__le32 *)ice_malloc(hw, buf_size);
567 if (!buf)
568 return ICE_ERR_NO_MEMORY;
569
570 for (i = 0; i < num_nodes; i++)
571 buf[i] = CPU_TO_LE32(node_teids[i]);
572
573 if (suspend)
574 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
575 buf_size, &num_elem_ret,
576 NULL);
577 else
578 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
579 buf_size, &num_elem_ret,
580 NULL);
581 if (status || num_elem_ret != num_nodes)
582 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
583
584 ice_free(hw, buf);
585 return status;
586 }
587
588 /**
589 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
590 * @hw: pointer to the HW struct
591 * @vsi_handle: VSI handle
592 * @tc: TC number
593 * @new_numqs: number of queues
594 */
595 static int
ice_alloc_lan_q_ctx(struct ice_hw * hw,u16 vsi_handle,u8 tc,u16 new_numqs)596 ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
597 {
598 struct ice_vsi_ctx *vsi_ctx;
599 struct ice_q_ctx *q_ctx;
600
601 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
602 if (!vsi_ctx)
603 return ICE_ERR_PARAM;
604 /* allocate LAN queue contexts */
605 if (!vsi_ctx->lan_q_ctx[tc]) {
606 vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
607 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
608 if (!vsi_ctx->lan_q_ctx[tc])
609 return ICE_ERR_NO_MEMORY;
610 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
611 return 0;
612 }
613 /* num queues are increased, update the queue contexts */
614 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
615 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
616
617 q_ctx = (struct ice_q_ctx *)
618 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
619 if (!q_ctx)
620 return ICE_ERR_NO_MEMORY;
621 ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
622 prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
623 ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
624 vsi_ctx->lan_q_ctx[tc] = q_ctx;
625 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
626 }
627 return 0;
628 }
629
630 /**
631 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
632 * @hw: pointer to the HW struct
633 * @vsi_handle: VSI handle
634 * @tc: TC number
635 * @new_numqs: number of queues
636 */
637 static int
ice_alloc_rdma_q_ctx(struct ice_hw * hw,u16 vsi_handle,u8 tc,u16 new_numqs)638 ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
639 {
640 struct ice_vsi_ctx *vsi_ctx;
641 struct ice_q_ctx *q_ctx;
642
643 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
644 if (!vsi_ctx)
645 return ICE_ERR_PARAM;
646 /* allocate RDMA queue contexts */
647 if (!vsi_ctx->rdma_q_ctx[tc]) {
648 vsi_ctx->rdma_q_ctx[tc] = (struct ice_q_ctx *)
649 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
650 if (!vsi_ctx->rdma_q_ctx[tc])
651 return ICE_ERR_NO_MEMORY;
652 vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
653 return 0;
654 }
655 /* num queues are increased, update the queue contexts */
656 if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
657 u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
658
659 q_ctx = (struct ice_q_ctx *)
660 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
661 if (!q_ctx)
662 return ICE_ERR_NO_MEMORY;
663 ice_memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
664 prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
665 ice_free(hw, vsi_ctx->rdma_q_ctx[tc]);
666 vsi_ctx->rdma_q_ctx[tc] = q_ctx;
667 vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
668 }
669 return 0;
670 }
671
672 /**
673 * ice_aq_rl_profile - performs a rate limiting task
674 * @hw: pointer to the HW struct
675 * @opcode: opcode for add, query, or remove profile(s)
676 * @num_profiles: the number of profiles
677 * @buf: pointer to buffer
678 * @buf_size: buffer size in bytes
679 * @num_processed: number of processed add or remove profile(s) to return
680 * @cd: pointer to command details structure
681 *
682 * RL profile function to add, query, or remove profile(s)
683 */
684 static int
ice_aq_rl_profile(struct ice_hw * hw,enum ice_adminq_opc opcode,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,u16 * num_processed,struct ice_sq_cd * cd)685 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
686 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
687 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
688 {
689 struct ice_aqc_rl_profile *cmd;
690 struct ice_aq_desc desc;
691 int status;
692
693 cmd = &desc.params.rl_profile;
694
695 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
696 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
697 cmd->num_profiles = CPU_TO_LE16(num_profiles);
698 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
699 if (!status && num_processed)
700 *num_processed = LE16_TO_CPU(cmd->num_processed);
701 return status;
702 }
703
704 /**
705 * ice_aq_add_rl_profile - adds rate limiting profile(s)
706 * @hw: pointer to the HW struct
707 * @num_profiles: the number of profile(s) to be add
708 * @buf: pointer to buffer
709 * @buf_size: buffer size in bytes
710 * @num_profiles_added: total number of profiles added to return
711 * @cd: pointer to command details structure
712 *
713 * Add RL profile (0x0410)
714 */
715 static int
ice_aq_add_rl_profile(struct ice_hw * hw,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,u16 * num_profiles_added,struct ice_sq_cd * cd)716 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
717 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
718 u16 *num_profiles_added, struct ice_sq_cd *cd)
719 {
720 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
721 buf, buf_size, num_profiles_added, cd);
722 }
723
724 /**
725 * ice_aq_query_rl_profile - query rate limiting profile(s)
726 * @hw: pointer to the HW struct
727 * @num_profiles: the number of profile(s) to query
728 * @buf: pointer to buffer
729 * @buf_size: buffer size in bytes
730 * @cd: pointer to command details structure
731 *
732 * Query RL profile (0x0411)
733 */
734 int
ice_aq_query_rl_profile(struct ice_hw * hw,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,struct ice_sq_cd * cd)735 ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
736 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
737 struct ice_sq_cd *cd)
738 {
739 return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
740 num_profiles, buf, buf_size, NULL, cd);
741 }
742
743 /**
744 * ice_aq_remove_rl_profile - removes RL profile(s)
745 * @hw: pointer to the HW struct
746 * @num_profiles: the number of profile(s) to remove
747 * @buf: pointer to buffer
748 * @buf_size: buffer size in bytes
749 * @num_profiles_removed: total number of profiles removed to return
750 * @cd: pointer to command details structure or NULL
751 *
752 * Remove RL profile (0x0415)
753 */
754 static int
ice_aq_remove_rl_profile(struct ice_hw * hw,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,u16 * num_profiles_removed,struct ice_sq_cd * cd)755 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
756 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
757 u16 *num_profiles_removed, struct ice_sq_cd *cd)
758 {
759 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
760 num_profiles, buf, buf_size,
761 num_profiles_removed, cd);
762 }
763
764 /**
765 * ice_sched_del_rl_profile - remove RL profile
766 * @hw: pointer to the HW struct
767 * @rl_info: rate limit profile information
768 *
769 * If the profile ID is not referenced anymore, it removes profile ID with
770 * its associated parameters from HW DB,and locally. The caller needs to
771 * hold scheduler lock.
772 */
773 static int
ice_sched_del_rl_profile(struct ice_hw * hw,struct ice_aqc_rl_profile_info * rl_info)774 ice_sched_del_rl_profile(struct ice_hw *hw,
775 struct ice_aqc_rl_profile_info *rl_info)
776 {
777 struct ice_aqc_rl_profile_elem *buf;
778 u16 num_profiles_removed;
779 u16 num_profiles = 1;
780 int status;
781
782 if (rl_info->prof_id_ref != 0)
783 return ICE_ERR_IN_USE;
784
785 /* Safe to remove profile ID */
786 buf = &rl_info->profile;
787 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
788 &num_profiles_removed, NULL);
789 if (status || num_profiles_removed != num_profiles)
790 return ICE_ERR_CFG;
791
792 /* Delete stale entry now */
793 LIST_DEL(&rl_info->list_entry);
794 ice_free(hw, rl_info);
795 return status;
796 }
797
798 /**
799 * ice_sched_clear_rl_prof - clears RL prof entries
800 * @pi: port information structure
801 *
802 * This function removes all RL profile from HW as well as from SW DB.
803 */
ice_sched_clear_rl_prof(struct ice_port_info * pi)804 static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
805 {
806 u16 ln;
807 struct ice_hw *hw = pi->hw;
808
809 for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
810 struct ice_aqc_rl_profile_info *rl_prof_elem;
811 struct ice_aqc_rl_profile_info *rl_prof_tmp;
812
813 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
814 &hw->rl_prof_list[ln],
815 ice_aqc_rl_profile_info, list_entry) {
816 int status;
817
818 rl_prof_elem->prof_id_ref = 0;
819 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
820 if (status) {
821 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
822 /* On error, free mem required */
823 LIST_DEL(&rl_prof_elem->list_entry);
824 ice_free(hw, rl_prof_elem);
825 }
826 }
827 }
828 }
829
830 /**
831 * ice_sched_clear_agg - clears the aggregator related information
832 * @hw: pointer to the hardware structure
833 *
834 * This function removes aggregator list and free up aggregator related memory
835 * previously allocated.
836 */
ice_sched_clear_agg(struct ice_hw * hw)837 void ice_sched_clear_agg(struct ice_hw *hw)
838 {
839 struct ice_sched_agg_info *agg_info;
840 struct ice_sched_agg_info *atmp;
841
842 LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
843 ice_sched_agg_info,
844 list_entry) {
845 struct ice_sched_agg_vsi_info *agg_vsi_info;
846 struct ice_sched_agg_vsi_info *vtmp;
847
848 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
849 &agg_info->agg_vsi_list,
850 ice_sched_agg_vsi_info, list_entry) {
851 LIST_DEL(&agg_vsi_info->list_entry);
852 ice_free(hw, agg_vsi_info);
853 }
854 LIST_DEL(&agg_info->list_entry);
855 ice_free(hw, agg_info);
856 }
857 }
858
859 /**
860 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
861 * @pi: port information structure
862 *
863 * This function removes all the nodes from HW as well as from SW DB.
864 */
ice_sched_clear_tx_topo(struct ice_port_info * pi)865 static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
866 {
867 if (!pi)
868 return;
869 /* remove RL profiles related lists */
870 ice_sched_clear_rl_prof(pi);
871 if (pi->root) {
872 ice_free_sched_node(pi, pi->root);
873 pi->root = NULL;
874 }
875 }
876
877 /**
878 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
879 * @pi: port information structure
880 *
881 * Cleanup scheduling elements from SW DB
882 */
ice_sched_clear_port(struct ice_port_info * pi)883 void ice_sched_clear_port(struct ice_port_info *pi)
884 {
885 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
886 return;
887
888 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
889 ice_acquire_lock(&pi->sched_lock);
890 ice_sched_clear_tx_topo(pi);
891 ice_release_lock(&pi->sched_lock);
892 ice_destroy_lock(&pi->sched_lock);
893 }
894
895 /**
896 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
897 * @hw: pointer to the HW struct
898 *
899 * Cleanup scheduling elements from SW DB for all the ports
900 */
ice_sched_cleanup_all(struct ice_hw * hw)901 void ice_sched_cleanup_all(struct ice_hw *hw)
902 {
903 if (!hw)
904 return;
905
906 if (hw->layer_info) {
907 ice_free(hw, hw->layer_info);
908 hw->layer_info = NULL;
909 }
910
911 ice_sched_clear_port(hw->port_info);
912
913 hw->num_tx_sched_layers = 0;
914 hw->num_tx_sched_phys_layers = 0;
915 hw->flattened_layers = 0;
916 hw->max_cgds = 0;
917 }
918
919 /**
920 * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
921 * @hw: pointer to the HW struct
922 * @num_nodes: the number of nodes whose attributes to configure
923 * @buf: pointer to buffer
924 * @buf_size: buffer size in bytes
925 * @cd: pointer to command details structure or NULL
926 *
927 * Configure Node Attributes (0x0417)
928 */
929 int
ice_aq_cfg_node_attr(struct ice_hw * hw,u16 num_nodes,struct ice_aqc_node_attr_elem * buf,u16 buf_size,struct ice_sq_cd * cd)930 ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
931 struct ice_aqc_node_attr_elem *buf, u16 buf_size,
932 struct ice_sq_cd *cd)
933 {
934 struct ice_aqc_node_attr *cmd;
935 struct ice_aq_desc desc;
936
937 cmd = &desc.params.node_attr;
938 ice_fill_dflt_direct_cmd_desc(&desc,
939 ice_aqc_opc_cfg_node_attr);
940 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
941
942 cmd->num_entries = CPU_TO_LE16(num_nodes);
943 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
944 }
945
946 /**
947 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
948 * @hw: pointer to the HW struct
949 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
950 * @buf: pointer to buffer
951 * @buf_size: buffer size in bytes
952 * @cd: pointer to command details structure or NULL
953 *
954 * Configure L2 Node CGD (0x0414)
955 */
956 int
ice_aq_cfg_l2_node_cgd(struct ice_hw * hw,u16 num_l2_nodes,struct ice_aqc_cfg_l2_node_cgd_elem * buf,u16 buf_size,struct ice_sq_cd * cd)957 ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
958 struct ice_aqc_cfg_l2_node_cgd_elem *buf,
959 u16 buf_size, struct ice_sq_cd *cd)
960 {
961 struct ice_aqc_cfg_l2_node_cgd *cmd;
962 struct ice_aq_desc desc;
963
964 cmd = &desc.params.cfg_l2_node_cgd;
965 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
966 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
967
968 cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
969 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
970 }
971
972 /**
973 * ice_sched_add_elems - add nodes to HW and SW DB
974 * @pi: port information structure
975 * @tc_node: pointer to the branch node
976 * @parent: pointer to the parent node
977 * @layer: layer number to add nodes
978 * @num_nodes: number of nodes
979 * @num_nodes_added: pointer to num nodes added
980 * @first_node_teid: if new nodes are added then return the TEID of first node
981 * @prealloc_nodes: preallocated nodes struct for software DB
982 *
983 * This function add nodes to HW as well as to SW DB for a given layer
984 */
985 int
ice_sched_add_elems(struct ice_port_info * pi,struct ice_sched_node * tc_node,struct ice_sched_node * parent,u8 layer,u16 num_nodes,u16 * num_nodes_added,u32 * first_node_teid,struct ice_sched_node ** prealloc_nodes)986 ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
987 struct ice_sched_node *parent, u8 layer, u16 num_nodes,
988 u16 *num_nodes_added, u32 *first_node_teid,
989 struct ice_sched_node **prealloc_nodes)
990 {
991 struct ice_sched_node *prev, *new_node;
992 struct ice_aqc_add_elem *buf;
993 u16 i, num_groups_added = 0;
994 struct ice_hw *hw = pi->hw;
995 int status = 0;
996 u16 buf_size;
997 u32 teid;
998
999 buf_size = ice_struct_size(buf, generic, num_nodes);
1000 buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
1001 if (!buf)
1002 return ICE_ERR_NO_MEMORY;
1003
1004 buf->hdr.parent_teid = parent->info.node_teid;
1005 buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
1006 for (i = 0; i < num_nodes; i++) {
1007 buf->generic[i].parent_teid = parent->info.node_teid;
1008 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
1009 buf->generic[i].data.valid_sections =
1010 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
1011 ICE_AQC_ELEM_VALID_EIR;
1012 buf->generic[i].data.generic = 0;
1013 buf->generic[i].data.cir_bw.bw_profile_idx =
1014 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
1015 buf->generic[i].data.cir_bw.bw_alloc =
1016 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
1017 buf->generic[i].data.eir_bw.bw_profile_idx =
1018 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
1019 buf->generic[i].data.eir_bw.bw_alloc =
1020 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
1021 }
1022
1023 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
1024 &num_groups_added, NULL);
1025 if (status || num_groups_added != 1) {
1026 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
1027 hw->adminq.sq_last_status);
1028 ice_free(hw, buf);
1029 return ICE_ERR_CFG;
1030 }
1031
1032 *num_nodes_added = num_nodes;
1033 /* add nodes to the SW DB */
1034 for (i = 0; i < num_nodes; i++) {
1035 if (prealloc_nodes)
1036 status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
1037 else
1038 status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
1039
1040 if (status) {
1041 ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
1042 status);
1043 break;
1044 }
1045
1046 teid = LE32_TO_CPU(buf->generic[i].node_teid);
1047 new_node = ice_sched_find_node_by_teid(parent, teid);
1048 if (!new_node) {
1049 ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
1050 break;
1051 }
1052
1053 new_node->sibling = NULL;
1054 new_node->tc_num = tc_node->tc_num;
1055
1056 /* add it to previous node sibling pointer */
1057 /* Note: siblings are not linked across branches */
1058 prev = ice_sched_get_first_node(pi, tc_node, layer);
1059 if (prev && prev != new_node) {
1060 while (prev->sibling)
1061 prev = prev->sibling;
1062 prev->sibling = new_node;
1063 }
1064
1065 /* initialize the sibling head */
1066 if (!pi->sib_head[tc_node->tc_num][layer])
1067 pi->sib_head[tc_node->tc_num][layer] = new_node;
1068
1069 if (i == 0)
1070 *first_node_teid = teid;
1071 }
1072
1073 ice_free(hw, buf);
1074 return status;
1075 }
1076
1077 /**
1078 * ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer
1079 * @pi: port information structure
1080 * @tc_node: pointer to TC node
1081 * @parent: pointer to parent node
1082 * @layer: layer number to add nodes
1083 * @num_nodes: number of nodes to be added
1084 * @first_node_teid: pointer to the first node TEID
1085 * @num_nodes_added: pointer to number of nodes added
1086 *
1087 * Add nodes into specific hw layer.
1088 */
1089 static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info * pi,struct ice_sched_node * tc_node,struct ice_sched_node * parent,u8 layer,u16 num_nodes,u32 * first_node_teid,u16 * num_nodes_added)1090 ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
1091 struct ice_sched_node *tc_node,
1092 struct ice_sched_node *parent, u8 layer,
1093 u16 num_nodes, u32 *first_node_teid,
1094 u16 *num_nodes_added)
1095 {
1096 u16 max_child_nodes;
1097
1098 *num_nodes_added = 0;
1099
1100 if (!num_nodes)
1101 return 0;
1102
1103 if (!parent || layer < pi->hw->sw_entry_point_layer)
1104 return ICE_ERR_PARAM;
1105
1106 /* max children per node per layer */
1107 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
1108
1109 /* current number of children + required nodes exceed max children */
1110 if ((parent->num_children + num_nodes) > max_child_nodes) {
1111 /* Fail if the parent is a TC node */
1112 if (parent == tc_node)
1113 return ICE_ERR_CFG;
1114 return ICE_ERR_MAX_LIMIT;
1115 }
1116
1117 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
1118 num_nodes_added, first_node_teid, NULL);
1119 }
1120
1121 /**
1122 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
1123 * @pi: port information structure
1124 * @tc_node: pointer to TC node
1125 * @parent: pointer to parent node
1126 * @layer: layer number to add nodes
1127 * @num_nodes: number of nodes to be added
1128 * @first_node_teid: pointer to the first node TEID
1129 * @num_nodes_added: pointer to number of nodes added
1130 *
1131 * This function add nodes to a given layer.
1132 */
1133 static int
ice_sched_add_nodes_to_layer(struct ice_port_info * pi,struct ice_sched_node * tc_node,struct ice_sched_node * parent,u8 layer,u16 num_nodes,u32 * first_node_teid,u16 * num_nodes_added)1134 ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
1135 struct ice_sched_node *tc_node,
1136 struct ice_sched_node *parent, u8 layer,
1137 u16 num_nodes, u32 *first_node_teid,
1138 u16 *num_nodes_added)
1139 {
1140 u32 *first_teid_ptr = first_node_teid;
1141 u16 new_num_nodes = num_nodes;
1142 int status = 0;
1143 u32 temp;
1144
1145 *num_nodes_added = 0;
1146 while (*num_nodes_added < num_nodes) {
1147 u16 max_child_nodes, num_added = 0;
1148
1149 status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
1150 layer, new_num_nodes,
1151 first_teid_ptr,
1152 &num_added);
1153 if (!status)
1154 *num_nodes_added += num_added;
1155 /* added more nodes than requested ? */
1156 if (*num_nodes_added > num_nodes) {
1157 ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
1158 *num_nodes_added);
1159 status = ICE_ERR_CFG;
1160 break;
1161 }
1162 /* break if all the nodes are added successfully */
1163 if (!status && (*num_nodes_added == num_nodes))
1164 break;
1165 /* break if the error is not max limit */
1166 if (status && status != ICE_ERR_MAX_LIMIT)
1167 break;
1168 /* Exceeded the max children */
1169 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
1170 /* utilize all the spaces if the parent is not full */
1171 if (parent->num_children < max_child_nodes) {
1172 new_num_nodes = max_child_nodes - parent->num_children;
1173 } else {
1174 /* This parent is full, try the next sibling */
1175 parent = parent->sibling;
1176 /* Don't modify the first node TEID memory if the
1177 * first node was added already in the above call.
1178 * Instead send some temp memory for all other
1179 * recursive calls.
1180 */
1181 if (num_added)
1182 first_teid_ptr = &temp;
1183
1184 new_num_nodes = num_nodes - *num_nodes_added;
1185 }
1186 }
1187 return status;
1188 }
1189
1190 /**
1191 * ice_sched_get_qgrp_layer - get the current queue group layer number
1192 * @hw: pointer to the HW struct
1193 *
1194 * This function returns the current queue group layer number
1195 */
ice_sched_get_qgrp_layer(struct ice_hw * hw)1196 static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1197 {
1198 /* It's always total layers - 1, the array is 0 relative so -2 */
1199 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1200 }
1201
1202 /**
1203 * ice_sched_get_vsi_layer - get the current VSI layer number
1204 * @hw: pointer to the HW struct
1205 *
1206 * This function returns the current VSI layer number
1207 */
ice_sched_get_vsi_layer(struct ice_hw * hw)1208 static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1209 {
1210 /* Num Layers VSI layer
1211 * 9 6
1212 * 7 4
1213 * 5 or less sw_entry_point_layer
1214 */
1215 /* calculate the VSI layer based on number of layers. */
1216 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
1217 return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1218 else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
1219 /* qgroup and VSI layers are same */
1220 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1221 return hw->sw_entry_point_layer;
1222 }
1223
1224 /**
1225 * ice_sched_get_agg_layer - get the current aggregator layer number
1226 * @hw: pointer to the HW struct
1227 *
1228 * This function returns the current aggregator layer number
1229 */
ice_sched_get_agg_layer(struct ice_hw * hw)1230 static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
1231 {
1232 /* Num Layers aggregator layer
1233 * 9 4
1234 * 7 or less sw_entry_point_layer
1235 */
1236 /* calculate the aggregator layer based on number of layers. */
1237 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
1238 return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
1239 return hw->sw_entry_point_layer;
1240 }
1241
1242 /**
1243 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1244 * @pi: port information structure
1245 *
1246 * This function removes the leaf node that was created by the FW
1247 * during initialization
1248 */
ice_rm_dflt_leaf_node(struct ice_port_info * pi)1249 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1250 {
1251 struct ice_sched_node *node;
1252
1253 node = pi->root;
1254 while (node) {
1255 if (!node->num_children)
1256 break;
1257 node = node->children[0];
1258 }
1259 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1260 u32 teid = LE32_TO_CPU(node->info.node_teid);
1261 int status;
1262
1263 /* remove the default leaf node */
1264 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1265 if (!status)
1266 ice_free_sched_node(pi, node);
1267 }
1268 }
1269
1270 /**
1271 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1272 * @pi: port information structure
1273 *
1274 * This function frees all the nodes except root and TC that were created by
1275 * the FW during initialization
1276 */
ice_sched_rm_dflt_nodes(struct ice_port_info * pi)1277 static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1278 {
1279 struct ice_sched_node *node;
1280
1281 ice_rm_dflt_leaf_node(pi);
1282
1283 /* remove the default nodes except TC and root nodes */
1284 node = pi->root;
1285 while (node) {
1286 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1287 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1288 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1289 ice_free_sched_node(pi, node);
1290 break;
1291 }
1292
1293 if (!node->num_children)
1294 break;
1295 node = node->children[0];
1296 }
1297 }
1298
1299 /**
1300 * ice_sched_init_port - Initialize scheduler by querying information from FW
1301 * @pi: port info structure for the tree to cleanup
1302 *
1303 * This function is the initial call to find the total number of Tx scheduler
1304 * resources, default topology created by firmware and storing the information
1305 * in SW DB.
1306 */
ice_sched_init_port(struct ice_port_info * pi)1307 int ice_sched_init_port(struct ice_port_info *pi)
1308 {
1309 struct ice_aqc_get_topo_elem *buf;
1310 struct ice_hw *hw;
1311 u8 num_branches;
1312 u16 num_elems;
1313 int status;
1314 u8 i, j;
1315
1316 if (!pi)
1317 return ICE_ERR_PARAM;
1318 hw = pi->hw;
1319
1320 /* Query the Default Topology from FW */
1321 buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
1322 ICE_AQ_MAX_BUF_LEN);
1323 if (!buf)
1324 return ICE_ERR_NO_MEMORY;
1325
1326 /* Query default scheduling tree topology */
1327 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1328 &num_branches, NULL);
1329 if (status)
1330 goto err_init_port;
1331
1332 /* num_branches should be between 1-8 */
1333 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1334 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1335 num_branches);
1336 status = ICE_ERR_PARAM;
1337 goto err_init_port;
1338 }
1339
1340 /* get the number of elements on the default/first branch */
1341 num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);
1342
1343 /* num_elems should always be between 1-9 */
1344 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1345 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1346 num_elems);
1347 status = ICE_ERR_PARAM;
1348 goto err_init_port;
1349 }
1350
1351 /* If the last node is a leaf node then the index of the queue group
1352 * layer is two less than the number of elements.
1353 */
1354 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1355 ICE_AQC_ELEM_TYPE_LEAF)
1356 pi->last_node_teid =
1357 LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
1358 else
1359 pi->last_node_teid =
1360 LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);
1361
1362 /* Insert the Tx Sched root node */
1363 status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1364 if (status)
1365 goto err_init_port;
1366
1367 /* Parse the default tree and cache the information */
1368 for (i = 0; i < num_branches; i++) {
1369 num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);
1370
1371 /* Skip root element as already inserted */
1372 for (j = 1; j < num_elems; j++) {
1373 /* update the sw entry point */
1374 if (buf[0].generic[j].data.elem_type ==
1375 ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1376 hw->sw_entry_point_layer = j;
1377
1378 status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
1379 if (status)
1380 goto err_init_port;
1381 }
1382 }
1383
1384 /* Remove the default nodes. */
1385 if (pi->root)
1386 ice_sched_rm_dflt_nodes(pi);
1387
1388 /* initialize the port for handling the scheduler tree */
1389 pi->port_state = ICE_SCHED_PORT_STATE_READY;
1390 ice_init_lock(&pi->sched_lock);
1391 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1392 INIT_LIST_HEAD(&hw->rl_prof_list[i]);
1393
1394 err_init_port:
1395 if (status && pi->root) {
1396 ice_free_sched_node(pi, pi->root);
1397 pi->root = NULL;
1398 }
1399
1400 ice_free(hw, buf);
1401 return status;
1402 }
1403
1404 /**
1405 * ice_sched_get_node - Get the struct ice_sched_node for given TEID
1406 * @pi: port information structure
1407 * @teid: Scheduler node TEID
1408 *
1409 * This function retrieves the ice_sched_node struct for given TEID from
1410 * the SW DB and returns it to the caller.
1411 */
ice_sched_get_node(struct ice_port_info * pi,u32 teid)1412 struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
1413 {
1414 struct ice_sched_node *node;
1415
1416 if (!pi)
1417 return NULL;
1418
1419 /* Find the node starting from root */
1420 ice_acquire_lock(&pi->sched_lock);
1421 node = ice_sched_find_node_by_teid(pi->root, teid);
1422 ice_release_lock(&pi->sched_lock);
1423
1424 if (!node)
1425 ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);
1426
1427 return node;
1428 }
1429
1430 /**
1431 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1432 * @hw: pointer to the HW struct
1433 *
1434 * query FW for allocated scheduler resources and store in HW struct
1435 */
ice_sched_query_res_alloc(struct ice_hw * hw)1436 int ice_sched_query_res_alloc(struct ice_hw *hw)
1437 {
1438 struct ice_aqc_query_txsched_res_resp *buf;
1439 __le16 max_sibl;
1440 int status = 0;
1441 u16 i;
1442
1443 if (hw->layer_info)
1444 return status;
1445
1446 buf = (struct ice_aqc_query_txsched_res_resp *)
1447 ice_malloc(hw, sizeof(*buf));
1448 if (!buf)
1449 return ICE_ERR_NO_MEMORY;
1450
1451 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1452 if (status)
1453 goto sched_query_out;
1454
1455 hw->num_tx_sched_layers =
1456 (u8)LE16_TO_CPU(buf->sched_props.logical_levels);
1457 hw->num_tx_sched_phys_layers =
1458 (u8)LE16_TO_CPU(buf->sched_props.phys_levels);
1459 hw->flattened_layers = buf->sched_props.flattening_bitmap;
1460 hw->max_cgds = buf->sched_props.max_pf_cgds;
1461
1462 /* max sibling group size of current layer refers to the max children
1463 * of the below layer node.
1464 * layer 1 node max children will be layer 2 max sibling group size
1465 * layer 2 node max children will be layer 3 max sibling group size
1466 * and so on. This array will be populated from root (index 0) to
1467 * qgroup layer 7. Leaf node has no children.
1468 */
1469 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1470 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1471 hw->max_children[i] = LE16_TO_CPU(max_sibl);
1472 }
1473
1474 hw->layer_info = (struct ice_aqc_layer_props *)
1475 ice_memdup(hw, buf->layer_props,
1476 (hw->num_tx_sched_layers *
1477 sizeof(*hw->layer_info)),
1478 ICE_NONDMA_TO_NONDMA);
1479 if (!hw->layer_info) {
1480 status = ICE_ERR_NO_MEMORY;
1481 goto sched_query_out;
1482 }
1483
1484 sched_query_out:
1485 ice_free(hw, buf);
1486 return status;
1487 }
1488
1489 /**
1490 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
1491 * @hw: pointer to the HW struct
1492 *
1493 * Determine the PSM clock frequency and store in HW struct
1494 */
ice_sched_get_psm_clk_freq(struct ice_hw * hw)1495 void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
1496 {
1497 u32 val, clk_src;
1498
1499 val = rd32(hw, GLGEN_CLKSTAT_SRC);
1500 clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
1501 GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
1502
1503 switch (clk_src) {
1504 case PSM_CLK_SRC_367_MHZ:
1505 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
1506 break;
1507 case PSM_CLK_SRC_416_MHZ:
1508 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
1509 break;
1510 case PSM_CLK_SRC_446_MHZ:
1511 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1512 break;
1513 case PSM_CLK_SRC_390_MHZ:
1514 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
1515 break;
1516
1517 /* default condition is not required as clk_src is restricted
1518 * to a 2-bit value from GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask.
1519 * The above switch statements cover the possible values of
1520 * this variable.
1521 */
1522 }
1523 }
1524
1525 /**
1526 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1527 * @hw: pointer to the HW struct
1528 * @base: pointer to the base node
1529 * @node: pointer to the node to search
1530 *
1531 * This function checks whether a given node is part of the base node
1532 * subtree or not
1533 */
1534 bool
ice_sched_find_node_in_subtree(struct ice_hw * hw,struct ice_sched_node * base,struct ice_sched_node * node)1535 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1536 struct ice_sched_node *node)
1537 {
1538 u8 i;
1539
1540 for (i = 0; i < base->num_children; i++) {
1541 struct ice_sched_node *child = base->children[i];
1542
1543 if (node == child)
1544 return true;
1545
1546 if (child->tx_sched_layer > node->tx_sched_layer)
1547 return false;
1548
1549 /* this recursion is intentional, and wouldn't
1550 * go more than 8 calls
1551 */
1552 if (ice_sched_find_node_in_subtree(hw, child, node))
1553 return true;
1554 }
1555 return false;
1556 }
1557
1558 /**
1559 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
1560 * @pi: port information structure
1561 * @vsi_node: software VSI handle
1562 * @qgrp_node: first queue group node identified for scanning
1563 * @owner: LAN or RDMA
1564 *
1565 * This function retrieves a free LAN or RDMA queue group node by scanning
1566 * qgrp_node and its siblings for the queue group with the fewest number
1567 * of queues currently assigned.
1568 */
1569 static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info * pi,struct ice_sched_node * vsi_node,struct ice_sched_node * qgrp_node,u8 owner)1570 ice_sched_get_free_qgrp(struct ice_port_info *pi,
1571 struct ice_sched_node *vsi_node,
1572 struct ice_sched_node *qgrp_node, u8 owner)
1573 {
1574 struct ice_sched_node *min_qgrp;
1575 u8 min_children;
1576
1577 if (!qgrp_node)
1578 return qgrp_node;
1579 min_children = qgrp_node->num_children;
1580 if (!min_children)
1581 return qgrp_node;
1582 min_qgrp = qgrp_node;
1583 /* scan all queue groups until find a node which has less than the
1584 * minimum number of children. This way all queue group nodes get
1585 * equal number of shares and active. The bandwidth will be equally
1586 * distributed across all queues.
1587 */
1588 while (qgrp_node) {
1589 /* make sure the qgroup node is part of the VSI subtree */
1590 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1591 if (qgrp_node->num_children < min_children &&
1592 qgrp_node->owner == owner) {
1593 /* replace the new min queue group node */
1594 min_qgrp = qgrp_node;
1595 min_children = min_qgrp->num_children;
1596 /* break if it has no children, */
1597 if (!min_children)
1598 break;
1599 }
1600 qgrp_node = qgrp_node->sibling;
1601 }
1602 return min_qgrp;
1603 }
1604
1605 /**
1606 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1607 * @pi: port information structure
1608 * @vsi_handle: software VSI handle
1609 * @tc: branch number
1610 * @owner: LAN or RDMA
1611 *
1612 * This function retrieves a free LAN or RDMA queue group node
1613 */
1614 struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u8 owner)1615 ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1616 u8 owner)
1617 {
1618 struct ice_sched_node *vsi_node, *qgrp_node;
1619 struct ice_vsi_ctx *vsi_ctx;
1620 u8 qgrp_layer, vsi_layer;
1621 u16 max_children;
1622
1623 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1624 vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1625 max_children = pi->hw->max_children[qgrp_layer];
1626
1627 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1628 if (!vsi_ctx)
1629 return NULL;
1630 vsi_node = vsi_ctx->sched.vsi_node[tc];
1631 /* validate invalid VSI ID */
1632 if (!vsi_node)
1633 return NULL;
1634
1635 /* If the queue group and vsi layer are same then queues
1636 * are all attached directly to VSI
1637 */
1638 if (qgrp_layer == vsi_layer)
1639 return vsi_node;
1640
1641 /* get the first queue group node from VSI sub-tree */
1642 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1643 while (qgrp_node) {
1644 /* make sure the qgroup node is part of the VSI subtree */
1645 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1646 if (qgrp_node->num_children < max_children &&
1647 qgrp_node->owner == owner)
1648 break;
1649 qgrp_node = qgrp_node->sibling;
1650 }
1651
1652 /* Select the best queue group */
1653 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
1654 }
1655
1656 /**
1657 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1658 * @pi: pointer to the port information structure
1659 * @tc_node: pointer to the TC node
1660 * @vsi_handle: software VSI handle
1661 *
1662 * This function retrieves a VSI node for a given VSI ID from a given
1663 * TC branch
1664 */
1665 struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info * pi,struct ice_sched_node * tc_node,u16 vsi_handle)1666 ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1667 u16 vsi_handle)
1668 {
1669 struct ice_sched_node *node;
1670 u8 vsi_layer;
1671
1672 vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1673 node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
1674
1675 /* Check whether it already exists */
1676 while (node) {
1677 if (node->vsi_handle == vsi_handle)
1678 return node;
1679 node = node->sibling;
1680 }
1681
1682 return node;
1683 }
1684
1685 /**
1686 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
1687 * @pi: pointer to the port information structure
1688 * @tc_node: pointer to the TC node
1689 * @agg_id: aggregator ID
1690 *
1691 * This function retrieves an aggregator node for a given aggregator ID from
1692 * a given TC branch
1693 */
1694 static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info * pi,struct ice_sched_node * tc_node,u32 agg_id)1695 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1696 u32 agg_id)
1697 {
1698 struct ice_sched_node *node;
1699 struct ice_hw *hw = pi->hw;
1700 u8 agg_layer;
1701
1702 if (!hw)
1703 return NULL;
1704 agg_layer = ice_sched_get_agg_layer(hw);
1705 node = ice_sched_get_first_node(pi, tc_node, agg_layer);
1706
1707 /* Check whether it already exists */
1708 while (node) {
1709 if (node->agg_id == agg_id)
1710 return node;
1711 node = node->sibling;
1712 }
1713
1714 return node;
1715 }
1716
1717 /**
1718 * ice_sched_check_node - Compare node parameters between SW DB and HW DB
1719 * @hw: pointer to the HW struct
1720 * @node: pointer to the ice_sched_node struct
1721 *
1722 * This function queries and compares the HW element with SW DB node parameters
1723 */
ice_sched_check_node(struct ice_hw * hw,struct ice_sched_node * node)1724 static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
1725 {
1726 struct ice_aqc_txsched_elem_data buf;
1727 u32 node_teid;
1728 int status;
1729
1730 node_teid = LE32_TO_CPU(node->info.node_teid);
1731 status = ice_sched_query_elem(hw, node_teid, &buf);
1732 if (status)
1733 return false;
1734
1735 if (memcmp(&buf, &node->info, sizeof(buf))) {
1736 ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
1737 node_teid);
1738 return false;
1739 }
1740
1741 return true;
1742 }
1743
1744 /**
1745 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1746 * @hw: pointer to the HW struct
1747 * @num_qs: number of queues
1748 * @num_nodes: num nodes array
1749 *
1750 * This function calculates the number of VSI child nodes based on the
1751 * number of queues.
1752 */
1753 static void
ice_sched_calc_vsi_child_nodes(struct ice_hw * hw,u16 num_qs,u16 * num_nodes)1754 ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1755 {
1756 u16 num = num_qs;
1757 u8 i, qgl, vsil;
1758
1759 qgl = ice_sched_get_qgrp_layer(hw);
1760 vsil = ice_sched_get_vsi_layer(hw);
1761
1762 /* calculate num nodes from queue group to VSI layer */
1763 for (i = qgl; i > vsil; i--) {
1764 /* round to the next integer if there is a remainder */
1765 num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);
1766
1767 /* need at least one node */
1768 num_nodes[i] = num ? num : 1;
1769 }
1770 }
1771
1772 /**
1773 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1774 * @pi: port information structure
1775 * @vsi_handle: software VSI handle
1776 * @tc_node: pointer to the TC node
1777 * @num_nodes: pointer to the num nodes that needs to be added per layer
1778 * @owner: node owner (LAN or RDMA)
1779 *
1780 * This function adds the VSI child nodes to tree. It gets called for
1781 * LAN and RDMA separately.
1782 */
1783 static int
ice_sched_add_vsi_child_nodes(struct ice_port_info * pi,u16 vsi_handle,struct ice_sched_node * tc_node,u16 * num_nodes,u8 owner)1784 ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1785 struct ice_sched_node *tc_node, u16 *num_nodes,
1786 u8 owner)
1787 {
1788 struct ice_sched_node *parent, *node;
1789 struct ice_hw *hw = pi->hw;
1790 u32 first_node_teid;
1791 u16 num_added = 0;
1792 u8 i, qgl, vsil;
1793
1794 qgl = ice_sched_get_qgrp_layer(hw);
1795 vsil = ice_sched_get_vsi_layer(hw);
1796 parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1797 for (i = vsil + 1; i <= qgl; i++) {
1798 int status;
1799
1800 if (!parent)
1801 return ICE_ERR_CFG;
1802
1803 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1804 num_nodes[i],
1805 &first_node_teid,
1806 &num_added);
1807 if (status || num_nodes[i] != num_added)
1808 return ICE_ERR_CFG;
1809
1810 /* The newly added node can be a new parent for the next
1811 * layer nodes
1812 */
1813 if (num_added) {
1814 parent = ice_sched_find_node_by_teid(tc_node,
1815 first_node_teid);
1816 node = parent;
1817 while (node) {
1818 node->owner = owner;
1819 node = node->sibling;
1820 }
1821 } else {
1822 parent = parent->children[0];
1823 }
1824 }
1825
1826 return 0;
1827 }
1828
1829 /**
1830 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1831 * @pi: pointer to the port info structure
1832 * @tc_node: pointer to TC node
1833 * @num_nodes: pointer to num nodes array
1834 *
1835 * This function calculates the number of supported nodes needed to add this
1836 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
1837 * layers
1838 */
1839 static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info * pi,struct ice_sched_node * tc_node,u16 * num_nodes)1840 ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
1841 struct ice_sched_node *tc_node, u16 *num_nodes)
1842 {
1843 struct ice_sched_node *node;
1844 u8 vsil;
1845 int i;
1846
1847 vsil = ice_sched_get_vsi_layer(pi->hw);
1848 for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
1849 /* Add intermediate nodes if TC has no children and
1850 * need at least one node for VSI
1851 */
1852 if (!tc_node->num_children || i == vsil) {
1853 num_nodes[i]++;
1854 } else {
1855 /* If intermediate nodes are reached max children
1856 * then add a new one.
1857 */
1858 node = ice_sched_get_first_node(pi, tc_node, (u8)i);
1859 /* scan all the siblings */
1860 while (node) {
1861 if (node->num_children <
1862 pi->hw->max_children[i])
1863 break;
1864 node = node->sibling;
1865 }
1866
1867 /* tree has one intermediate node to add this new VSI.
1868 * So no need to calculate supported nodes for below
1869 * layers.
1870 */
1871 if (node)
1872 break;
1873 /* all the nodes are full, allocate a new one */
1874 num_nodes[i]++;
1875 }
1876 }
1877
1878 /**
1879 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1880 * @pi: port information structure
1881 * @vsi_handle: software VSI handle
1882 * @tc_node: pointer to TC node
1883 * @num_nodes: pointer to num nodes array
1884 *
1885 * This function adds the VSI supported nodes into Tx tree including the
1886 * VSI, its parent and intermediate nodes in below layers
1887 */
1888 static int
ice_sched_add_vsi_support_nodes(struct ice_port_info * pi,u16 vsi_handle,struct ice_sched_node * tc_node,u16 * num_nodes)1889 ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1890 struct ice_sched_node *tc_node, u16 *num_nodes)
1891 {
1892 struct ice_sched_node *parent = tc_node;
1893 u32 first_node_teid;
1894 u16 num_added = 0;
1895 u8 i, vsil;
1896
1897 if (!pi)
1898 return ICE_ERR_PARAM;
1899
1900 vsil = ice_sched_get_vsi_layer(pi->hw);
1901 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1902 int status;
1903
1904 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1905 i, num_nodes[i],
1906 &first_node_teid,
1907 &num_added);
1908 if (status || num_nodes[i] != num_added)
1909 return ICE_ERR_CFG;
1910
1911 /* The newly added node can be a new parent for the next
1912 * layer nodes
1913 */
1914 if (num_added)
1915 parent = ice_sched_find_node_by_teid(tc_node,
1916 first_node_teid);
1917 else
1918 parent = parent->children[0];
1919
1920 if (!parent)
1921 return ICE_ERR_CFG;
1922
1923 if (i == vsil)
1924 parent->vsi_handle = vsi_handle;
1925 }
1926
1927 return 0;
1928 }
1929
1930 /**
1931 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1932 * @pi: port information structure
1933 * @vsi_handle: software VSI handle
1934 * @tc: TC number
1935 *
1936 * This function adds a new VSI into scheduler tree
1937 */
1938 static int
ice_sched_add_vsi_to_topo(struct ice_port_info * pi,u16 vsi_handle,u8 tc)1939 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1940 {
1941 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1942 struct ice_sched_node *tc_node;
1943
1944 tc_node = ice_sched_get_tc_node(pi, tc);
1945 if (!tc_node)
1946 return ICE_ERR_PARAM;
1947
1948 /* calculate number of supported nodes needed for this VSI */
1949 ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
1950
1951 /* add VSI supported nodes to TC subtree */
1952 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1953 num_nodes);
1954 }
1955
1956 /**
1957 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1958 * @pi: port information structure
1959 * @vsi_handle: software VSI handle
1960 * @tc: TC number
1961 * @new_numqs: new number of max queues
1962 * @owner: owner of this subtree
1963 *
1964 * This function updates the VSI child nodes based on the number of queues
1965 */
1966 static int
ice_sched_update_vsi_child_nodes(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u16 new_numqs,u8 owner)1967 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1968 u8 tc, u16 new_numqs, u8 owner)
1969 {
1970 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1971 struct ice_sched_node *vsi_node;
1972 struct ice_sched_node *tc_node;
1973 struct ice_vsi_ctx *vsi_ctx;
1974 struct ice_hw *hw = pi->hw;
1975 int status = 0;
1976 u16 prev_numqs;
1977
1978 tc_node = ice_sched_get_tc_node(pi, tc);
1979 if (!tc_node)
1980 return ICE_ERR_CFG;
1981
1982 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1983 if (!vsi_node)
1984 return ICE_ERR_CFG;
1985
1986 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1987 if (!vsi_ctx)
1988 return ICE_ERR_PARAM;
1989
1990 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1991 prev_numqs = vsi_ctx->sched.max_lanq[tc];
1992 else
1993 prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
1994 /* num queues are not changed or less than the previous number */
1995 if (new_numqs <= prev_numqs)
1996 return status;
1997 if (owner == ICE_SCHED_NODE_OWNER_LAN) {
1998 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1999 if (status)
2000 return status;
2001 } else {
2002 status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
2003 if (status)
2004 return status;
2005 }
2006
2007 if (new_numqs)
2008 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
2009 /* Keep the max number of queue configuration all the time. Update the
2010 * tree only if number of queues > previous number of queues. This may
2011 * leave some extra nodes in the tree if number of queues < previous
2012 * number but that wouldn't harm anything. Removing those extra nodes
2013 * may complicate the code if those nodes are part of SRL or
2014 * individually rate limited.
2015 */
2016 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
2017 new_num_nodes, owner);
2018 if (status)
2019 return status;
2020 if (owner == ICE_SCHED_NODE_OWNER_LAN)
2021 vsi_ctx->sched.max_lanq[tc] = new_numqs;
2022 else
2023 vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
2024
2025 return 0;
2026 }
2027
/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 *
 * Returns 0 on success, ICE_ERR_PARAM for an unknown TC or VSI handle,
 * ICE_ERR_CFG when tree manipulation fails. NOTE(review): presumably the
 * caller holds pi->sched_lock, as sibling helpers here do - confirm at
 * call sites.
 */
int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_hw *hw = pi->hw;
	int status = 0;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* may be NULL on first configuration of this VSI/TC */
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		/* only an existing, active node needs suspending */
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		/* re-fetch: the node was just created by the call above */
		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
		vsi_ctx->sched.max_rdmaq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
2109
2110 /**
2111 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
2112 * @pi: port information structure
2113 * @vsi_handle: software VSI handle
2114 *
2115 * This function removes single aggregator VSI info entry from
2116 * aggregator list.
2117 */
ice_sched_rm_agg_vsi_info(struct ice_port_info * pi,u16 vsi_handle)2118 static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
2119 {
2120 struct ice_sched_agg_info *agg_info;
2121 struct ice_sched_agg_info *atmp;
2122
2123 LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
2124 ice_sched_agg_info,
2125 list_entry) {
2126 struct ice_sched_agg_vsi_info *agg_vsi_info;
2127 struct ice_sched_agg_vsi_info *vtmp;
2128
2129 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
2130 &agg_info->agg_vsi_list,
2131 ice_sched_agg_vsi_info, list_entry)
2132 if (agg_vsi_info->vsi_handle == vsi_handle) {
2133 LIST_DEL(&agg_vsi_info->list_entry);
2134 ice_free(pi->hw, agg_vsi_info);
2135 return;
2136 }
2137 }
2138 }
2139
2140 /**
2141 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
2142 * @node: pointer to the sub-tree node
2143 *
2144 * This function checks for a leaf node presence in a given sub-tree node.
2145 */
ice_sched_is_leaf_node_present(struct ice_sched_node * node)2146 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
2147 {
2148 u8 i;
2149
2150 for (i = 0; i < node->num_children; i++)
2151 if (ice_sched_is_leaf_node_present(node->children[i]))
2152 return true;
2153 /* check for a leaf node */
2154 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
2155 }
2156
/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree. The removal is refused (ICE_ERR_IN_USE) while any leaf
 * (queue) node is still attached under the VSI on any TC; callers must tear
 * down queues first.
 *
 * Returns 0 on success, ICE_ERR_PARAM for an invalid handle/context,
 * ICE_ERR_IN_USE when leaves remain. Takes and releases pi->sched_lock.
 */
static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	struct ice_vsi_ctx *vsi_ctx;
	int status = ICE_ERR_PARAM;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	ice_acquire_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		/* the VSI may not exist on every TC - skip absent ones */
		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* refuse removal while queue (leaf) nodes are attached */
		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		/* free only the children owned by @owner */
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		/* the owner's queue high-water mark is now stale */
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
		else
			vsi_ctx->sched.max_rdmaq[i] = 0;
	}
	status = 0;

exit_sched_rm_vsi_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}
2229
/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs. Thin wrapper around ice_sched_rm_vsi_cfg() with the LAN
 * owner; returns whatever that helper returns.
 */
int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
2242
/**
 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its RDMA children nodes from scheduler tree
 * for all TCs. Thin wrapper around ice_sched_rm_vsi_cfg() with the RDMA
 * owner; returns whatever that helper returns.
 */
int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
2255
2256 /**
2257 * ice_sched_is_tree_balanced - Check tree nodes are identical or not
2258 * @hw: pointer to the HW struct
2259 * @node: pointer to the ice_sched_node struct
2260 *
2261 * This function compares all the nodes for a given tree against HW DB nodes
2262 * This function needs to be called with the port_info->sched_lock held
2263 */
ice_sched_is_tree_balanced(struct ice_hw * hw,struct ice_sched_node * node)2264 bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
2265 {
2266 u8 i;
2267
2268 /* start from the leaf node */
2269 for (i = 0; i < node->num_children; i++)
2270 /* Fail if node doesn't match with the SW DB
2271 * this recursion is intentional, and wouldn't
2272 * go more than 9 calls
2273 */
2274 if (!ice_sched_is_tree_balanced(hw, node->children[i]))
2275 return false;
2276
2277 return ice_sched_check_node(hw, node);
2278 }
2279
2280 /**
2281 * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
2282 * @hw: pointer to the HW struct
2283 * @node_teid: node TEID
2284 * @buf: pointer to buffer
2285 * @buf_size: buffer size in bytes
2286 * @cd: pointer to command details structure or NULL
2287 *
2288 * This function retrieves the tree topology from the firmware for a given
2289 * node TEID to the root node.
2290 */
2291 int
ice_aq_query_node_to_root(struct ice_hw * hw,u32 node_teid,struct ice_aqc_txsched_elem_data * buf,u16 buf_size,struct ice_sq_cd * cd)2292 ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
2293 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
2294 struct ice_sq_cd *cd)
2295 {
2296 struct ice_aqc_query_node_to_root *cmd;
2297 struct ice_aq_desc desc;
2298
2299 cmd = &desc.params.query_node_to_root;
2300 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
2301 cmd->teid = CPU_TO_LE32(node_teid);
2302 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2303 }
2304
2305 /**
2306 * ice_get_agg_info - get the aggregator ID
2307 * @hw: pointer to the hardware structure
2308 * @agg_id: aggregator ID
2309 *
2310 * This function validates aggregator ID. The function returns info if
2311 * aggregator ID is present in list otherwise it returns null.
2312 */
2313 static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw * hw,u32 agg_id)2314 ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
2315 {
2316 struct ice_sched_agg_info *agg_info;
2317
2318 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
2319 list_entry)
2320 if (agg_info->agg_id == agg_id)
2321 return agg_info;
2322
2323 return NULL;
2324 }
2325
2326 /**
2327 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2328 * @hw: pointer to the HW struct
2329 * @node: pointer to a child node
2330 * @num_nodes: num nodes count array
2331 *
2332 * This function walks through the aggregator subtree to find a free parent
2333 * node
2334 */
2335 static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw * hw,struct ice_sched_node * node,u16 * num_nodes)2336 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
2337 u16 *num_nodes)
2338 {
2339 u8 l = node->tx_sched_layer;
2340 u8 vsil, i;
2341
2342 vsil = ice_sched_get_vsi_layer(hw);
2343
2344 /* Is it VSI parent layer ? */
2345 if (l == vsil - 1)
2346 return (node->num_children < hw->max_children[l]) ? node : NULL;
2347
2348 /* We have intermediate nodes. Let's walk through the subtree. If the
2349 * intermediate node has space to add a new node then clear the count
2350 */
2351 if (node->num_children < hw->max_children[l])
2352 num_nodes[l] = 0;
2353 /* The below recursive call is intentional and wouldn't go more than
2354 * 2 or 3 iterations.
2355 */
2356
2357 for (i = 0; i < node->num_children; i++) {
2358 struct ice_sched_node *parent;
2359
2360 parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
2361 num_nodes);
2362 if (parent)
2363 return parent;
2364 }
2365
2366 return NULL;
2367 }
2368
2369 /**
2370 * ice_sched_update_parent - update the new parent in SW DB
2371 * @new_parent: pointer to a new parent node
2372 * @node: pointer to a child node
2373 *
2374 * This function removes the child from the old parent and adds it to a new
2375 * parent
2376 */
2377 void
ice_sched_update_parent(struct ice_sched_node * new_parent,struct ice_sched_node * node)2378 ice_sched_update_parent(struct ice_sched_node *new_parent,
2379 struct ice_sched_node *node)
2380 {
2381 struct ice_sched_node *old_parent;
2382 u8 i, j;
2383
2384 old_parent = node->parent;
2385
2386 /* update the old parent children */
2387 for (i = 0; i < old_parent->num_children; i++)
2388 if (old_parent->children[i] == node) {
2389 for (j = i + 1; j < old_parent->num_children; j++)
2390 old_parent->children[j - 1] =
2391 old_parent->children[j];
2392 old_parent->num_children--;
2393 break;
2394 }
2395
2396 /* now move the node to a new parent */
2397 new_parent->children[new_parent->num_children++] = node;
2398 node->parent = new_parent;
2399 node->info.parent_teid = new_parent->info.node_teid;
2400 }
2401
2402 /**
2403 * ice_sched_move_nodes - move child nodes to a given parent
2404 * @pi: port information structure
2405 * @parent: pointer to parent node
2406 * @num_items: number of child nodes to be moved
2407 * @list: pointer to child node teids
2408 *
2409 * This function move the child nodes to a given parent.
2410 */
2411 int
ice_sched_move_nodes(struct ice_port_info * pi,struct ice_sched_node * parent,u16 num_items,u32 * list)2412 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
2413 u16 num_items, u32 *list)
2414 {
2415 struct ice_aqc_move_elem *buf;
2416 struct ice_sched_node *node;
2417 u16 i, grps_movd = 0;
2418 struct ice_hw *hw;
2419 int status = 0;
2420 u16 buf_len;
2421
2422 hw = pi->hw;
2423
2424 if (!parent || !num_items)
2425 return ICE_ERR_PARAM;
2426
2427 /* Does parent have enough space */
2428 if (parent->num_children + num_items >
2429 hw->max_children[parent->tx_sched_layer])
2430 return ICE_ERR_AQ_FULL;
2431
2432 buf_len = ice_struct_size(buf, teid, 1);
2433 buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
2434 if (!buf)
2435 return ICE_ERR_NO_MEMORY;
2436
2437 for (i = 0; i < num_items; i++) {
2438 node = ice_sched_find_node_by_teid(pi->root, list[i]);
2439 if (!node) {
2440 status = ICE_ERR_PARAM;
2441 goto move_err_exit;
2442 }
2443
2444 buf->hdr.src_parent_teid = node->info.parent_teid;
2445 buf->hdr.dest_parent_teid = parent->info.node_teid;
2446 buf->teid[0] = node->info.node_teid;
2447 buf->hdr.num_elems = CPU_TO_LE16(1);
2448 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
2449 &grps_movd, NULL);
2450 if (status && grps_movd != 1) {
2451 status = ICE_ERR_CFG;
2452 goto move_err_exit;
2453 }
2454
2455 /* update the SW DB */
2456 ice_sched_update_parent(parent, node);
2457 }
2458
2459 move_err_exit:
2460 ice_free(hw, buf);
2461 return status;
2462 }
2463
/**
 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function moves a VSI to an aggregator node or its subtree.
 * Intermediate nodes may be created if required. It first tries to reuse a
 * free parent already present in the aggregator subtree; only if none exists
 * does it build a fresh chain of intermediate nodes between the aggregator
 * and VSI layers.
 *
 * Returns 0 on success (including when the VSI is already under the
 * aggregator), ICE_ERR_DOES_NOT_EXIST when the aggregator or VSI node is
 * missing on this TC, ICE_ERR_CFG on tree manipulation failure.
 */
static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	u16 num_nodes_added;
	u8 aggl, vsil, i;
	int status;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return 0;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		/* found an existing parent with room - no new nodes needed */
		if (parent)
			goto move_nodes;
	}

	/* add new nodes */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;
	}

move_nodes:
	/* reparent the VSI node (and hence its whole subtree) in HW + SW */
	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}
2544
2545 /**
2546 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2547 * @pi: port information structure
2548 * @agg_info: aggregator info
2549 * @tc: traffic class number
2550 * @rm_vsi_info: true or false
2551 *
2552 * This function move all the VSI(s) to the default aggregator and delete
2553 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
2554 * caller holds the scheduler lock.
2555 */
2556 static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info * pi,struct ice_sched_agg_info * agg_info,u8 tc,bool rm_vsi_info)2557 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
2558 struct ice_sched_agg_info *agg_info, u8 tc,
2559 bool rm_vsi_info)
2560 {
2561 struct ice_sched_agg_vsi_info *agg_vsi_info;
2562 struct ice_sched_agg_vsi_info *tmp;
2563 int status = 0;
2564
2565 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
2566 ice_sched_agg_vsi_info, list_entry) {
2567 u16 vsi_handle = agg_vsi_info->vsi_handle;
2568
2569 /* Move VSI to default aggregator */
2570 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
2571 continue;
2572
2573 status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
2574 ICE_DFLT_AGG_ID, tc);
2575 if (status)
2576 break;
2577
2578 ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
2579 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
2580 LIST_DEL(&agg_vsi_info->list_entry);
2581 ice_free(pi->hw, agg_vsi_info);
2582 }
2583 }
2584
2585 return status;
2586 }
2587
2588 /**
2589 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2590 * @pi: port information structure
2591 * @node: node pointer
2592 *
2593 * This function checks whether the aggregator is attached with any VSI or not.
2594 */
2595 static bool
ice_sched_is_agg_inuse(struct ice_port_info * pi,struct ice_sched_node * node)2596 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
2597 {
2598 u8 vsil, i;
2599
2600 vsil = ice_sched_get_vsi_layer(pi->hw);
2601 if (node->tx_sched_layer < vsil - 1) {
2602 for (i = 0; i < node->num_children; i++)
2603 if (ice_sched_is_agg_inuse(pi, node->children[i]))
2604 return true;
2605 return false;
2606 } else {
2607 return node->num_children ? true : false;
2608 }
2609 }
2610
2611 /**
2612 * ice_sched_rm_agg_cfg - remove the aggregator node
2613 * @pi: port information structure
2614 * @agg_id: aggregator ID
2615 * @tc: TC number
2616 *
2617 * This function removes the aggregator node and intermediate nodes if any
2618 * from the given TC
2619 */
2620 static int
ice_sched_rm_agg_cfg(struct ice_port_info * pi,u32 agg_id,u8 tc)2621 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2622 {
2623 struct ice_sched_node *tc_node, *agg_node;
2624 struct ice_hw *hw = pi->hw;
2625
2626 tc_node = ice_sched_get_tc_node(pi, tc);
2627 if (!tc_node)
2628 return ICE_ERR_CFG;
2629
2630 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2631 if (!agg_node)
2632 return ICE_ERR_DOES_NOT_EXIST;
2633
2634 /* Can't remove the aggregator node if it has children */
2635 if (ice_sched_is_agg_inuse(pi, agg_node))
2636 return ICE_ERR_IN_USE;
2637
2638 /* need to remove the whole subtree if aggregator node is the
2639 * only child.
2640 */
2641 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
2642 struct ice_sched_node *parent = agg_node->parent;
2643
2644 if (!parent)
2645 return ICE_ERR_CFG;
2646
2647 if (parent->num_children > 1)
2648 break;
2649
2650 agg_node = parent;
2651 }
2652
2653 ice_free_sched_node(pi, agg_node);
2654 return 0;
2655 }
2656
2657 /**
2658 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2659 * @pi: port information structure
2660 * @agg_info: aggregator ID
2661 * @tc: TC number
2662 * @rm_vsi_info: bool value true or false
2663 *
2664 * This function removes aggregator reference to VSI of given TC. It removes
2665 * the aggregator configuration completely for requested TC. The caller needs
2666 * to hold the scheduler lock.
2667 */
2668 static int
ice_rm_agg_cfg_tc(struct ice_port_info * pi,struct ice_sched_agg_info * agg_info,u8 tc,bool rm_vsi_info)2669 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
2670 u8 tc, bool rm_vsi_info)
2671 {
2672 int status = 0;
2673
2674 /* If nothing to remove - return success */
2675 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2676 goto exit_rm_agg_cfg_tc;
2677
2678 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
2679 if (status)
2680 goto exit_rm_agg_cfg_tc;
2681
2682 /* Delete aggregator node(s) */
2683 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
2684 if (status)
2685 goto exit_rm_agg_cfg_tc;
2686
2687 ice_clear_bit(tc, agg_info->tc_bitmap);
2688 exit_rm_agg_cfg_tc:
2689 return status;
2690 }
2691
2692 /**
2693 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2694 * @pi: port information structure
2695 * @agg_id: aggregator ID
2696 * @tc_bitmap: 8 bits TC bitmap
2697 *
2698 * Save aggregator TC bitmap. This function needs to be called with scheduler
2699 * lock held.
2700 */
2701 static int
ice_save_agg_tc_bitmap(struct ice_port_info * pi,u32 agg_id,ice_bitmap_t * tc_bitmap)2702 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
2703 ice_bitmap_t *tc_bitmap)
2704 {
2705 struct ice_sched_agg_info *agg_info;
2706
2707 agg_info = ice_get_agg_info(pi->hw, agg_id);
2708 if (!agg_info)
2709 return ICE_ERR_PARAM;
2710 ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
2711 ICE_MAX_TRAFFIC_CLASS);
2712 return 0;
2713 }
2714
/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC. It is a no-op (returns 0) when the aggregator already
 * exists on this TC. It works in two passes: first count how many new nodes
 * each layer needs (because existing intermediate nodes are full), then add
 * the nodes layer by layer down to the aggregator layer.
 *
 * Returns 0 on success, ICE_ERR_CFG on tree manipulation failure.
 */
static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	int status = 0;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			/* no node added on this layer - descend through the
			 * existing first child
			 */
			parent = parent->children[0];
		}
	}

	return 0;
}
2797
/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track it's resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
 * resources and remove aggregator ID.
 * This function needs to be called with scheduler lock held.
 */
static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_hw *hw = pi->hw;
	int status = 0;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = (struct ice_sched_agg_info *)
			ice_malloc(hw, sizeof(*agg_info));
		if (!agg_info)
			return ICE_ERR_NO_MEMORY;

		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list */
		LIST_ADD(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		ice_set_bit(tc, agg_info->tc_bitmap);
	}

	/* NOTE(review): on a mid-loop failure, already-processed TCs keep
	 * their new state; the caller sees the error but no rollback occurs.
	 */
	return status;
}
2866
2867 /**
2868 * ice_cfg_agg - config aggregator node
2869 * @pi: port information structure
2870 * @agg_id: aggregator ID
2871 * @agg_type: aggregator type queue, VSI, or aggregator group
2872 * @tc_bitmap: bits TC bitmap
2873 *
2874 * This function configures aggregator node(s).
2875 */
2876 int
ice_cfg_agg(struct ice_port_info * pi,u32 agg_id,enum ice_agg_type agg_type,u8 tc_bitmap)2877 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
2878 u8 tc_bitmap)
2879 {
2880 ice_bitmap_t bitmap = tc_bitmap;
2881 int status;
2882
2883 ice_acquire_lock(&pi->sched_lock);
2884 status = ice_sched_cfg_agg(pi, agg_id, agg_type,
2885 (ice_bitmap_t *)&bitmap);
2886 if (!status)
2887 status = ice_save_agg_tc_bitmap(pi, agg_id,
2888 (ice_bitmap_t *)&bitmap);
2889 ice_release_lock(&pi->sched_lock);
2890 return status;
2891 }
2892
2893 /**
2894 * ice_get_agg_vsi_info - get the aggregator ID
2895 * @agg_info: aggregator info
2896 * @vsi_handle: software VSI handle
2897 *
2898 * The function returns aggregator VSI info based on VSI handle. This function
2899 * needs to be called with scheduler lock held.
2900 */
2901 static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info * agg_info,u16 vsi_handle)2902 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
2903 {
2904 struct ice_sched_agg_vsi_info *agg_vsi_info;
2905
2906 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
2907 ice_sched_agg_vsi_info, list_entry)
2908 if (agg_vsi_info->vsi_handle == vsi_handle)
2909 return agg_vsi_info;
2910
2911 return NULL;
2912 }
2913
2914 /**
2915 * ice_get_vsi_agg_info - get the aggregator info of VSI
2916 * @hw: pointer to the hardware structure
2917 * @vsi_handle: Sw VSI handle
2918 *
2919 * The function returns aggregator info of VSI represented via vsi_handle. The
2920 * VSI has in this case a different aggregator than the default one. This
2921 * function needs to be called with scheduler lock held.
2922 */
2923 static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw * hw,u16 vsi_handle)2924 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
2925 {
2926 struct ice_sched_agg_info *agg_info;
2927
2928 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
2929 list_entry) {
2930 struct ice_sched_agg_vsi_info *agg_vsi_info;
2931
2932 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2933 if (agg_vsi_info)
2934 return agg_info;
2935 }
2936 return NULL;
2937 }
2938
2939 /**
2940 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2941 * @pi: port information structure
2942 * @agg_id: aggregator ID
2943 * @vsi_handle: software VSI handle
2944 * @tc_bitmap: TC bitmap of enabled TC(s)
2945 *
2946 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler
2947 * lock held.
2948 */
2949 static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info * pi,u32 agg_id,u16 vsi_handle,ice_bitmap_t * tc_bitmap)2950 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2951 ice_bitmap_t *tc_bitmap)
2952 {
2953 struct ice_sched_agg_vsi_info *agg_vsi_info;
2954 struct ice_sched_agg_info *agg_info;
2955
2956 agg_info = ice_get_agg_info(pi->hw, agg_id);
2957 if (!agg_info)
2958 return ICE_ERR_PARAM;
2959 /* check if entry already exist */
2960 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2961 if (!agg_vsi_info)
2962 return ICE_ERR_PARAM;
2963 ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
2964 ICE_MAX_TRAFFIC_CLASS);
2965 return 0;
2966 }
2967
2968 /**
2969 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2970 * @pi: port information structure
2971 * @agg_id: aggregator ID
2972 * @vsi_handle: software VSI handle
2973 * @tc_bitmap: TC bitmap of enabled TC(s)
2974 *
2975 * This function moves VSI to a new or default aggregator node. If VSI is
2976 * already associated to the aggregator node then no operation is performed on
2977 * the tree. This function needs to be called with scheduler lock held.
2978 */
2979 static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info * pi,u32 agg_id,u16 vsi_handle,ice_bitmap_t * tc_bitmap)2980 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
2981 u16 vsi_handle, ice_bitmap_t *tc_bitmap)
2982 {
2983 struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
2984 struct ice_sched_agg_info *agg_info, *old_agg_info;
2985 struct ice_hw *hw = pi->hw;
2986 int status = 0;
2987 u8 tc;
2988
2989 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2990 return ICE_ERR_PARAM;
2991 agg_info = ice_get_agg_info(hw, agg_id);
2992 if (!agg_info)
2993 return ICE_ERR_PARAM;
2994 /* If the vsi is already part of another aggregator then update
2995 * its vsi info list
2996 */
2997 old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
2998 if (old_agg_info && old_agg_info != agg_info) {
2999 struct ice_sched_agg_vsi_info *vtmp;
3000
3001 LIST_FOR_EACH_ENTRY_SAFE(old_agg_vsi_info, vtmp,
3002 &old_agg_info->agg_vsi_list,
3003 ice_sched_agg_vsi_info, list_entry)
3004 if (old_agg_vsi_info->vsi_handle == vsi_handle)
3005 break;
3006 }
3007
3008 /* check if entry already exist */
3009 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
3010 if (!agg_vsi_info) {
3011 /* Create new entry for VSI under aggregator list */
3012 agg_vsi_info = (struct ice_sched_agg_vsi_info *)
3013 ice_malloc(hw, sizeof(*agg_vsi_info));
3014 if (!agg_vsi_info)
3015 return ICE_ERR_PARAM;
3016
3017 /* add VSI ID into the aggregator list */
3018 agg_vsi_info->vsi_handle = vsi_handle;
3019 LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
3020 }
3021 /* Move VSI node to new aggregator node for requested TC(s) */
3022 ice_for_each_traffic_class(tc) {
3023 if (!ice_is_tc_ena(*tc_bitmap, tc))
3024 continue;
3025
3026 /* Move VSI to new aggregator */
3027 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
3028 if (status)
3029 break;
3030
3031 ice_set_bit(tc, agg_vsi_info->tc_bitmap);
3032 if (old_agg_vsi_info)
3033 ice_clear_bit(tc, old_agg_vsi_info->tc_bitmap);
3034 }
3035 if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
3036 LIST_DEL(&old_agg_vsi_info->list_entry);
3037 ice_free(pi->hw, old_agg_vsi_info);
3038 }
3039 return status;
3040 }
3041
3042 /**
3043 * ice_sched_rm_unused_rl_prof - remove unused RL profile
3044 * @hw: pointer to the hardware structure
3045 *
3046 * This function removes unused rate limit profiles from the HW and
3047 * SW DB. The caller needs to hold scheduler lock.
3048 */
ice_sched_rm_unused_rl_prof(struct ice_hw * hw)3049 static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
3050 {
3051 u16 ln;
3052
3053 for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
3054 struct ice_aqc_rl_profile_info *rl_prof_elem;
3055 struct ice_aqc_rl_profile_info *rl_prof_tmp;
3056
3057 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
3058 &hw->rl_prof_list[ln],
3059 ice_aqc_rl_profile_info, list_entry) {
3060 if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
3061 ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
3062 }
3063 }
3064 }
3065
3066 /**
3067 * ice_sched_update_elem - update element
3068 * @hw: pointer to the HW struct
3069 * @node: pointer to node
3070 * @info: node info to update
3071 *
3072 * Update the HW DB, and local SW DB of node. Update the scheduling
3073 * parameters of node from argument info data buffer (Info->data buf) and
3074 * returns success or error on config sched element failure. The caller
3075 * needs to hold scheduler lock.
3076 */
3077 static int
ice_sched_update_elem(struct ice_hw * hw,struct ice_sched_node * node,struct ice_aqc_txsched_elem_data * info)3078 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
3079 struct ice_aqc_txsched_elem_data *info)
3080 {
3081 struct ice_aqc_txsched_elem_data buf;
3082 u16 elem_cfgd = 0;
3083 u16 num_elems = 1;
3084 int status;
3085
3086 buf = *info;
3087 /* For TC nodes, CIR config is not supported */
3088 if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC)
3089 buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR;
3090 /* Parent TEID is reserved field in this aq call */
3091 buf.parent_teid = 0;
3092 /* Element type is reserved field in this aq call */
3093 buf.data.elem_type = 0;
3094 /* Flags is reserved field in this aq call */
3095 buf.data.flags = 0;
3096
3097 /* Update HW DB */
3098 /* Configure element node */
3099 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
3100 &elem_cfgd, NULL);
3101 if (status || elem_cfgd != num_elems) {
3102 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
3103 return ICE_ERR_CFG;
3104 }
3105
3106 /* Config success case */
3107 /* Now update local SW DB */
3108 /* Only copy the data portion of info buffer */
3109 node->info.data = info->data;
3110 return status;
3111 }
3112
3113 /**
3114 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
3115 * @hw: pointer to the HW struct
3116 * @node: sched node to configure
3117 * @rl_type: rate limit type CIR, EIR, or shared
3118 * @bw_alloc: BW weight/allocation
3119 *
3120 * This function configures node element's BW allocation.
3121 */
3122 static int
ice_sched_cfg_node_bw_alloc(struct ice_hw * hw,struct ice_sched_node * node,enum ice_rl_type rl_type,u16 bw_alloc)3123 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
3124 enum ice_rl_type rl_type, u16 bw_alloc)
3125 {
3126 struct ice_aqc_txsched_elem_data buf;
3127 struct ice_aqc_txsched_elem *data;
3128 int status;
3129
3130 buf = node->info;
3131 data = &buf.data;
3132 if (rl_type == ICE_MIN_BW) {
3133 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
3134 data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
3135 } else if (rl_type == ICE_MAX_BW) {
3136 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3137 data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
3138 } else {
3139 return ICE_ERR_PARAM;
3140 }
3141
3142 /* Configure element */
3143 status = ice_sched_update_elem(hw, node, &buf);
3144 return status;
3145 }
3146
3147 /**
3148 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
3149 * @pi: port information structure
3150 * @agg_id: aggregator ID
3151 * @vsi_handle: software VSI handle
3152 * @tc_bitmap: TC bitmap of enabled TC(s)
3153 *
3154 * Move or associate VSI to a new or default aggregator node.
3155 */
3156 int
ice_move_vsi_to_agg(struct ice_port_info * pi,u32 agg_id,u16 vsi_handle,u8 tc_bitmap)3157 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
3158 u8 tc_bitmap)
3159 {
3160 ice_bitmap_t bitmap = tc_bitmap;
3161 int status;
3162
3163 ice_acquire_lock(&pi->sched_lock);
3164 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
3165 (ice_bitmap_t *)&bitmap);
3166 if (!status)
3167 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
3168 (ice_bitmap_t *)&bitmap);
3169 ice_release_lock(&pi->sched_lock);
3170 return status;
3171 }
3172
3173 /**
3174 * ice_rm_agg_cfg - remove aggregator configuration
3175 * @pi: port information structure
3176 * @agg_id: aggregator ID
3177 *
3178 * This function removes aggregator reference to VSI and delete aggregator ID
3179 * info. It removes the aggregator configuration completely.
3180 */
ice_rm_agg_cfg(struct ice_port_info * pi,u32 agg_id)3181 int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
3182 {
3183 struct ice_sched_agg_info *agg_info;
3184 int status = 0;
3185 u8 tc;
3186
3187 ice_acquire_lock(&pi->sched_lock);
3188 agg_info = ice_get_agg_info(pi->hw, agg_id);
3189 if (!agg_info) {
3190 status = ICE_ERR_DOES_NOT_EXIST;
3191 goto exit_ice_rm_agg_cfg;
3192 }
3193
3194 ice_for_each_traffic_class(tc) {
3195 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
3196 if (status)
3197 goto exit_ice_rm_agg_cfg;
3198 }
3199
3200 if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
3201 status = ICE_ERR_IN_USE;
3202 goto exit_ice_rm_agg_cfg;
3203 }
3204
3205 /* Safe to delete entry now */
3206 LIST_DEL(&agg_info->list_entry);
3207 ice_free(pi->hw, agg_info);
3208
3209 /* Remove unused RL profile IDs from HW and SW DB */
3210 ice_sched_rm_unused_rl_prof(pi->hw);
3211
3212 exit_ice_rm_agg_cfg:
3213 ice_release_lock(&pi->sched_lock);
3214 return status;
3215 }
3216
3217 /**
3218 * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
3219 * @bw_t_info: bandwidth type information structure
3220 * @bw_alloc: Bandwidth allocation information
3221 *
3222 * Save or clear CIR BW alloc information (bw_alloc) in the passed param
3223 * bw_t_info.
3224 */
3225 static void
ice_set_clear_cir_bw_alloc(struct ice_bw_type_info * bw_t_info,u16 bw_alloc)3226 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
3227 {
3228 bw_t_info->cir_bw.bw_alloc = bw_alloc;
3229 if (bw_t_info->cir_bw.bw_alloc)
3230 ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
3231 else
3232 ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
3233 }
3234
3235 /**
3236 * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
3237 * @bw_t_info: bandwidth type information structure
3238 * @bw_alloc: Bandwidth allocation information
3239 *
3240 * Save or clear EIR BW alloc information (bw_alloc) in the passed param
3241 * bw_t_info.
3242 */
3243 static void
ice_set_clear_eir_bw_alloc(struct ice_bw_type_info * bw_t_info,u16 bw_alloc)3244 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
3245 {
3246 bw_t_info->eir_bw.bw_alloc = bw_alloc;
3247 if (bw_t_info->eir_bw.bw_alloc)
3248 ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
3249 else
3250 ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
3251 }
3252
3253 /**
3254 * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
3255 * @pi: port information structure
3256 * @vsi_handle: sw VSI handle
3257 * @tc: traffic class
3258 * @rl_type: rate limit type min or max
3259 * @bw_alloc: Bandwidth allocation information
3260 *
3261 * Save BW alloc information of VSI type node for post replay use.
3262 */
3263 static int
ice_sched_save_vsi_bw_alloc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type,u16 bw_alloc)3264 ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3265 enum ice_rl_type rl_type, u16 bw_alloc)
3266 {
3267 struct ice_vsi_ctx *vsi_ctx;
3268
3269 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3270 return ICE_ERR_PARAM;
3271 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3272 if (!vsi_ctx)
3273 return ICE_ERR_PARAM;
3274 switch (rl_type) {
3275 case ICE_MIN_BW:
3276 ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
3277 bw_alloc);
3278 break;
3279 case ICE_MAX_BW:
3280 ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
3281 bw_alloc);
3282 break;
3283 default:
3284 return ICE_ERR_PARAM;
3285 }
3286 return 0;
3287 }
3288
3289 /**
3290 * ice_set_clear_cir_bw - set or clear CIR BW
3291 * @bw_t_info: bandwidth type information structure
3292 * @bw: bandwidth in Kbps - Kilo bits per sec
3293 *
3294 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
3295 */
ice_set_clear_cir_bw(struct ice_bw_type_info * bw_t_info,u32 bw)3296 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3297 {
3298 if (bw == ICE_SCHED_DFLT_BW) {
3299 ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
3300 bw_t_info->cir_bw.bw = 0;
3301 } else {
3302 /* Save type of BW information */
3303 ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
3304 bw_t_info->cir_bw.bw = bw;
3305 }
3306 }
3307
3308 /**
3309 * ice_set_clear_eir_bw - set or clear EIR BW
3310 * @bw_t_info: bandwidth type information structure
3311 * @bw: bandwidth in Kbps - Kilo bits per sec
3312 *
3313 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
3314 */
ice_set_clear_eir_bw(struct ice_bw_type_info * bw_t_info,u32 bw)3315 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3316 {
3317 if (bw == ICE_SCHED_DFLT_BW) {
3318 ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3319 bw_t_info->eir_bw.bw = 0;
3320 } else {
3321 /* save EIR BW information */
3322 ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3323 bw_t_info->eir_bw.bw = bw;
3324 }
3325 }
3326
3327 /**
3328 * ice_set_clear_shared_bw - set or clear shared BW
3329 * @bw_t_info: bandwidth type information structure
3330 * @bw: bandwidth in Kbps - Kilo bits per sec
3331 *
3332 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
3333 */
ice_set_clear_shared_bw(struct ice_bw_type_info * bw_t_info,u32 bw)3334 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3335 {
3336 if (bw == ICE_SCHED_DFLT_BW) {
3337 ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3338 bw_t_info->shared_bw = 0;
3339 } else {
3340 /* save shared BW information */
3341 ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3342 bw_t_info->shared_bw = bw;
3343 }
3344 }
3345
3346 /**
3347 * ice_sched_save_vsi_bw - save VSI node's BW information
3348 * @pi: port information structure
3349 * @vsi_handle: sw VSI handle
3350 * @tc: traffic class
3351 * @rl_type: rate limit type min, max, or shared
3352 * @bw: bandwidth in Kbps - Kilo bits per sec
3353 *
3354 * Save BW information of VSI type node for post replay use.
3355 */
3356 static int
ice_sched_save_vsi_bw(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type,u32 bw)3357 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3358 enum ice_rl_type rl_type, u32 bw)
3359 {
3360 struct ice_vsi_ctx *vsi_ctx;
3361
3362 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3363 return ICE_ERR_PARAM;
3364 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3365 if (!vsi_ctx)
3366 return ICE_ERR_PARAM;
3367 switch (rl_type) {
3368 case ICE_MIN_BW:
3369 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3370 break;
3371 case ICE_MAX_BW:
3372 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3373 break;
3374 case ICE_SHARED_BW:
3375 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3376 break;
3377 default:
3378 return ICE_ERR_PARAM;
3379 }
3380 return 0;
3381 }
3382
3383 /**
3384 * ice_set_clear_prio - set or clear priority information
3385 * @bw_t_info: bandwidth type information structure
3386 * @prio: priority to save
3387 *
3388 * Save or clear priority (prio) in the passed param bw_t_info.
3389 */
ice_set_clear_prio(struct ice_bw_type_info * bw_t_info,u8 prio)3390 static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
3391 {
3392 bw_t_info->generic = prio;
3393 if (bw_t_info->generic)
3394 ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
3395 else
3396 ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
3397 }
3398
3399 /**
3400 * ice_sched_save_vsi_prio - save VSI node's priority information
3401 * @pi: port information structure
3402 * @vsi_handle: Software VSI handle
3403 * @tc: traffic class
3404 * @prio: priority to save
3405 *
3406 * Save priority information of VSI type node for post replay use.
3407 */
3408 static int
ice_sched_save_vsi_prio(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u8 prio)3409 ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3410 u8 prio)
3411 {
3412 struct ice_vsi_ctx *vsi_ctx;
3413
3414 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3415 return ICE_ERR_PARAM;
3416 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3417 if (!vsi_ctx)
3418 return ICE_ERR_PARAM;
3419 if (tc >= ICE_MAX_TRAFFIC_CLASS)
3420 return ICE_ERR_PARAM;
3421 ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
3422 return 0;
3423 }
3424
3425 /**
3426 * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
3427 * @pi: port information structure
3428 * @agg_id: node aggregator ID
3429 * @tc: traffic class
3430 * @rl_type: rate limit type min or max
3431 * @bw_alloc: bandwidth alloc information
3432 *
3433 * Save BW alloc information of AGG type node for post replay use.
3434 */
3435 static int
ice_sched_save_agg_bw_alloc(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type,u16 bw_alloc)3436 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3437 enum ice_rl_type rl_type, u16 bw_alloc)
3438 {
3439 struct ice_sched_agg_info *agg_info;
3440
3441 agg_info = ice_get_agg_info(pi->hw, agg_id);
3442 if (!agg_info)
3443 return ICE_ERR_PARAM;
3444 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
3445 return ICE_ERR_PARAM;
3446 switch (rl_type) {
3447 case ICE_MIN_BW:
3448 ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
3449 break;
3450 case ICE_MAX_BW:
3451 ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
3452 break;
3453 default:
3454 return ICE_ERR_PARAM;
3455 }
3456 return 0;
3457 }
3458
3459 /**
3460 * ice_sched_save_agg_bw - save aggregator node's BW information
3461 * @pi: port information structure
3462 * @agg_id: node aggregator ID
3463 * @tc: traffic class
3464 * @rl_type: rate limit type min, max, or shared
3465 * @bw: bandwidth in Kbps - Kilo bits per sec
3466 *
3467 * Save BW information of AGG type node for post replay use.
3468 */
3469 static int
ice_sched_save_agg_bw(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type,u32 bw)3470 ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
3471 enum ice_rl_type rl_type, u32 bw)
3472 {
3473 struct ice_sched_agg_info *agg_info;
3474
3475 agg_info = ice_get_agg_info(pi->hw, agg_id);
3476 if (!agg_info)
3477 return ICE_ERR_PARAM;
3478 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
3479 return ICE_ERR_PARAM;
3480 switch (rl_type) {
3481 case ICE_MIN_BW:
3482 ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
3483 break;
3484 case ICE_MAX_BW:
3485 ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
3486 break;
3487 case ICE_SHARED_BW:
3488 ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
3489 break;
3490 default:
3491 return ICE_ERR_PARAM;
3492 }
3493 return 0;
3494 }
3495
3496 /**
3497 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
3498 * @pi: port information structure
3499 * @vsi_handle: software VSI handle
3500 * @tc: traffic class
3501 * @rl_type: min or max
3502 * @bw: bandwidth in Kbps
3503 *
3504 * This function configures BW limit of VSI scheduling node based on TC
3505 * information.
3506 */
3507 int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type,u32 bw)3508 ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3509 enum ice_rl_type rl_type, u32 bw)
3510 {
3511 int status;
3512
3513 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
3514 ICE_AGG_TYPE_VSI,
3515 tc, rl_type, bw);
3516 if (!status) {
3517 ice_acquire_lock(&pi->sched_lock);
3518 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
3519 ice_release_lock(&pi->sched_lock);
3520 }
3521 return status;
3522 }
3523
3524 /**
3525 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
3526 * @pi: port information structure
3527 * @vsi_handle: software VSI handle
3528 * @tc: traffic class
3529 * @rl_type: min or max
3530 *
3531 * This function configures default BW limit of VSI scheduling node based on TC
3532 * information.
3533 */
3534 int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type)3535 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3536 enum ice_rl_type rl_type)
3537 {
3538 int status;
3539
3540 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
3541 ICE_AGG_TYPE_VSI,
3542 tc, rl_type,
3543 ICE_SCHED_DFLT_BW);
3544 if (!status) {
3545 ice_acquire_lock(&pi->sched_lock);
3546 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
3547 ICE_SCHED_DFLT_BW);
3548 ice_release_lock(&pi->sched_lock);
3549 }
3550 return status;
3551 }
3552
3553 /**
3554 * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
3555 * @pi: port information structure
3556 * @agg_id: aggregator ID
3557 * @tc: traffic class
3558 * @rl_type: min or max
3559 * @bw: bandwidth in Kbps
3560 *
3561 * This function applies BW limit to aggregator scheduling node based on TC
3562 * information.
3563 */
3564 int
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type,u32 bw)3565 ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3566 enum ice_rl_type rl_type, u32 bw)
3567 {
3568 int status;
3569
3570 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
3571 tc, rl_type, bw);
3572 if (!status) {
3573 ice_acquire_lock(&pi->sched_lock);
3574 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
3575 ice_release_lock(&pi->sched_lock);
3576 }
3577 return status;
3578 }
3579
3580 /**
3581 * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
3582 * @pi: port information structure
3583 * @agg_id: aggregator ID
3584 * @tc: traffic class
3585 * @rl_type: min or max
3586 *
3587 * This function applies default BW limit to aggregator scheduling node based
3588 * on TC information.
3589 */
3590 int
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type)3591 ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3592 enum ice_rl_type rl_type)
3593 {
3594 int status;
3595
3596 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
3597 tc, rl_type,
3598 ICE_SCHED_DFLT_BW);
3599 if (!status) {
3600 ice_acquire_lock(&pi->sched_lock);
3601 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
3602 ICE_SCHED_DFLT_BW);
3603 ice_release_lock(&pi->sched_lock);
3604 }
3605 return status;
3606 }
3607
3608 /**
3609 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
3610 * @pi: port information structure
3611 * @vsi_handle: software VSI handle
3612 * @min_bw: minimum bandwidth in Kbps
3613 * @max_bw: maximum bandwidth in Kbps
3614 * @shared_bw: shared bandwidth in Kbps
3615 *
3616 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
3617 * classes for VSI matching handle.
3618 */
3619 int
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info * pi,u16 vsi_handle,u32 min_bw,u32 max_bw,u32 shared_bw)3620 ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
3621 u32 max_bw, u32 shared_bw)
3622 {
3623 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
3624 shared_bw);
3625 }
3626
3627 /**
3628 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
3629 * @pi: port information structure
3630 * @vsi_handle: software VSI handle
3631 *
3632 * This function removes the shared rate limiter(SRL) of all VSI type nodes
3633 * across all traffic classes for VSI matching handle.
3634 */
3635 int
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info * pi,u16 vsi_handle)3636 ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
3637 {
3638 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
3639 ICE_SCHED_DFLT_BW,
3640 ICE_SCHED_DFLT_BW,
3641 ICE_SCHED_DFLT_BW);
3642 }
3643
3644 /**
3645 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
3646 * @pi: port information structure
3647 * @agg_id: aggregator ID
3648 * @min_bw: minimum bandwidth in Kbps
3649 * @max_bw: maximum bandwidth in Kbps
3650 * @shared_bw: shared bandwidth in Kbps
3651 *
3652 * This function configures the shared rate limiter(SRL) of all aggregator type
3653 * nodes across all traffic classes for aggregator matching agg_id.
3654 */
3655 int
ice_cfg_agg_bw_shared_lmt(struct ice_port_info * pi,u32 agg_id,u32 min_bw,u32 max_bw,u32 shared_bw)3656 ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
3657 u32 max_bw, u32 shared_bw)
3658 {
3659 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw,
3660 shared_bw);
3661 }
3662
3663 /**
3664 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
3665 * @pi: port information structure
3666 * @agg_id: aggregator ID
3667 *
3668 * This function removes the shared rate limiter(SRL) of all aggregator type
3669 * nodes across all traffic classes for aggregator matching agg_id.
3670 */
3671 int
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info * pi,u32 agg_id)3672 ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
3673 {
3674 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
3675 ICE_SCHED_DFLT_BW,
3676 ICE_SCHED_DFLT_BW);
3677 }
3678
3679 /**
3680 * ice_cfg_agg_bw_shared_lmt_per_tc - config aggregator BW shared limit per tc
3681 * @pi: port information structure
3682 * @agg_id: aggregator ID
3683 * @tc: traffic class
3684 * @min_bw: minimum bandwidth in Kbps
3685 * @max_bw: maximum bandwidth in Kbps
3686 * @shared_bw: shared bandwidth in Kbps
3687 *
3688 * This function configures the shared rate limiter(SRL) of all aggregator type
3689 * nodes across all traffic classes for aggregator matching agg_id.
3690 */
3691 int
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)3692 ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3693 u32 min_bw, u32 max_bw, u32 shared_bw)
3694 {
3695 return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw,
3696 max_bw, shared_bw);
3697 }
3698
3699 /**
3700 * ice_cfg_agg_bw_no_shared_lmt_per_tc - cfg aggregator BW shared limit per tc
3701 * @pi: port information structure
3702 * @agg_id: aggregator ID
3703 * @tc: traffic class
3704 *
3705 * This function configures the shared rate limiter(SRL) of all aggregator type
3706 * nodes across all traffic classes for aggregator matching agg_id.
3707 */
3708 int
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc)3709 ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
3710 {
3711 return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
3712 ICE_SCHED_DFLT_BW,
3713 ICE_SCHED_DFLT_BW,
3714 ICE_SCHED_DFLT_BW);
3715 }
3716
3717 /**
3718 * ice_cfg_vsi_q_priority - config VSI queue priority of node
3719 * @pi: port information structure
3720 * @num_qs: number of VSI queues
3721 * @q_ids: queue IDs array
3722 * @q_prio: queue priority array
3723 *
3724 * This function configures the queue node priority (Sibling Priority) of the
3725 * passed in VSI's queue(s) for a given traffic class (TC).
3726 */
3727 int
ice_cfg_vsi_q_priority(struct ice_port_info * pi,u16 num_qs,u32 * q_ids,u8 * q_prio)3728 ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
3729 u8 *q_prio)
3730 {
3731 int status = ICE_ERR_PARAM;
3732 u16 i;
3733
3734 ice_acquire_lock(&pi->sched_lock);
3735
3736 for (i = 0; i < num_qs; i++) {
3737 struct ice_sched_node *node;
3738
3739 node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
3740 if (!node || node->info.data.elem_type !=
3741 ICE_AQC_ELEM_TYPE_LEAF) {
3742 status = ICE_ERR_PARAM;
3743 break;
3744 }
3745 /* Configure Priority */
3746 status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
3747 if (status)
3748 break;
3749 }
3750
3751 ice_release_lock(&pi->sched_lock);
3752 return status;
3753 }
3754
/**
 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
 * @pi: port information structure
 * @agg_id: Aggregator ID
 * @num_vsis: number of VSI(s)
 * @vsi_handle_arr: array of software VSI handles
 * @node_prio: pointer to node priority
 * @tc: traffic class
 *
 * This function configures the node priority (Sibling Priority) of the
 * passed in VSI's for a given traffic class (TC) of an Aggregator ID.
 *
 * Returns 0 on success; ICE_ERR_PARAM when the aggregator, TC node, a VSI
 * handle, or a VSI node cannot be resolved, or the error from applying or
 * saving the priority.
 */
int
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	int status = ICE_ERR_PARAM;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator must already exist in the software DB */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* More VSIs than the aggregator layer allows cannot all be siblings */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		/* Only touch VSI nodes that actually sit under this
		 * aggregator's subtree.
		 */
		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Save for replay after reset */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}
3842
3843 /**
3844 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
3845 * @pi: port information structure
3846 * @vsi_handle: software VSI handle
3847 * @ena_tcmap: enabled TC map
3848 * @rl_type: Rate limit type CIR/EIR
3849 * @bw_alloc: Array of BW alloc
3850 *
3851 * This function configures the BW allocation of the passed in VSI's
3852 * node(s) for enabled traffic class.
3853 */
3854 int
ice_cfg_vsi_bw_alloc(struct ice_port_info * pi,u16 vsi_handle,u8 ena_tcmap,enum ice_rl_type rl_type,u8 * bw_alloc)3855 ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
3856 enum ice_rl_type rl_type, u8 *bw_alloc)
3857 {
3858 int status = 0;
3859 u8 tc;
3860
3861 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3862 return ICE_ERR_PARAM;
3863
3864 ice_acquire_lock(&pi->sched_lock);
3865
3866 /* Return success if no nodes are present across TC */
3867 ice_for_each_traffic_class(tc) {
3868 struct ice_sched_node *tc_node, *vsi_node;
3869
3870 if (!ice_is_tc_ena(ena_tcmap, tc))
3871 continue;
3872
3873 tc_node = ice_sched_get_tc_node(pi, tc);
3874 if (!tc_node)
3875 continue;
3876
3877 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
3878 if (!vsi_node)
3879 continue;
3880
3881 status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
3882 bw_alloc[tc]);
3883 if (status)
3884 break;
3885 status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
3886 rl_type, bw_alloc[tc]);
3887 if (status)
3888 break;
3889 }
3890
3891 ice_release_lock(&pi->sched_lock);
3892 return status;
3893 }
3894
3895 /**
3896 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
3897 * @pi: port information structure
3898 * @agg_id: aggregator ID
3899 * @ena_tcmap: enabled TC map
3900 * @rl_type: rate limit type CIR/EIR
3901 * @bw_alloc: array of BW alloc
3902 *
3903 * This function configures the BW allocation of passed in aggregator for
3904 * enabled traffic class(s).
3905 */
3906 int
ice_cfg_agg_bw_alloc(struct ice_port_info * pi,u32 agg_id,u8 ena_tcmap,enum ice_rl_type rl_type,u8 * bw_alloc)3907 ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
3908 enum ice_rl_type rl_type, u8 *bw_alloc)
3909 {
3910 struct ice_sched_agg_info *agg_info;
3911 bool agg_id_present = false;
3912 struct ice_hw *hw = pi->hw;
3913 int status = 0;
3914 u8 tc;
3915
3916 ice_acquire_lock(&pi->sched_lock);
3917 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
3918 list_entry)
3919 if (agg_info->agg_id == agg_id) {
3920 agg_id_present = true;
3921 break;
3922 }
3923 if (!agg_id_present) {
3924 status = ICE_ERR_PARAM;
3925 goto exit_cfg_agg_bw_alloc;
3926 }
3927
3928 /* Return success if no nodes are present across TC */
3929 ice_for_each_traffic_class(tc) {
3930 struct ice_sched_node *tc_node, *agg_node;
3931
3932 if (!ice_is_tc_ena(ena_tcmap, tc))
3933 continue;
3934
3935 tc_node = ice_sched_get_tc_node(pi, tc);
3936 if (!tc_node)
3937 continue;
3938
3939 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
3940 if (!agg_node)
3941 continue;
3942
3943 status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
3944 bw_alloc[tc]);
3945 if (status)
3946 break;
3947 status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
3948 bw_alloc[tc]);
3949 if (status)
3950 break;
3951 }
3952
3953 exit_cfg_agg_bw_alloc:
3954 ice_release_lock(&pi->sched_lock);
3955 return status;
3956 }
3957
3958 /**
3959 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
3960 * @hw: pointer to the HW struct
3961 * @bw: bandwidth in Kbps
3962 *
3963 * This function calculates the wakeup parameter of RL profile.
3964 */
ice_sched_calc_wakeup(struct ice_hw * hw,s32 bw)3965 static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
3966 {
3967 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
3968 s32 wakeup_f_int;
3969 u16 wakeup = 0;
3970
3971 /* Get the wakeup integer value */
3972 bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
3973 wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
3974 if (wakeup_int > 63) {
3975 wakeup = (u16)((1 << 15) | wakeup_int);
3976 } else {
3977 /* Calculate fraction value up to 4 decimals
3978 * Convert Integer value to a constant multiplier
3979 */
3980 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
3981 wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER *
3982 hw->psm_clk_freq, bytes_per_sec);
3983
3984 /* Get Fraction value */
3985 wakeup_f = wakeup_a - wakeup_b;
3986
3987 /* Round up the Fractional value via Ceil(Fractional value) */
3988 if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2))
3989 wakeup_f += 1;
3990
3991 wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION,
3992 ICE_RL_PROF_MULTIPLIER);
3993 wakeup |= (u16)(wakeup_int << 9);
3994 wakeup |= (u16)(0x1ff & wakeup_f_int);
3995 }
3996
3997 return wakeup;
3998 }
3999
4000 /**
4001 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
4002 * @hw: pointer to the HW struct
4003 * @bw: bandwidth in Kbps
4004 * @profile: profile parameters to return
4005 *
4006 * This function converts the BW to profile structure format.
4007 */
4008 static int
ice_sched_bw_to_rl_profile(struct ice_hw * hw,u32 bw,struct ice_aqc_rl_profile_elem * profile)4009 ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
4010 struct ice_aqc_rl_profile_elem *profile)
4011 {
4012 s64 bytes_per_sec, ts_rate, mv_tmp;
4013 int status = ICE_ERR_PARAM;
4014 bool found = false;
4015 s32 encode = 0;
4016 s64 mv = 0;
4017 s32 i;
4018
4019 /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
4020 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
4021 return status;
4022
4023 /* Bytes per second from Kbps */
4024 bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
4025
4026 /* encode is 6 bits but really useful are 5 bits */
4027 for (i = 0; i < 64; i++) {
4028 u64 pow_result = BIT_ULL(i);
4029
4030 ts_rate = DIV_S64((s64)hw->psm_clk_freq,
4031 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
4032 if (ts_rate <= 0)
4033 continue;
4034
4035 /* Multiplier value */
4036 mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
4037 ts_rate);
4038
4039 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
4040 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
4041
4042 /* First multiplier value greater than the given
4043 * accuracy bytes
4044 */
4045 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
4046 encode = i;
4047 found = true;
4048 break;
4049 }
4050 }
4051 if (found) {
4052 u16 wm;
4053
4054 wm = ice_sched_calc_wakeup(hw, bw);
4055 profile->rl_multiply = CPU_TO_LE16(mv);
4056 profile->wake_up_calc = CPU_TO_LE16(wm);
4057 profile->rl_encode = CPU_TO_LE16(encode);
4058 status = 0;
4059 } else {
4060 status = ICE_ERR_DOES_NOT_EXIST;
4061 }
4062
4063 return status;
4064 }
4065
/**
 * ice_sched_add_rl_profile - add RL profile
 * @hw: pointer to the hardware structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for corresponding BW
 * parameter. If it exists, it returns the associated profile otherwise
 * it creates a new rate limit profile for requested BW, and adds it to
 * the HW DB and local list. It returns the new profile or NULL on error.
 * The caller needs to hold the scheduler lock.
 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
			 u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	u8 profile_type;
	int status;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return NULL;
	/* Map the RL type to the AQ profile-type flag */
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	/* Reuse an existing profile of the same type and BW if present */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list with a zero reference count */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	/* Free the allocation on any failure path */
	ice_free(hw, rl_prof_elem);
	return NULL;
}
4145
4146 /**
4147 * ice_sched_cfg_node_bw_lmt - configure node sched params
4148 * @hw: pointer to the HW struct
4149 * @node: sched node to configure
4150 * @rl_type: rate limit type CIR, EIR, or shared
4151 * @rl_prof_id: rate limit profile ID
4152 *
4153 * This function configures node element's BW limit.
4154 */
4155 static int
ice_sched_cfg_node_bw_lmt(struct ice_hw * hw,struct ice_sched_node * node,enum ice_rl_type rl_type,u16 rl_prof_id)4156 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
4157 enum ice_rl_type rl_type, u16 rl_prof_id)
4158 {
4159 struct ice_aqc_txsched_elem_data buf;
4160 struct ice_aqc_txsched_elem *data;
4161
4162 buf = node->info;
4163 data = &buf.data;
4164 switch (rl_type) {
4165 case ICE_MIN_BW:
4166 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
4167 data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
4168 break;
4169 case ICE_MAX_BW:
4170 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
4171 data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
4172 break;
4173 case ICE_SHARED_BW:
4174 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
4175 data->srl_id = CPU_TO_LE16(rl_prof_id);
4176 break;
4177 default:
4178 /* Unknown rate limit type */
4179 return ICE_ERR_PARAM;
4180 }
4181
4182 /* Configure element */
4183 return ice_sched_update_elem(hw, node, &buf);
4184 }
4185
4186 /**
4187 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
4188 * @node: sched node
4189 * @rl_type: rate limit type
4190 *
4191 * If existing profile matches, it returns the corresponding rate
4192 * limit profile ID, otherwise it returns an invalid ID as error.
4193 */
4194 static u16
ice_sched_get_node_rl_prof_id(struct ice_sched_node * node,enum ice_rl_type rl_type)4195 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
4196 enum ice_rl_type rl_type)
4197 {
4198 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
4199 struct ice_aqc_txsched_elem *data;
4200
4201 data = &node->info.data;
4202 switch (rl_type) {
4203 case ICE_MIN_BW:
4204 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
4205 rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
4206 break;
4207 case ICE_MAX_BW:
4208 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
4209 rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
4210 break;
4211 case ICE_SHARED_BW:
4212 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
4213 rl_prof_id = LE16_TO_CPU(data->srl_id);
4214 break;
4215 default:
4216 break;
4217 }
4218
4219 return rl_prof_id;
4220 }
4221
4222 /**
4223 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
4224 * @pi: port information structure
4225 * @rl_type: type of rate limit BW - min, max, or shared
4226 * @layer_index: layer index
4227 *
4228 * This function returns requested profile creation layer.
4229 */
4230 static u8
ice_sched_get_rl_prof_layer(struct ice_port_info * pi,enum ice_rl_type rl_type,u8 layer_index)4231 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
4232 u8 layer_index)
4233 {
4234 struct ice_hw *hw = pi->hw;
4235
4236 if (layer_index >= hw->num_tx_sched_layers)
4237 return ICE_SCHED_INVAL_LAYER_NUM;
4238 switch (rl_type) {
4239 case ICE_MIN_BW:
4240 if (hw->layer_info[layer_index].max_cir_rl_profiles)
4241 return layer_index;
4242 break;
4243 case ICE_MAX_BW:
4244 if (hw->layer_info[layer_index].max_eir_rl_profiles)
4245 return layer_index;
4246 break;
4247 case ICE_SHARED_BW:
4248 /* if current layer doesn't support SRL profile creation
4249 * then try a layer up or down.
4250 */
4251 if (hw->layer_info[layer_index].max_srl_profiles)
4252 return layer_index;
4253 else if (layer_index < hw->num_tx_sched_layers - 1 &&
4254 hw->layer_info[layer_index + 1].max_srl_profiles)
4255 return layer_index + 1;
4256 else if (layer_index > 0 &&
4257 hw->layer_info[layer_index - 1].max_srl_profiles)
4258 return layer_index - 1;
4259 break;
4260 default:
4261 break;
4262 }
4263 return ICE_SCHED_INVAL_LAYER_NUM;
4264 }
4265
4266 /**
4267 * ice_sched_get_srl_node - get shared rate limit node
4268 * @node: tree node
4269 * @srl_layer: shared rate limit layer
4270 *
4271 * This function returns SRL node to be used for shared rate limit purpose.
4272 * The caller needs to hold scheduler lock.
4273 */
4274 static struct ice_sched_node *
ice_sched_get_srl_node(struct ice_sched_node * node,u8 srl_layer)4275 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
4276 {
4277 if (srl_layer > node->tx_sched_layer)
4278 return node->children[0];
4279 else if (srl_layer < node->tx_sched_layer)
4280 /* Node can't be created without a parent. It will always
4281 * have a valid parent except root node.
4282 */
4283 return node->parent;
4284 else
4285 return node;
4286 }
4287
/**
 * ice_sched_rm_rl_profile - remove RL profile ID
 * @hw: pointer to the hardware structure
 * @layer_num: layer number where profiles are saved
 * @profile_type: profile type like EIR, CIR, or SRL
 * @profile_id: profile ID to remove
 *
 * This function removes rate limit profile from layer 'layer_num' of type
 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
 * scheduler lock.
 *
 * A profile still referenced elsewhere (ICE_ERR_IN_USE) is not treated as
 * an error; the caller gets 0 in that case.
 */
static int
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	int status = 0;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;
	/* Check the existing list for RL profile */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			/* Drop one reference before attempting deletion */
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status && status != ICE_ERR_IN_USE)
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	/* Profiles still in use are left in place; not an error */
	if (status == ICE_ERR_IN_USE)
		status = 0;
	return status;
}
4328
4329 /**
4330 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
4331 * @pi: port information structure
4332 * @node: pointer to node structure
4333 * @rl_type: rate limit type min, max, or shared
4334 * @layer_num: layer number where RL profiles are saved
4335 *
4336 * This function configures node element's BW rate limit profile ID of
4337 * type CIR, EIR, or SRL to default. This function needs to be called
4338 * with the scheduler lock held.
4339 */
4340 static int
ice_sched_set_node_bw_dflt(struct ice_port_info * pi,struct ice_sched_node * node,enum ice_rl_type rl_type,u8 layer_num)4341 ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
4342 struct ice_sched_node *node,
4343 enum ice_rl_type rl_type, u8 layer_num)
4344 {
4345 struct ice_hw *hw;
4346 u8 profile_type;
4347 u16 rl_prof_id;
4348 int status;
4349 u16 old_id;
4350
4351 hw = pi->hw;
4352 switch (rl_type) {
4353 case ICE_MIN_BW:
4354 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
4355 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
4356 break;
4357 case ICE_MAX_BW:
4358 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
4359 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
4360 break;
4361 case ICE_SHARED_BW:
4362 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
4363 /* No SRL is configured for default case */
4364 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
4365 break;
4366 default:
4367 return ICE_ERR_PARAM;
4368 }
4369 /* Save existing RL prof ID for later clean up */
4370 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
4371 /* Configure BW scheduling parameters */
4372 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
4373 if (status)
4374 return status;
4375
4376 /* Remove stale RL profile ID */
4377 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
4378 old_id == ICE_SCHED_INVAL_PROF_ID)
4379 return 0;
4380
4381 return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
4382 }
4383
/**
 * ice_sched_set_node_bw - set node's bandwidth
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: layer number
 *
 * This function adds new profile corresponding to requested BW, configures
 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
 * ID from local database. The caller needs to hold scheduler lock.
 */
int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;
	int status = ICE_ERR_PARAM;

	/* Find or create a profile for the requested BW */
	rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes has been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal: skip when the old ID is default
	 * (non-shared), invalid, or the same profile we just applied.
	 */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return 0;

	return ice_sched_rm_rl_profile(hw, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
4431
4432 /**
4433 * ice_sched_set_node_priority - set node's priority
4434 * @pi: port information structure
4435 * @node: tree node
4436 * @priority: number 0-7 representing priority among siblings
4437 *
4438 * This function sets priority of a node among it's siblings.
4439 */
4440 int
ice_sched_set_node_priority(struct ice_port_info * pi,struct ice_sched_node * node,u16 priority)4441 ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
4442 u16 priority)
4443 {
4444 struct ice_aqc_txsched_elem_data buf;
4445 struct ice_aqc_txsched_elem *data;
4446
4447 buf = node->info;
4448 data = &buf.data;
4449
4450 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
4451 data->generic |= ICE_AQC_ELEM_GENERIC_PRIO_M &
4452 (priority << ICE_AQC_ELEM_GENERIC_PRIO_S);
4453
4454 return ice_sched_update_elem(pi->hw, node, &buf);
4455 }
4456
4457 /**
4458 * ice_sched_set_node_weight - set node's weight
4459 * @pi: port information structure
4460 * @node: tree node
4461 * @weight: number 1-200 representing weight for WFQ
4462 *
4463 * This function sets weight of the node for WFQ algorithm.
4464 */
4465 int
ice_sched_set_node_weight(struct ice_port_info * pi,struct ice_sched_node * node,u16 weight)4466 ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
4467 {
4468 struct ice_aqc_txsched_elem_data buf;
4469 struct ice_aqc_txsched_elem *data;
4470
4471 buf = node->info;
4472 data = &buf.data;
4473
4474 data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
4475 ICE_AQC_ELEM_VALID_GENERIC;
4476 data->cir_bw.bw_alloc = CPU_TO_LE16(weight);
4477 data->eir_bw.bw_alloc = CPU_TO_LE16(weight);
4478 data->generic |= ICE_AQC_ELEM_GENERIC_SP_M &
4479 (0x0 << ICE_AQC_ELEM_GENERIC_SP_S);
4480
4481 return ice_sched_update_elem(pi->hw, node, &buf);
4482 }
4483
4484 /**
4485 * ice_sched_set_node_bw_lmt - set node's BW limit
4486 * @pi: port information structure
4487 * @node: tree node
4488 * @rl_type: rate limit type min, max, or shared
4489 * @bw: bandwidth in Kbps - Kilo bits per sec
4490 *
4491 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
4492 * EIR, or SRL. The caller needs to hold scheduler lock.
4493 *
4494 * NOTE: Caller provides the correct SRL node in case of shared profile
4495 * settings.
4496 */
4497 int
ice_sched_set_node_bw_lmt(struct ice_port_info * pi,struct ice_sched_node * node,enum ice_rl_type rl_type,u32 bw)4498 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
4499 enum ice_rl_type rl_type, u32 bw)
4500 {
4501 struct ice_hw *hw;
4502 u8 layer_num;
4503
4504 if (!pi)
4505 return ICE_ERR_PARAM;
4506 hw = pi->hw;
4507 /* Remove unused RL profile IDs from HW and SW DB */
4508 ice_sched_rm_unused_rl_prof(hw);
4509
4510 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
4511 node->tx_sched_layer);
4512 if (layer_num >= hw->num_tx_sched_layers)
4513 return ICE_ERR_PARAM;
4514
4515 if (bw == ICE_SCHED_DFLT_BW)
4516 return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num);
4517 return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num);
4518 }
4519
4520 /**
4521 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
4522 * @pi: port information structure
4523 * @node: pointer to node structure
4524 * @rl_type: rate limit type min, max, or shared
4525 *
4526 * This function configures node element's BW rate limit profile ID of
4527 * type CIR, EIR, or SRL to default. This function needs to be called
4528 * with the scheduler lock held.
4529 */
4530 static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info * pi,struct ice_sched_node * node,enum ice_rl_type rl_type)4531 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
4532 struct ice_sched_node *node,
4533 enum ice_rl_type rl_type)
4534 {
4535 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
4536 ICE_SCHED_DFLT_BW);
4537 }
4538
/**
 * ice_sched_validate_srl_node - Check node for SRL applicability
 * @node: sched node to configure
 * @sel_layer: selected SRL layer
 *
 * This function checks if the SRL can be applied to a selected layer node
 * on behalf of the requested node (first argument). This function needs to
 * be called with scheduler lock held.
 */
static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
	/* SRL profiles are not available on all layers. Check if the
	 * SRL profile can be applied to a node above or below the
	 * requested node. SRL configuration is possible only if the
	 * selected layer's node has single child.
	 */
	if (sel_layer == node->tx_sched_layer ||
	    ((sel_layer == node->tx_sched_layer + 1) &&
	     node->num_children == 1) ||
	    ((sel_layer == node->tx_sched_layer - 1) &&
	     (node->parent && node->parent->num_children == 1)))
		return 0;

	return ICE_ERR_CFG;
}
4565
4566 /**
4567 * ice_sched_save_q_bw - save queue node's BW information
4568 * @q_ctx: queue context structure
4569 * @rl_type: rate limit type min, max, or shared
4570 * @bw: bandwidth in Kbps - Kilo bits per sec
4571 *
4572 * Save BW information of queue type node for post replay use.
4573 */
4574 static int
ice_sched_save_q_bw(struct ice_q_ctx * q_ctx,enum ice_rl_type rl_type,u32 bw)4575 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
4576 {
4577 switch (rl_type) {
4578 case ICE_MIN_BW:
4579 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
4580 break;
4581 case ICE_MAX_BW:
4582 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
4583 break;
4584 case ICE_SHARED_BW:
4585 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
4586 break;
4587 default:
4588 return ICE_ERR_PARAM;
4589 }
4590 return 0;
4591 }
4592
/**
 * ice_sched_set_q_bw_lmt - sets queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps (ICE_SCHED_DFLT_BW restores the default)
 *
 * This function sets BW limit of queue scheduling node, then saves the
 * value in the queue context for replay after reset.
 */
static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;
	int status = ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	/* The queue context stores the node TEID */
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		/* SRL is only applicable if the selected layer's node
		 * has a single child chain through this node.
		 */
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* Save for replay only after the HW update succeeded */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}
4655
4656 /**
4657 * ice_cfg_q_bw_lmt - configure queue BW limit
4658 * @pi: port information structure
4659 * @vsi_handle: sw VSI handle
4660 * @tc: traffic class
4661 * @q_handle: software queue handle
4662 * @rl_type: min, max, or shared
4663 * @bw: bandwidth in Kbps
4664 *
4665 * This function configures BW limit of queue scheduling node.
4666 */
4667 int
ice_cfg_q_bw_lmt(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u16 q_handle,enum ice_rl_type rl_type,u32 bw)4668 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4669 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
4670 {
4671 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
4672 bw);
4673 }
4674
4675 /**
4676 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
4677 * @pi: port information structure
4678 * @vsi_handle: sw VSI handle
4679 * @tc: traffic class
4680 * @q_handle: software queue handle
4681 * @rl_type: min, max, or shared
4682 *
4683 * This function configures BW default limit of queue scheduling node.
4684 */
4685 int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u16 q_handle,enum ice_rl_type rl_type)4686 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4687 u16 q_handle, enum ice_rl_type rl_type)
4688 {
4689 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
4690 ICE_SCHED_DFLT_BW);
4691 }
4692
4693 /**
4694 * ice_sched_save_tc_node_bw - save TC node BW limit
4695 * @pi: port information structure
4696 * @tc: TC number
4697 * @rl_type: min or max
4698 * @bw: bandwidth in Kbps
4699 *
4700 * This function saves the modified values of bandwidth settings for later
4701 * replay purpose (restore) after reset.
4702 */
4703 static int
ice_sched_save_tc_node_bw(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u32 bw)4704 ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
4705 enum ice_rl_type rl_type, u32 bw)
4706 {
4707 if (tc >= ICE_MAX_TRAFFIC_CLASS)
4708 return ICE_ERR_PARAM;
4709 switch (rl_type) {
4710 case ICE_MIN_BW:
4711 ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
4712 break;
4713 case ICE_MAX_BW:
4714 ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
4715 break;
4716 case ICE_SHARED_BW:
4717 ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
4718 break;
4719 default:
4720 return ICE_ERR_PARAM;
4721 }
4722 return 0;
4723 }
4724
/* Bits written into a TC node element's 'generic' field (see
 * ice_sched_set_tc_node_bw_lmt): bit 4 enables what the macro name
 * indicates is strict-priority mode, and the priority value is shifted
 * up by 1 bit.
 */
#define ICE_SCHED_GENERIC_STRICT_MODE BIT(4)
#define ICE_SCHED_GENERIC_PRIO_S 1
4727
/**
 * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
 * @pi: port information structure
 * @tc: TC number
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures bandwidth limit of TC node. It first updates
 * the node element's generic field, then programs the requested rate
 * limiter, and finally records the value for replay after reset.
 */
static int
ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
			     enum ice_rl_type rl_type, u32 bw)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;
	struct ice_sched_node *tc_node;
	int status = ICE_ERR_PARAM;

	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return status;
	ice_acquire_lock(&pi->sched_lock);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_set_tc_node_bw; /* returns ICE_ERR_PARAM */

	/* update node's generic field: priority derived from the TC index
	 * plus the strict-mode bit. Note valid_sections is assigned (not
	 * OR-ed), so only the generic section is marked for this update.
	 */
	buf = tc_node->info;
	data = &buf.data;
	data->valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
	data->generic = (tc << ICE_SCHED_GENERIC_PRIO_S) |
			ICE_SCHED_GENERIC_STRICT_MODE;
	status = ice_sched_update_elem(pi->hw, tc_node, &buf);
	if (status)
		goto exit_set_tc_node_bw;

	/* ICE_SCHED_DFLT_BW removes the rate limit profile instead */
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
	/* save the new value so it can be replayed after reset */
	if (!status)
		status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);

exit_set_tc_node_bw:
	ice_release_lock(&pi->sched_lock);
	return status;
}
4774
4775 /**
4776 * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
4777 * @pi: port information structure
4778 * @tc: TC number
4779 * @rl_type: min or max
4780 * @bw: bandwidth in Kbps
4781 *
4782 * This function configures BW limit of TC node.
4783 * Note: The minimum guaranteed reservation is done via DCBX.
4784 */
4785 int
ice_cfg_tc_node_bw_lmt(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u32 bw)4786 ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
4787 enum ice_rl_type rl_type, u32 bw)
4788 {
4789 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
4790 }
4791
4792 /**
4793 * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
4794 * @pi: port information structure
4795 * @tc: TC number
4796 * @rl_type: min or max
4797 *
4798 * This function configures BW default limit of TC node.
4799 */
4800 int
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type)4801 ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
4802 enum ice_rl_type rl_type)
4803 {
4804 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
4805 }
4806
4807 /**
4808 * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
4809 * @pi: port information structure
4810 * @tc: traffic class
4811 * @rl_type: rate limit type min or max
4812 * @bw_alloc: Bandwidth allocation information
4813 *
4814 * Save BW alloc information of VSI type node for post replay use.
4815 */
4816 static int
ice_sched_save_tc_node_bw_alloc(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u16 bw_alloc)4817 ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4818 enum ice_rl_type rl_type, u16 bw_alloc)
4819 {
4820 if (tc >= ICE_MAX_TRAFFIC_CLASS)
4821 return ICE_ERR_PARAM;
4822 switch (rl_type) {
4823 case ICE_MIN_BW:
4824 ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
4825 bw_alloc);
4826 break;
4827 case ICE_MAX_BW:
4828 ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
4829 bw_alloc);
4830 break;
4831 default:
4832 return ICE_ERR_PARAM;
4833 }
4834 return 0;
4835 }
4836
4837 /**
4838 * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
4839 * @pi: port information structure
4840 * @tc: TC number
4841 * @rl_type: min or max
4842 * @bw_alloc: bandwidth alloc
4843 *
4844 * This function configures bandwidth alloc of TC node, also saves the
4845 * changed settings for replay purpose, and return success if it succeeds
4846 * in modifying bandwidth alloc setting.
4847 */
4848 static int
ice_sched_set_tc_node_bw_alloc(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u8 bw_alloc)4849 ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4850 enum ice_rl_type rl_type, u8 bw_alloc)
4851 {
4852 struct ice_sched_node *tc_node;
4853 int status = ICE_ERR_PARAM;
4854
4855 if (tc >= ICE_MAX_TRAFFIC_CLASS)
4856 return status;
4857 ice_acquire_lock(&pi->sched_lock);
4858 tc_node = ice_sched_get_tc_node(pi, tc);
4859 if (!tc_node)
4860 goto exit_set_tc_node_bw_alloc;
4861 status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
4862 bw_alloc);
4863 if (status)
4864 goto exit_set_tc_node_bw_alloc;
4865 status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
4866
4867 exit_set_tc_node_bw_alloc:
4868 ice_release_lock(&pi->sched_lock);
4869 return status;
4870 }
4871
4872 /**
4873 * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
4874 * @pi: port information structure
4875 * @tc: TC number
4876 * @rl_type: min or max
4877 * @bw_alloc: bandwidth alloc
4878 *
4879 * This function configures BW limit of TC node.
4880 * Note: The minimum guaranteed reservation is done via DCBX.
4881 */
4882 int
ice_cfg_tc_node_bw_alloc(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u8 bw_alloc)4883 ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4884 enum ice_rl_type rl_type, u8 bw_alloc)
4885 {
4886 return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
4887 }
4888
4889 /**
4890 * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
4891 * @pi: port information structure
4892 * @vsi_handle: software VSI handle
4893 *
4894 * This function retrieves the aggregator ID based on VSI ID and TC,
4895 * and sets node's BW limit to default. This function needs to be
4896 * called with the scheduler lock held.
4897 */
4898 int
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info * pi,u16 vsi_handle)4899 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
4900 {
4901 struct ice_vsi_ctx *vsi_ctx;
4902 int status = 0;
4903 u8 tc;
4904
4905 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4906 return ICE_ERR_PARAM;
4907 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
4908 if (!vsi_ctx)
4909 return ICE_ERR_PARAM;
4910
4911 ice_for_each_traffic_class(tc) {
4912 struct ice_sched_node *node;
4913
4914 node = vsi_ctx->sched.ag_node[tc];
4915 if (!node)
4916 continue;
4917
4918 /* Set min profile to default */
4919 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW);
4920 if (status)
4921 break;
4922
4923 /* Set max profile to default */
4924 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW);
4925 if (status)
4926 break;
4927
4928 /* Remove shared profile, if there is one */
4929 status = ice_sched_set_node_bw_dflt_lmt(pi, node,
4930 ICE_SHARED_BW);
4931 if (status)
4932 break;
4933 }
4934
4935 return status;
4936 }
4937
4938 /**
4939 * ice_sched_get_node_by_id_type - get node from ID type
4940 * @pi: port information structure
4941 * @id: identifier
4942 * @agg_type: type of aggregator
4943 * @tc: traffic class
4944 *
4945 * This function returns node identified by ID of type aggregator, and
4946 * based on traffic class (TC). This function needs to be called with
4947 * the scheduler lock held.
4948 */
4949 static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info * pi,u32 id,enum ice_agg_type agg_type,u8 tc)4950 ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
4951 enum ice_agg_type agg_type, u8 tc)
4952 {
4953 struct ice_sched_node *node = NULL;
4954
4955 switch (agg_type) {
4956 case ICE_AGG_TYPE_VSI: {
4957 struct ice_vsi_ctx *vsi_ctx;
4958 u16 vsi_handle = (u16)id;
4959
4960 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4961 break;
4962 /* Get sched_vsi_info */
4963 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
4964 if (!vsi_ctx)
4965 break;
4966 node = vsi_ctx->sched.vsi_node[tc];
4967 break;
4968 }
4969
4970 case ICE_AGG_TYPE_AGG: {
4971 struct ice_sched_node *tc_node;
4972
4973 tc_node = ice_sched_get_tc_node(pi, tc);
4974 if (tc_node)
4975 node = ice_sched_get_agg_node(pi, tc_node, id);
4976 break;
4977 }
4978
4979 case ICE_AGG_TYPE_Q:
4980 /* The current implementation allows single queue to modify */
4981 node = ice_sched_find_node_by_teid(pi->root, id);
4982 break;
4983
4984 case ICE_AGG_TYPE_QG: {
4985 struct ice_sched_node *child_node;
4986
4987 /* The current implementation allows single qg to modify */
4988 child_node = ice_sched_find_node_by_teid(pi->root, id);
4989 if (!child_node)
4990 break;
4991 node = child_node->parent;
4992 break;
4993 }
4994
4995 default:
4996 break;
4997 }
4998
4999 return node;
5000 }
5001
5002 /**
5003 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
5004 * @pi: port information structure
5005 * @id: ID (software VSI handle or AGG ID)
5006 * @agg_type: aggregator type (VSI or AGG type node)
5007 * @tc: traffic class
5008 * @rl_type: min or max
5009 * @bw: bandwidth in Kbps
5010 *
5011 * This function sets BW limit of VSI or Aggregator scheduling node
5012 * based on TC information from passed in argument BW.
5013 */
5014 int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info * pi,u32 id,enum ice_agg_type agg_type,u8 tc,enum ice_rl_type rl_type,u32 bw)5015 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
5016 enum ice_agg_type agg_type, u8 tc,
5017 enum ice_rl_type rl_type, u32 bw)
5018 {
5019 struct ice_sched_node *node;
5020 int status = ICE_ERR_PARAM;
5021
5022 if (!pi)
5023 return status;
5024
5025 if (rl_type == ICE_UNKNOWN_BW)
5026 return status;
5027
5028 ice_acquire_lock(&pi->sched_lock);
5029 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
5030 if (!node) {
5031 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
5032 goto exit_set_node_bw_lmt_per_tc;
5033 }
5034 if (bw == ICE_SCHED_DFLT_BW)
5035 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
5036 else
5037 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
5038
5039 exit_set_node_bw_lmt_per_tc:
5040 ice_release_lock(&pi->sched_lock);
5041 return status;
5042 }
5043
/**
 * ice_sched_validate_vsi_srl_node - validate VSI SRL node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function validates SRL node of the VSI node if available SRL layer is
 * different than the VSI node layer on all TC(s).This function needs to be
 * called with scheduler lock held.
 */
static int
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;
		int status;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* SRL bandwidth layer selection: resolved lazily from the
		 * first VSI node found and reused for the remaining TCs.
		 * NOTE(review): this reuse assumes all of the VSI's nodes
		 * sit on the same scheduling layer -- confirm vs topology.
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = vsi_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		/* abort on the first TC whose node fails validation */
		status = ice_sched_validate_srl_node(vsi_node, sel_layer);
		if (status)
			return status;
	}
	return 0;
}
5094
5095 /**
5096 * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values
5097 * @pi: port information structure
5098 * @vsi_handle: software VSI handle
5099 * @tc: traffic class
5100 * @srl_node: sched node to configure
5101 * @rl_type: rate limit type minimum, maximum, or shared
5102 * @bw: minimum, maximum, or shared bandwidth in Kbps
5103 *
5104 * Configure shared rate limiter(SRL) of VSI type nodes across given traffic
5105 * class, and saves those value for later use for replaying purposes. The
5106 * caller holds the scheduler lock.
5107 */
5108 static int
ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info * pi,u16 vsi_handle,u8 tc,struct ice_sched_node * srl_node,enum ice_rl_type rl_type,u32 bw)5109 ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
5110 u8 tc, struct ice_sched_node *srl_node,
5111 enum ice_rl_type rl_type, u32 bw)
5112 {
5113 int status;
5114
5115 if (bw == ICE_SCHED_DFLT_BW) {
5116 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
5117 } else {
5118 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
5119 if (status)
5120 return status;
5121 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
5122 }
5123 return status;
5124 }
5125
5126 /**
5127 * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc
5128 * @pi: port information structure
5129 * @vsi_handle: software VSI handle
5130 * @tc: traffic class
5131 * @min_bw: minimum bandwidth in Kbps
5132 * @max_bw: maximum bandwidth in Kbps
5133 * @shared_bw: shared bandwidth in Kbps
5134 *
5135 * Configure shared rate limiter(SRL) of VSI type nodes across requested
5136 * traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW
5137 * is passed, it removes the corresponding bw from the node. The caller
5138 * holds scheduler lock.
5139 */
5140 static int
ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)5141 ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
5142 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
5143 {
5144 struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
5145 u8 layer_num;
5146 int status;
5147
5148 tc_node = ice_sched_get_tc_node(pi, tc);
5149 if (!tc_node)
5150 return ICE_ERR_CFG;
5151
5152 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
5153 if (!vsi_node)
5154 return ICE_ERR_CFG;
5155
5156 layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW,
5157 vsi_node->tx_sched_layer);
5158 if (layer_num >= pi->hw->num_tx_sched_layers)
5159 return ICE_ERR_PARAM;
5160
5161 /* SRL node may be different */
5162 cfg_node = ice_sched_get_srl_node(vsi_node, layer_num);
5163 if (!cfg_node)
5164 return ICE_ERR_CFG;
5165
5166 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
5167 cfg_node, ICE_MIN_BW,
5168 min_bw);
5169 if (status)
5170 return status;
5171
5172 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
5173 cfg_node, ICE_MAX_BW,
5174 max_bw);
5175 if (status)
5176 return status;
5177
5178 return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node,
5179 ICE_SHARED_BW, shared_bw);
5180 }
5181
5182 /**
5183 * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
5184 * @pi: port information structure
5185 * @vsi_handle: software VSI handle
5186 * @min_bw: minimum bandwidth in Kbps
5187 * @max_bw: maximum bandwidth in Kbps
5188 * @shared_bw: shared bandwidth in Kbps
5189 *
5190 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
5191 * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is
5192 * passed, it removes those value(s) from the node.
5193 */
5194 int
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info * pi,u16 vsi_handle,u32 min_bw,u32 max_bw,u32 shared_bw)5195 ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
5196 u32 min_bw, u32 max_bw, u32 shared_bw)
5197 {
5198 int status = 0;
5199 u8 tc;
5200
5201 if (!pi)
5202 return ICE_ERR_PARAM;
5203
5204 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5205 return ICE_ERR_PARAM;
5206
5207 ice_acquire_lock(&pi->sched_lock);
5208 status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
5209 if (status)
5210 goto exit_set_vsi_bw_shared_lmt;
5211 /* Return success if no nodes are present across TC */
5212 ice_for_each_traffic_class(tc) {
5213 struct ice_sched_node *tc_node, *vsi_node;
5214
5215 tc_node = ice_sched_get_tc_node(pi, tc);
5216 if (!tc_node)
5217 continue;
5218
5219 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
5220 if (!vsi_node)
5221 continue;
5222
5223 status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc,
5224 min_bw, max_bw,
5225 shared_bw);
5226 if (status)
5227 break;
5228 }
5229
5230 exit_set_vsi_bw_shared_lmt:
5231 ice_release_lock(&pi->sched_lock);
5232 return status;
5233 }
5234
/**
 * ice_sched_validate_agg_srl_node - validate AGG SRL node
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function validates SRL node of the AGG node if available SRL layer is
 * different than the AGG node layer on all TC(s).This function needs to be
 * called with scheduler lock held.
 */
static int
ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	int status = 0;
	u8 tc;

	/* the aggregator must exist in the software aggregator list */
	LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		return ICE_ERR_PARAM;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;
		/* SRL bandwidth layer selection: resolved lazily from the
		 * first aggregator node found, reused for later TCs.
		 * NOTE(review): this reuse assumes all aggregator nodes for
		 * this ID are on the same scheduling layer -- confirm.
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = agg_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		/* stop at the first TC whose node fails validation */
		status = ice_sched_validate_srl_node(agg_node, sel_layer);
		if (status)
			break;
	}
	return status;
}
5291
5292 /**
5293 * ice_sched_validate_agg_id - Validate aggregator id
5294 * @pi: port information structure
5295 * @agg_id: aggregator ID
5296 *
5297 * This function validates aggregator id. Caller holds the scheduler lock.
5298 */
5299 static int
ice_sched_validate_agg_id(struct ice_port_info * pi,u32 agg_id)5300 ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
5301 {
5302 struct ice_sched_agg_info *agg_info;
5303 struct ice_sched_agg_info *tmp;
5304 bool agg_id_present = false;
5305 int status;
5306
5307 status = ice_sched_validate_agg_srl_node(pi, agg_id);
5308 if (status)
5309 return status;
5310
5311 LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
5312 ice_sched_agg_info, list_entry)
5313 if (agg_info->agg_id == agg_id) {
5314 agg_id_present = true;
5315 break;
5316 }
5317
5318 if (!agg_id_present)
5319 return ICE_ERR_PARAM;
5320
5321 return 0;
5322 }
5323
5324 /**
5325 * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values
5326 * @pi: port information structure
5327 * @agg_id: aggregator ID
5328 * @tc: traffic class
5329 * @srl_node: sched node to configure
5330 * @rl_type: rate limit type minimum, maximum, or shared
5331 * @bw: minimum, maximum, or shared bandwidth in Kbps
5332 *
5333 * Configure shared rate limiter(SRL) of aggregator type nodes across
5334 * requested traffic class, and saves those value for later use for
5335 * replaying purposes. The caller holds the scheduler lock.
5336 */
5337 static int
ice_sched_set_save_agg_srl_node_bw(struct ice_port_info * pi,u32 agg_id,u8 tc,struct ice_sched_node * srl_node,enum ice_rl_type rl_type,u32 bw)5338 ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
5339 struct ice_sched_node *srl_node,
5340 enum ice_rl_type rl_type, u32 bw)
5341 {
5342 int status;
5343
5344 if (bw == ICE_SCHED_DFLT_BW) {
5345 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
5346 } else {
5347 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
5348 if (status)
5349 return status;
5350 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
5351 }
5352 return status;
5353 }
5354
5355 /**
5356 * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc
5357 * @pi: port information structure
5358 * @agg_id: aggregator ID
5359 * @tc: traffic class
5360 * @min_bw: minimum bandwidth in Kbps
5361 * @max_bw: maximum bandwidth in Kbps
5362 * @shared_bw: shared bandwidth in Kbps
5363 *
5364 * This function configures the shared rate limiter(SRL) of aggregator type
5365 * node for a given traffic class for aggregator matching agg_id. When BW
5366 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller
5367 * holds the scheduler lock.
5368 */
5369 static int
ice_sched_set_agg_node_srl_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)5370 ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
5371 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
5372 {
5373 struct ice_sched_node *tc_node, *agg_node, *cfg_node;
5374 enum ice_rl_type rl_type = ICE_SHARED_BW;
5375 int status = ICE_ERR_CFG;
5376 u8 layer_num;
5377
5378 tc_node = ice_sched_get_tc_node(pi, tc);
5379 if (!tc_node)
5380 return ICE_ERR_CFG;
5381
5382 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
5383 if (!agg_node)
5384 return ICE_ERR_CFG;
5385
5386 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
5387 agg_node->tx_sched_layer);
5388 if (layer_num >= pi->hw->num_tx_sched_layers)
5389 return ICE_ERR_PARAM;
5390
5391 /* SRL node may be different */
5392 cfg_node = ice_sched_get_srl_node(agg_node, layer_num);
5393 if (!cfg_node)
5394 return ICE_ERR_CFG;
5395
5396 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
5397 ICE_MIN_BW, min_bw);
5398 if (status)
5399 return status;
5400
5401 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
5402 ICE_MAX_BW, max_bw);
5403 if (status)
5404 return status;
5405
5406 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
5407 ICE_SHARED_BW, shared_bw);
5408 return status;
5409 }
5410
5411 /**
5412 * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
5413 * @pi: port information structure
5414 * @agg_id: aggregator ID
5415 * @min_bw: minimum bandwidth in Kbps
5416 * @max_bw: maximum bandwidth in Kbps
5417 * @shared_bw: shared bandwidth in Kbps
5418 *
5419 * This function configures the shared rate limiter(SRL) of all aggregator type
5420 * nodes across all traffic classes for aggregator matching agg_id. When
5421 * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
5422 * node(s).
5423 */
5424 int
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info * pi,u32 agg_id,u32 min_bw,u32 max_bw,u32 shared_bw)5425 ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id,
5426 u32 min_bw, u32 max_bw, u32 shared_bw)
5427 {
5428 int status;
5429 u8 tc;
5430
5431 if (!pi)
5432 return ICE_ERR_PARAM;
5433
5434 ice_acquire_lock(&pi->sched_lock);
5435 status = ice_sched_validate_agg_id(pi, agg_id);
5436 if (status)
5437 goto exit_agg_bw_shared_lmt;
5438
5439 /* Return success if no nodes are present across TC */
5440 ice_for_each_traffic_class(tc) {
5441 struct ice_sched_node *tc_node, *agg_node;
5442
5443 tc_node = ice_sched_get_tc_node(pi, tc);
5444 if (!tc_node)
5445 continue;
5446
5447 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
5448 if (!agg_node)
5449 continue;
5450
5451 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc,
5452 min_bw, max_bw,
5453 shared_bw);
5454 if (status)
5455 break;
5456 }
5457
5458 exit_agg_bw_shared_lmt:
5459 ice_release_lock(&pi->sched_lock);
5460 return status;
5461 }
5462
5463 /**
5464 * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc
5465 * @pi: port information structure
5466 * @agg_id: aggregator ID
5467 * @tc: traffic class
5468 * @min_bw: minimum bandwidth in Kbps
5469 * @max_bw: maximum bandwidth in Kbps
5470 * @shared_bw: shared bandwidth in Kbps
5471 *
5472 * This function configures the shared rate limiter(SRL) of aggregator type
5473 * node for a given traffic class for aggregator matching agg_id. When BW
5474 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node.
5475 */
5476 int
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)5477 ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
5478 u8 tc, u32 min_bw, u32 max_bw,
5479 u32 shared_bw)
5480 {
5481 int status;
5482
5483 if (!pi)
5484 return ICE_ERR_PARAM;
5485 ice_acquire_lock(&pi->sched_lock);
5486 status = ice_sched_validate_agg_id(pi, agg_id);
5487 if (status)
5488 goto exit_agg_bw_shared_lmt_per_tc;
5489
5490 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw,
5491 max_bw, shared_bw);
5492
5493 exit_agg_bw_shared_lmt_per_tc:
5494 ice_release_lock(&pi->sched_lock);
5495 return status;
5496 }
5497
5498 /**
5499 * ice_sched_cfg_sibl_node_prio - configure node sibling priority
5500 * @pi: port information structure
5501 * @node: sched node to configure
5502 * @priority: sibling priority
5503 *
5504 * This function configures node element's sibling priority only. This
5505 * function needs to be called with scheduler lock held.
5506 */
5507 int
ice_sched_cfg_sibl_node_prio(struct ice_port_info * pi,struct ice_sched_node * node,u8 priority)5508 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
5509 struct ice_sched_node *node, u8 priority)
5510 {
5511 struct ice_aqc_txsched_elem_data buf;
5512 struct ice_aqc_txsched_elem *data;
5513 struct ice_hw *hw = pi->hw;
5514 int status;
5515
5516 if (!hw)
5517 return ICE_ERR_PARAM;
5518 buf = node->info;
5519 data = &buf.data;
5520 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
5521 priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) &
5522 ICE_AQC_ELEM_GENERIC_PRIO_M;
5523 data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M;
5524 data->generic |= priority;
5525
5526 /* Configure element */
5527 status = ice_sched_update_elem(hw, node, &buf);
5528 return status;
5529 }
5530
5531 /**
5532 * ice_cfg_rl_burst_size - Set burst size value
5533 * @hw: pointer to the HW struct
5534 * @bytes: burst size in bytes
5535 *
5536 * This function configures/set the burst size to requested new value. The new
5537 * burst size value is used for future rate limit calls. It doesn't change the
5538 * existing or previously created RL profiles.
5539 */
ice_cfg_rl_burst_size(struct ice_hw * hw,u32 bytes)5540 int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
5541 {
5542 u16 burst_size_to_prog;
5543
5544 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
5545 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
5546 return ICE_ERR_PARAM;
5547 if (ice_round_to_num(bytes, 64) <=
5548 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
5549 /* 64 byte granularity case */
5550 /* Disable MSB granularity bit */
5551 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
5552 /* round number to nearest 64 byte granularity */
5553 bytes = ice_round_to_num(bytes, 64);
5554 /* The value is in 64 byte chunks */
5555 burst_size_to_prog |= (u16)(bytes / 64);
5556 } else {
5557 /* k bytes granularity case */
5558 /* Enable MSB granularity bit */
5559 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
5560 /* round number to nearest 1024 granularity */
5561 bytes = ice_round_to_num(bytes, 1024);
5562 /* check rounding doesn't go beyond allowed */
5563 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
5564 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
5565 /* The value is in k bytes */
5566 burst_size_to_prog |= (u16)(bytes / 1024);
5567 }
5568 hw->max_burst_size = burst_size_to_prog;
5569 return 0;
5570 }
5571
5572 /**
5573 * ice_sched_replay_node_prio - re-configure node priority
5574 * @hw: pointer to the HW struct
5575 * @node: sched node to configure
5576 * @priority: priority value
5577 *
5578 * This function configures node element's priority value. It
5579 * needs to be called with scheduler lock held.
5580 */
5581 static int
ice_sched_replay_node_prio(struct ice_hw * hw,struct ice_sched_node * node,u8 priority)5582 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
5583 u8 priority)
5584 {
5585 struct ice_aqc_txsched_elem_data buf;
5586 struct ice_aqc_txsched_elem *data;
5587 int status;
5588
5589 buf = node->info;
5590 data = &buf.data;
5591 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
5592 data->generic = priority;
5593
5594 /* Configure element */
5595 status = ice_sched_update_elem(hw, node, &buf);
5596 return status;
5597 }
5598
/**
 * ice_sched_replay_node_bw - replay node(s) BW
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @bw_t_info: BW type information
 *
 * This function restores node's BW from bw_t_info. The caller needs
 * to hold the scheduler lock. Each saved setting (priority, CIR/EIR
 * limit and allocation, shared limit) is replayed only when its bit is
 * set in bw_t_info->bw_t_bitmap; the first failure aborts the replay.
 */
static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
			 struct ice_bw_type_info *bw_t_info)
{
	struct ice_port_info *pi = hw->port_info;
	int status = ICE_ERR_PARAM;
	u16 bw_alloc;

	if (!node)
		return status;
	/* nothing was saved for this node -- nothing to replay */
	if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
		return 0;
	/* replay node priority */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
		status = ice_sched_replay_node_prio(hw, node,
						    bw_t_info->generic);
		if (status)
			return status;
	}
	/* replay minimum (CIR) bandwidth limit */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
						   bw_t_info->cir_bw.bw);
		if (status)
			return status;
	}
	/* replay minimum bandwidth allocation */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) {
		bw_alloc = bw_t_info->cir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	/* replay maximum (EIR) bandwidth limit */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
						   bw_t_info->eir_bw.bw);
		if (status)
			return status;
	}
	/* replay maximum bandwidth allocation */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) {
		bw_alloc = bw_t_info->eir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	/* replay shared bandwidth limit last; its status is returned */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED))
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
						   bw_t_info->shared_bw);
	return status;
}
5657
5658 /**
5659 * ice_sched_replay_agg_bw - replay aggregator node(s) BW
5660 * @hw: pointer to the HW struct
5661 * @agg_info: aggregator data structure
5662 *
5663 * This function re-creates aggregator type nodes. The caller needs to hold
5664 * the scheduler lock.
5665 */
5666 static int
ice_sched_replay_agg_bw(struct ice_hw * hw,struct ice_sched_agg_info * agg_info)5667 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
5668 {
5669 struct ice_sched_node *tc_node, *agg_node;
5670 int status = 0;
5671 u8 tc;
5672
5673 if (!agg_info)
5674 return ICE_ERR_PARAM;
5675 ice_for_each_traffic_class(tc) {
5676 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap,
5677 ICE_BW_TYPE_CNT))
5678 continue;
5679 tc_node = ice_sched_get_tc_node(hw->port_info, tc);
5680 if (!tc_node) {
5681 status = ICE_ERR_PARAM;
5682 break;
5683 }
5684 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
5685 agg_info->agg_id);
5686 if (!agg_node) {
5687 status = ICE_ERR_PARAM;
5688 break;
5689 }
5690 status = ice_sched_replay_node_bw(hw, agg_node,
5691 &agg_info->bw_t_info[tc]);
5692 if (status)
5693 break;
5694 }
5695 return status;
5696 }
5697
5698 /**
5699 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
5700 * @pi: port info struct
5701 * @tc_bitmap: 8 bits TC bitmap to check
5702 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
5703 *
5704 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs
5705 * may be missing, it returns enabled TCs. This function needs to be called with
5706 * scheduler lock held.
5707 */
5708 static void
ice_sched_get_ena_tc_bitmap(struct ice_port_info * pi,ice_bitmap_t * tc_bitmap,ice_bitmap_t * ena_tc_bitmap)5709 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap,
5710 ice_bitmap_t *ena_tc_bitmap)
5711 {
5712 u8 tc;
5713
5714 /* Some TC(s) may be missing after reset, adjust for replay */
5715 ice_for_each_traffic_class(tc)
5716 if (ice_is_tc_ena(*tc_bitmap, tc) &&
5717 (ice_sched_get_tc_node(pi, tc)))
5718 ice_set_bit(tc, ena_tc_bitmap);
5719 }
5720
5721 /**
5722 * ice_sched_replay_agg - recreate aggregator node(s)
5723 * @hw: pointer to the HW struct
5724 *
5725 * This function recreate aggregator type nodes which are not replayed earlier.
5726 * It also replay aggregator BW information. These aggregator nodes are not
5727 * associated with VSI type node yet.
5728 */
ice_sched_replay_agg(struct ice_hw * hw)5729 void ice_sched_replay_agg(struct ice_hw *hw)
5730 {
5731 struct ice_port_info *pi = hw->port_info;
5732 struct ice_sched_agg_info *agg_info;
5733
5734 ice_acquire_lock(&pi->sched_lock);
5735 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
5736 list_entry)
5737 /* replay aggregator (re-create aggregator node) */
5738 if (!ice_cmp_bitmap(agg_info->tc_bitmap,
5739 agg_info->replay_tc_bitmap,
5740 ICE_MAX_TRAFFIC_CLASS)) {
5741 ice_declare_bitmap(replay_bitmap,
5742 ICE_MAX_TRAFFIC_CLASS);
5743 int status;
5744
5745 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5746 ice_sched_get_ena_tc_bitmap(pi,
5747 agg_info->replay_tc_bitmap,
5748 replay_bitmap);
5749 status = ice_sched_cfg_agg(hw->port_info,
5750 agg_info->agg_id,
5751 ICE_AGG_TYPE_AGG,
5752 replay_bitmap);
5753 if (status) {
5754 ice_info(hw, "Replay agg id[%d] failed\n",
5755 agg_info->agg_id);
5756 /* Move on to next one */
5757 continue;
5758 }
5759 /* Replay aggregator node BW (restore aggregator BW) */
5760 status = ice_sched_replay_agg_bw(hw, agg_info);
5761 if (status)
5762 ice_info(hw, "Replay agg bw [id=%d] failed\n",
5763 agg_info->agg_id);
5764 }
5765 ice_release_lock(&pi->sched_lock);
5766 }
5767
5768 /**
5769 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
5770 * @hw: pointer to the HW struct
5771 *
5772 * This function initialize aggregator(s) TC bitmap to zero. A required
5773 * preinit step for replaying aggregators.
5774 */
ice_sched_replay_agg_vsi_preinit(struct ice_hw * hw)5775 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
5776 {
5777 struct ice_port_info *pi = hw->port_info;
5778 struct ice_sched_agg_info *agg_info;
5779
5780 ice_acquire_lock(&pi->sched_lock);
5781 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
5782 list_entry) {
5783 struct ice_sched_agg_vsi_info *agg_vsi_info;
5784
5785 agg_info->tc_bitmap[0] = 0;
5786 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
5787 ice_sched_agg_vsi_info, list_entry)
5788 agg_vsi_info->tc_bitmap[0] = 0;
5789 }
5790 ice_release_lock(&pi->sched_lock);
5791 }
5792
5793 /**
5794 * ice_sched_replay_root_node_bw - replay root node BW
5795 * @pi: port information structure
5796 *
5797 * Replay root node BW settings.
5798 */
ice_sched_replay_root_node_bw(struct ice_port_info * pi)5799 int ice_sched_replay_root_node_bw(struct ice_port_info *pi)
5800 {
5801 int status = 0;
5802
5803 if (!pi->hw)
5804 return ICE_ERR_PARAM;
5805 ice_acquire_lock(&pi->sched_lock);
5806
5807 status = ice_sched_replay_node_bw(pi->hw, pi->root,
5808 &pi->root_node_bw_t_info);
5809 ice_release_lock(&pi->sched_lock);
5810 return status;
5811 }
5812
5813 /**
5814 * ice_sched_replay_tc_node_bw - replay TC node(s) BW
5815 * @pi: port information structure
5816 *
5817 * This function replay TC nodes.
5818 */
ice_sched_replay_tc_node_bw(struct ice_port_info * pi)5819 int ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
5820 {
5821 int status = 0;
5822 u8 tc;
5823
5824 if (!pi->hw)
5825 return ICE_ERR_PARAM;
5826 ice_acquire_lock(&pi->sched_lock);
5827 ice_for_each_traffic_class(tc) {
5828 struct ice_sched_node *tc_node;
5829
5830 tc_node = ice_sched_get_tc_node(pi, tc);
5831 if (!tc_node)
5832 continue; /* TC not present */
5833 status = ice_sched_replay_node_bw(pi->hw, tc_node,
5834 &pi->tc_node_bw_t_info[tc]);
5835 if (status)
5836 break;
5837 }
5838 ice_release_lock(&pi->sched_lock);
5839 return status;
5840 }
5841
5842 /**
5843 * ice_sched_replay_vsi_bw - replay VSI type node(s) BW
5844 * @hw: pointer to the HW struct
5845 * @vsi_handle: software VSI handle
5846 * @tc_bitmap: 8 bits TC bitmap
5847 *
5848 * This function replays VSI type nodes bandwidth. This function needs to be
5849 * called with scheduler lock held.
5850 */
5851 static int
ice_sched_replay_vsi_bw(struct ice_hw * hw,u16 vsi_handle,ice_bitmap_t * tc_bitmap)5852 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
5853 ice_bitmap_t *tc_bitmap)
5854 {
5855 struct ice_sched_node *vsi_node, *tc_node;
5856 struct ice_port_info *pi = hw->port_info;
5857 struct ice_bw_type_info *bw_t_info;
5858 struct ice_vsi_ctx *vsi_ctx;
5859 int status = 0;
5860 u8 tc;
5861
5862 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
5863 if (!vsi_ctx)
5864 return ICE_ERR_PARAM;
5865 ice_for_each_traffic_class(tc) {
5866 if (!ice_is_tc_ena(*tc_bitmap, tc))
5867 continue;
5868 tc_node = ice_sched_get_tc_node(pi, tc);
5869 if (!tc_node)
5870 continue;
5871 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
5872 if (!vsi_node)
5873 continue;
5874 bw_t_info = &vsi_ctx->sched.bw_t_info[tc];
5875 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info);
5876 if (status)
5877 break;
5878 }
5879 return status;
5880 }
5881
5882 /**
5883 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
5884 * @hw: pointer to the HW struct
5885 * @vsi_handle: software VSI handle
5886 *
5887 * This function replays aggregator node, VSI to aggregator type nodes, and
5888 * their node bandwidth information. This function needs to be called with
5889 * scheduler lock held.
5890 */
5891 static int
ice_sched_replay_vsi_agg(struct ice_hw * hw,u16 vsi_handle)5892 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
5893 {
5894 ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5895 struct ice_sched_agg_vsi_info *agg_vsi_info;
5896 struct ice_port_info *pi = hw->port_info;
5897 struct ice_sched_agg_info *agg_info;
5898 int status;
5899
5900 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5901 if (!ice_is_vsi_valid(hw, vsi_handle))
5902 return ICE_ERR_PARAM;
5903 agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
5904 if (!agg_info)
5905 return 0; /* Not present in list - default Agg case */
5906 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
5907 if (!agg_vsi_info)
5908 return 0; /* Not present in list - default Agg case */
5909 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
5910 replay_bitmap);
5911 /* Replay aggregator node associated to vsi_handle */
5912 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
5913 ICE_AGG_TYPE_AGG, replay_bitmap);
5914 if (status)
5915 return status;
5916 /* Replay aggregator node BW (restore aggregator BW) */
5917 status = ice_sched_replay_agg_bw(hw, agg_info);
5918 if (status)
5919 return status;
5920
5921 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5922 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
5923 replay_bitmap);
5924 /* Move this VSI (vsi_handle) to above aggregator */
5925 status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
5926 replay_bitmap);
5927 if (status)
5928 return status;
5929 /* Replay VSI BW (restore VSI BW) */
5930 return ice_sched_replay_vsi_bw(hw, vsi_handle,
5931 agg_vsi_info->tc_bitmap);
5932 }
5933
5934 /**
5935 * ice_replay_vsi_agg - replay VSI to aggregator node
5936 * @hw: pointer to the HW struct
5937 * @vsi_handle: software VSI handle
5938 *
5939 * This function replays association of VSI to aggregator type nodes, and
5940 * node bandwidth information.
5941 */
ice_replay_vsi_agg(struct ice_hw * hw,u16 vsi_handle)5942 int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
5943 {
5944 struct ice_port_info *pi = hw->port_info;
5945 int status;
5946
5947 ice_acquire_lock(&pi->sched_lock);
5948 status = ice_sched_replay_vsi_agg(hw, vsi_handle);
5949 ice_release_lock(&pi->sched_lock);
5950 return status;
5951 }
5952
5953 /**
5954 * ice_sched_replay_q_bw - replay queue type node BW
5955 * @pi: port information structure
5956 * @q_ctx: queue context structure
5957 *
5958 * This function replays queue type node bandwidth. This function needs to be
5959 * called with scheduler lock held.
5960 */
5961 int
ice_sched_replay_q_bw(struct ice_port_info * pi,struct ice_q_ctx * q_ctx)5962 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
5963 {
5964 struct ice_sched_node *q_node;
5965
5966 /* Following also checks the presence of node in tree */
5967 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
5968 if (!q_node)
5969 return ICE_ERR_PARAM;
5970 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
5971 }
5972