Lines Matching +full:row +full:- +full:hold
1 // SPDX-License-Identifier: GPL-2.0-or-later
50 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have level
78 struct rb_root row; member
82 /* When class changes from state 1->2 and disconnects from
101 int quantum; /* but stored for parent-to-leaf return */
172 /* time of nearest event per level (row) */
191 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
202 #define HTB_DIRECT ((struct htb_class *)-1L)
205 * htb_classify - classify a packet into class
210 * It returns NULL if the packet should be dropped or -1 if the packet
228 /* allow selecting the class by setting skb->priority to a valid classid; in htb_classify()
232 if (skb->priority == sch->handle) in htb_classify()
234 cl = htb_find(skb->priority, sch); in htb_classify()
236 if (cl->level == 0) in htb_classify()
238 /* Start with inner filter chain if a non-leaf class is selected */ in htb_classify()
239 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
241 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
259 if (res.classid == sch->handle) in htb_classify()
265 if (!cl->level) in htb_classify()
269 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
272 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
273 if (!cl || cl->level) in htb_classify()
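The fallback at the end of htb_classify() builds the default class handle from the qdisc's major number and q->defcls. Below is a minimal userspace sketch of that handle arithmetic; the TC_H_* macros are re-implemented locally only so the example is self-contained, and the handle/defcls values are made up.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the traffic-control handle helpers, redefined here only
 * so the sketch compiles on its own.
 */
#define TC_H_MAJ(h)          ((h) & 0xFFFF0000U)
#define TC_H_MIN(h)          ((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)  (((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))

int main(void)
{
        uint32_t sch_handle = 0x00010000; /* qdisc "1:" - assumed example value */
        uint32_t defcls = 0x30;           /* e.g. "htb default 30" - assumed    */

        /* Same combination the fallback path uses when no filter matched:
         * major number of the qdisc plus the minor number of the default class.
         */
        uint32_t classid = TC_H_MAKE(TC_H_MAJ(sch_handle), defcls);

        printf("default classid %x:%x\n",
               (unsigned)(TC_H_MAJ(classid) >> 16), (unsigned)TC_H_MIN(classid));
        return 0;
}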
279 * htb_add_to_id_tree - adds class to the round robin list
290 struct rb_node **p = &root->rb_node, *parent = NULL; in htb_add_to_id_tree()
297 if (cl->common.classid > c->common.classid) in htb_add_to_id_tree()
298 p = &parent->rb_right; in htb_add_to_id_tree()
300 p = &parent->rb_left; in htb_add_to_id_tree()
302 rb_link_node(&cl->node[prio], parent, p); in htb_add_to_id_tree()
303 rb_insert_color(&cl->node[prio], root); in htb_add_to_id_tree()
307 * htb_add_to_wait_tree - adds class to the event queue with delay
313 * change its mode at time cl->pq_key (in nanoseconds). Make sure that class is not
319 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
321 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
322 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
323 cl->pq_key++; in htb_add_to_wait_tree()
326 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
327 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
333 if (cl->pq_key >= c->pq_key) in htb_add_to_wait_tree()
334 p = &parent->rb_right; in htb_add_to_wait_tree()
336 p = &parent->rb_left; in htb_add_to_wait_tree()
338 rb_link_node(&cl->pq_node, parent, p); in htb_add_to_wait_tree()
339 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
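htb_add_to_wait_tree() keys the per-level event queue on the absolute timestamp cl->pq_key and keeps q->near_ev_cache[level] pointing at the earliest pending event so the dequeue path can skip the event handler early. A small standalone sketch of that bookkeeping with plain variables instead of the rbtree; all timestamps are assumed values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t now = 1000000000;            /* "q->now" in ns - assumed      */
        int64_t delay = 250000;              /* ns until the mode change      */
        int64_t near_ev_cache = INT64_MAX;   /* earliest event for this level */

        int64_t pq_key = now + delay;
        if (pq_key == now)                   /* never schedule an event "now" */
                pq_key++;

        /* The cache only ever moves earlier; the dequeue path checks it to
         * skip the event handler while nothing can possibly be due.
         */
        if (near_ev_cache > pq_key)
                near_ev_cache = pq_key;

        printf("pq_key=%lld near_ev_cache=%lld\n",
               (long long)pq_key, (long long)near_ev_cache);
        return 0;
}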
343 * htb_next_rb_node - finds next node in binary tree
355 * htb_add_class_to_row - add class to its row
360 * The class is added to row at priorities marked in mask.
366 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
370 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
387 * htb_remove_class_from_row - removes class from its row
392 * The class is removed from row at priorities marked in mask.
399 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
403 struct htb_prio *hprio = &hlevel->hprio[prio]; in htb_remove_class_from_row()
406 if (hprio->ptr == cl->node + prio) in htb_remove_class_from_row()
407 htb_next_rb_node(&hprio->ptr); in htb_remove_class_from_row()
409 htb_safe_rb_erase(cl->node + prio, &hprio->row); in htb_remove_class_from_row()
410 if (!hprio->row.rb_node) in htb_remove_class_from_row()
413 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
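q->row_mask[level] is a per-level bitmap of priorities that currently have at least one active class: htb_add_class_to_row() sets bits, and htb_remove_class_from_row() clears a bit once that priority's rbtree runs empty. A self-contained sketch of the mask bookkeeping (the priorities used are arbitrary).

#include <stdio.h>

#define TC_HTB_NUMPRIO 8   /* same number of priorities HTB uses */

int main(void)
{
        unsigned int row_mask = 0;

        /* A class active on priorities 1 and 3 joins the row. */
        unsigned int add_mask = (1u << 1) | (1u << 3);
        row_mask |= add_mask;

        /* Later the priority-3 rbtree runs empty, so that bit is cleared. */
        row_mask &= ~(1u << 3);

        /* Walk the remaining active priorities the way the dequeue path does. */
        for (int prio = 0; prio < TC_HTB_NUMPRIO; prio++)
                if (row_mask & (1u << prio))
                        printf("priority %d still has active classes\n", prio);

        return 0;
}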
417 * htb_activate_prios - creates active class's feed chain
422 * for the priorities it participates in. cl->cmode must be the new in htb_activate_prios()
423 * (activated) mode. It does nothing if cl->prio_activity == 0.
427 struct htb_class *p = cl->parent; in htb_activate_prios()
428 long m, mask = cl->prio_activity; in htb_activate_prios()
430 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_activate_prios()
435 if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio))) in htb_activate_prios()
439 if (p->inner.clprio[prio].feed.rb_node) in htb_activate_prios()
445 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio); in htb_activate_prios()
447 p->prio_activity |= mask; in htb_activate_prios()
449 p = cl->parent; in htb_activate_prios()
452 if (cl->cmode == HTB_CAN_SEND && mask) in htb_activate_prios()
457 * htb_deactivate_prios - remove class from feed chain
461 * cl->cmode must represent old mode (before deactivation). It does
462 * nothing if cl->prio_activity == 0. Class is removed from all feed
467 struct htb_class *p = cl->parent; in htb_deactivate_prios()
468 long m, mask = cl->prio_activity; in htb_deactivate_prios()
470 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_deactivate_prios()
477 if (p->inner.clprio[prio].ptr == cl->node + prio) { in htb_deactivate_prios()
479 * parent feed - forget the pointer but remember in htb_deactivate_prios()
482 p->inner.clprio[prio].last_ptr_id = cl->common.classid; in htb_deactivate_prios()
483 p->inner.clprio[prio].ptr = NULL; in htb_deactivate_prios()
486 htb_safe_rb_erase(cl->node + prio, in htb_deactivate_prios()
487 &p->inner.clprio[prio].feed); in htb_deactivate_prios()
489 if (!p->inner.clprio[prio].feed.rb_node) in htb_deactivate_prios()
493 p->prio_activity &= ~mask; in htb_deactivate_prios()
495 p = cl->parent; in htb_deactivate_prios()
498 if (cl->cmode == HTB_CAN_SEND && mask) in htb_deactivate_prios()
505 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; in htb_lowater()
512 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; in htb_hiwater()
519 * htb_class_mode - computes and returns current class mode
523 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
524 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
527 * at cl->{c,}tokens == 0; rather, there is a hysteresis in the
528 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
536 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { in htb_class_mode()
537 *diff = -toks; in htb_class_mode()
541 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl)) in htb_class_mode()
544 *diff = -toks; in htb_class_mode()
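htb_class_mode() looks at both buckets: ctokens below the low-water mark means HTB_CANT_SEND, tokens at or above the high-water mark means HTB_CAN_SEND, and anything in between is HTB_MAY_BORROW, with *diff left holding the distance to the deciding threshold. A simplified userspace re-implementation of that decision, assuming hysteresis is disabled so both watermarks are 0; the token values are illustrative only.

#include <stdio.h>

/* Simplified copy of the decision in htb_class_mode(). All values are signed
 * token counts in nanoseconds; lowater/hiwater stand in for htb_lowater()
 * and htb_hiwater() and are both 0 when hysteresis is off.
 */
enum mode { CANT_SEND, MAY_BORROW, CAN_SEND };

static enum mode class_mode(long long tokens, long long ctokens,
                            long long lowater, long long hiwater,
                            long long *diff)
{
        long long toks;

        if ((toks = ctokens + *diff) < lowater) {
                *diff = -toks;          /* time until ctokens reaches lowater  */
                return CANT_SEND;       /* ceil exceeded: cannot send at all   */
        }
        if ((toks = tokens + *diff) >= hiwater)
                return CAN_SEND;        /* within rate: sends in its own right */

        *diff = -toks;                  /* time until tokens reaches hiwater   */
        return MAY_BORROW;              /* over rate, under ceil: may borrow   */
}

int main(void)
{
        long long diff = 0;
        /* Illustrative numbers: rate bucket empty, ceil bucket still positive. */
        enum mode m = class_mode(-50000, 20000, 0, 0, &diff);

        printf("mode=%d (1 = MAY_BORROW), diff=%lld\n", m, diff);
        return 0;
}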
549 * htb_change_class_mode - changes class's mode
557 * be different from the old one and cl->pq_key has to be valid if changing
565 if (new_mode == cl->cmode) in htb_change_class_mode()
569 cl->overlimits++; in htb_change_class_mode()
570 q->overlimits++; in htb_change_class_mode()
573 if (cl->prio_activity) { /* not necessary: speed optimization */ in htb_change_class_mode()
574 if (cl->cmode != HTB_CANT_SEND) in htb_change_class_mode()
576 cl->cmode = new_mode; in htb_change_class_mode()
580 cl->cmode = new_mode; in htb_change_class_mode()
584 * htb_activate - inserts leaf cl into appropriate active feeds
594 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); in htb_activate()
596 if (!cl->prio_activity) { in htb_activate()
597 cl->prio_activity = 1 << cl->prio; in htb_activate()
603 * htb_deactivate - remove leaf cl from active feeds
608 * with a non-active leaf. It also removes the class from the drop list.
612 WARN_ON(!cl->prio_activity); in htb_deactivate()
615 cl->prio_activity = 0; in htb_deactivate()
628 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
629 __qdisc_enqueue_tail(skb, &q->direct_queue); in htb_enqueue()
630 q->direct_pkts++; in htb_enqueue()
641 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, in htb_enqueue()
645 cl->drops++; in htb_enqueue()
652 sch->qstats.backlog += len; in htb_enqueue()
653 sch->q.qlen++; in htb_enqueue()
659 s64 toks = diff + cl->tokens; in htb_accnt_tokens()
661 if (toks > cl->buffer) in htb_accnt_tokens()
662 toks = cl->buffer; in htb_accnt_tokens()
663 toks -= (s64) psched_l2t_ns(&cl->rate, bytes); in htb_accnt_tokens()
664 if (toks <= -cl->mbuffer) in htb_accnt_tokens()
665 toks = 1 - cl->mbuffer; in htb_accnt_tokens()
667 cl->tokens = toks; in htb_accnt_tokens()
672 s64 toks = diff + cl->ctokens; in htb_accnt_ctokens()
674 if (toks > cl->cbuffer) in htb_accnt_ctokens()
675 toks = cl->cbuffer; in htb_accnt_ctokens()
676 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes); in htb_accnt_ctokens()
677 if (toks <= -cl->mbuffer) in htb_accnt_ctokens()
678 toks = 1 - cl->mbuffer; in htb_accnt_ctokens()
680 cl->ctokens = toks; in htb_accnt_ctokens()
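htb_accnt_tokens()/htb_accnt_ctokens() add the elapsed time diff to a bucket, clamp at the configured burst buffer, then subtract the transmission time of the packet. A hedged sketch of one such update, with psched_l2t_ns() replaced by the plain bytes * NSEC_PER_SEC / rate formula (the kernel uses a precomputed multiplier) and assumed numbers throughout.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* Rough stand-in for psched_l2t_ns(): how long "bytes" take at "rate" B/s. */
static int64_t len_to_ns(int64_t bytes, int64_t rate_bytes_per_sec)
{
        return bytes * NSEC_PER_SEC / rate_bytes_per_sec;
}

int main(void)
{
        int64_t rate = 125000;                 /* 1 Mbit/s in bytes/s - assumed  */
        int64_t buffer = 2000000;              /* burst allowance in ns          */
        int64_t mbuffer = 60LL * NSEC_PER_SEC; /* bound for long idle periods    */
        int64_t tokens = 500000;               /* current bucket level in ns     */
        int64_t diff = 1000000;                /* ns elapsed since last update   */
        int64_t bytes = 1500;                  /* packet being charged           */

        int64_t toks = diff + tokens;
        if (toks > buffer)                     /* never save more than one burst */
                toks = buffer;
        toks -= len_to_ns(bytes, rate);        /* pay for the packet just sent   */
        if (toks <= -mbuffer)                  /* clamp the debt like the kernel */
                toks = 1 - mbuffer;

        printf("tokens: %lld -> %lld ns\n", (long long)tokens, (long long)toks);
        return 0;
}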
684 * htb_charge_class - charges amount "bytes" to leaf and ancestors
706 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
707 if (cl->level >= level) { in htb_charge_class()
708 if (cl->level == level) in htb_charge_class()
709 cl->xstats.lends++; in htb_charge_class()
712 cl->xstats.borrows++; in htb_charge_class()
713 cl->tokens += diff; /* we moved t_c; update tokens */ in htb_charge_class()
716 cl->t_c = q->now; in htb_charge_class()
718 old_mode = cl->cmode; in htb_charge_class()
721 if (old_mode != cl->cmode) { in htb_charge_class()
723 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
724 if (cl->cmode != HTB_CAN_SEND) in htb_charge_class()
729 if (cl->level) in htb_charge_class()
730 bstats_update(&cl->bstats, skb); in htb_charge_class()
732 cl = cl->parent; in htb_charge_class()
737 * htb_do_events - make mode changes to classes at the level
739 * @level: which wait_pq in 'q->hlevel'
743 * next pending event (0 for no event in pq, q->now for too many events).
744 * Note: Only events with cl->pq_key <= q->now are applied.
754 struct rb_root *wait_pq = &q->hlevel[level].wait_pq; in htb_do_events()
765 if (cl->pq_key > q->now) in htb_do_events()
766 return cl->pq_key; in htb_do_events()
769 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
771 if (cl->cmode != HTB_CAN_SEND) in htb_do_events()
775 /* too much load - let's continue after a break for scheduling */ in htb_do_events()
776 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { in htb_do_events()
778 q->warned |= HTB_WARN_TOOMANYEVENTS; in htb_do_events()
781 return q->now; in htb_do_events()
784 /* Returns class->node+prio from id-tree where class's id is >= id. NULL
795 if (id > cl->common.classid) { in htb_id_find_next_upper()
796 n = n->rb_right; in htb_id_find_next_upper()
797 } else if (id < cl->common.classid) { in htb_id_find_next_upper()
799 n = n->rb_left; in htb_id_find_next_upper()
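htb_id_find_next_upper() recovers the round-robin position after the remembered class disappeared by finding the class with the smallest classid that is still >= the stored id. The same "lower bound" lookup on a sorted array, as a standalone illustration (the classids are arbitrary).

#include <stdio.h>

/* Return the index of the smallest element >= id, or -1 if there is none
 * (the rbtree version returns the matching rb_node instead).
 */
static int find_next_upper(const unsigned int *ids, int n, unsigned int id)
{
        int lo = 0, hi = n, best = -1;

        while (lo < hi) {
                int mid = lo + (hi - lo) / 2;

                if (ids[mid] >= id) {
                        best = mid;     /* candidate; keep looking further left */
                        hi = mid;
                } else {
                        lo = mid + 1;
                }
        }
        return best;
}

int main(void)
{
        /* Classids in ascending order, as the per-priority rbtree keeps them. */
        unsigned int ids[] = { 0x10002, 0x10005, 0x10009 };

        int idx = find_next_upper(ids, 3, 0x10003);
        if (idx >= 0)
                printf("next class with id >= 0x10003 is %#x\n", ids[idx]);
        return 0;
}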
808 * htb_lookup_leaf - returns next leaf class in DRR order
823 BUG_ON(!hprio->row.rb_node); in htb_lookup_leaf()
824 sp->root = hprio->row.rb_node; in htb_lookup_leaf()
825 sp->pptr = &hprio->ptr; in htb_lookup_leaf()
826 sp->pid = &hprio->last_ptr_id; in htb_lookup_leaf()
829 if (!*sp->pptr && *sp->pid) { in htb_lookup_leaf()
830 /* ptr was invalidated but id is valid - try to recover in htb_lookup_leaf()
833 *sp->pptr = in htb_lookup_leaf()
834 htb_id_find_next_upper(prio, sp->root, *sp->pid); in htb_lookup_leaf()
836 *sp->pid = 0; /* ptr is valid now, so remove this hint as it in htb_lookup_leaf()
839 if (!*sp->pptr) { /* we are at right end; rewind & go up */ in htb_lookup_leaf()
840 *sp->pptr = sp->root; in htb_lookup_leaf()
841 while ((*sp->pptr)->rb_left) in htb_lookup_leaf()
842 *sp->pptr = (*sp->pptr)->rb_left; in htb_lookup_leaf()
844 sp--; in htb_lookup_leaf()
845 if (!*sp->pptr) { in htb_lookup_leaf()
849 htb_next_rb_node(sp->pptr); in htb_lookup_leaf()
855 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
856 if (!cl->level) in htb_lookup_leaf()
858 clp = &cl->inner.clprio[prio]; in htb_lookup_leaf()
859 (++sp)->root = clp->feed.rb_node; in htb_lookup_leaf()
860 sp->pptr = &clp->ptr; in htb_lookup_leaf()
861 sp->pid = &clp->last_ptr_id; in htb_lookup_leaf()
876 struct htb_level *hlevel = &q->hlevel[level]; in htb_dequeue_tree()
877 struct htb_prio *hprio = &hlevel->hprio[prio]; in htb_dequeue_tree()
879 /* look initial class up in the row */ in htb_dequeue_tree()
887 /* class can be empty - it is unlikely but can be true if leaf in htb_dequeue_tree()
892 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
896 /* row/level might become empty */ in htb_dequeue_tree()
897 if ((q->row_mask[level] & (1 << prio)) == 0) in htb_dequeue_tree()
908 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
912 qdisc_warn_nonwc("htb", cl->leaf.q); in htb_dequeue_tree()
913 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr: in htb_dequeue_tree()
914 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
920 bstats_update(&cl->bstats, skb); in htb_dequeue_tree()
921 cl->leaf.deficit[level] -= qdisc_pkt_len(skb); in htb_dequeue_tree()
922 if (cl->leaf.deficit[level] < 0) { in htb_dequeue_tree()
923 cl->leaf.deficit[level] += cl->quantum; in htb_dequeue_tree()
924 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr : in htb_dequeue_tree()
925 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
930 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
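The dequeue loop applies deficit round robin: every dequeued packet is charged against cl->leaf.deficit[level], and only when the deficit goes negative is it refilled by cl->quantum and the round-robin pointer advanced. A minimal sketch of that rule with an assumed quantum and packet sizes.

#include <stdio.h>

int main(void)
{
        int quantum = 1500;              /* bytes a class may send per DRR turn */
        int deficit = 1500;              /* current allowance at this level     */
        int pkt_len[] = { 600, 600, 600 };

        for (int i = 0; i < 3; i++) {
                deficit -= pkt_len[i];   /* charge the packet just dequeued     */
                printf("sent %d bytes, deficit now %d\n", pkt_len[i], deficit);

                if (deficit < 0) {
                        deficit += quantum;  /* refill and give up the turn     */
                        printf("deficit refilled to %d, round robin advances\n",
                               deficit);
                }
        }
        return 0;
}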
946 skb = __qdisc_dequeue_head(&q->direct_queue); in htb_dequeue()
951 sch->q.qlen--; in htb_dequeue()
955 if (!sch->q.qlen) in htb_dequeue()
957 q->now = ktime_get_ns(); in htb_dequeue()
960 next_event = q->now + 5LLU * NSEC_PER_SEC; in htb_dequeue()
963 /* common case optimization - skip event handler quickly */ in htb_dequeue()
965 s64 event = q->near_ev_cache[level]; in htb_dequeue()
967 if (q->now >= event) { in htb_dequeue()
970 event = q->now + NSEC_PER_SEC; in htb_dequeue()
971 q->near_ev_cache[level] = event; in htb_dequeue()
977 m = ~q->row_mask[level]; in htb_dequeue()
978 while (m != (int)(-1)) { in htb_dequeue()
987 if (likely(next_event > q->now)) in htb_dequeue()
988 qdisc_watchdog_schedule_ns(&q->watchdog, next_event); in htb_dequeue()
990 schedule_work(&q->work); in htb_dequeue()
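htb_dequeue() keeps next_event as the earliest deadline over all levels, consulting q->near_ev_cache[] to skip levels with nothing due, and arms the qdisc watchdog for that time when it could not dequeue anything. A rough standalone sketch of that minimum-deadline scan; the cache contents and the one-second placeholder returned by the "event handler" are assumptions.

#include <stdint.h>
#include <stdio.h>

#define TC_HTB_MAXDEPTH 8

int main(void)
{
        int64_t now = 5000;                      /* assumed current time in ns */
        int64_t next_event = now + 5000000000LL; /* default: five seconds out  */
        int64_t near_ev_cache[TC_HTB_MAXDEPTH] = {
                4000, 9000, 7000, 12000, 15000, 15000, 15000, 15000
        };

        for (int level = 0; level < TC_HTB_MAXDEPTH; level++) {
                int64_t event = near_ev_cache[level];

                if (now >= event) {
                        /* The real code runs htb_do_events() here; pretend it
                         * reported "one second from now" for this level.
                         */
                        event = now + 1000000000LL;
                }
                if (next_event > event)
                        next_event = event;      /* keep the earliest deadline */
        }

        /* Nothing was dequeued: arm the qdisc watchdog for next_event. */
        if (next_event > now)
                printf("watchdog armed for t=%lld ns\n", (long long)next_event);
        return 0;
}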
1003 for (i = 0; i < q->clhash.hashsize; i++) { in htb_reset()
1004 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
1005 if (cl->level) in htb_reset()
1006 memset(&cl->inner, 0, sizeof(cl->inner)); in htb_reset()
1008 if (cl->leaf.q && !q->offload) in htb_reset()
1009 qdisc_reset(cl->leaf.q); in htb_reset()
1011 cl->prio_activity = 0; in htb_reset()
1012 cl->cmode = HTB_CAN_SEND; in htb_reset()
1015 qdisc_watchdog_cancel(&q->watchdog); in htb_reset()
1016 __qdisc_reset_queue(&q->direct_queue); in htb_reset()
1017 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
1018 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
1035 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
1051 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt); in htb_offload()
1066 qdisc_watchdog_init(&q->watchdog, sch); in htb_init()
1067 INIT_WORK(&q->work, htb_work_func); in htb_init()
1070 return -EINVAL; in htb_init()
1072 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in htb_init()
1082 return -EINVAL; in htb_init()
1085 if (gopt->version != HTB_VER >> 16) in htb_init()
1086 return -EINVAL; in htb_init()
1091 if (sch->parent != TC_H_ROOT) { in htb_init()
1093 return -EOPNOTSUPP; in htb_init()
1096 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) { in htb_init()
1097 NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on"); in htb_init()
1098 return -EOPNOTSUPP; in htb_init()
1101 q->num_direct_qdiscs = dev->real_num_tx_queues; in htb_init()
1102 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, in htb_init()
1103 sizeof(*q->direct_qdiscs), in htb_init()
1105 if (!q->direct_qdiscs) in htb_init()
1106 return -ENOMEM; in htb_init()
1109 err = qdisc_class_hash_init(&q->clhash); in htb_init()
1114 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init()
1116 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; in htb_init()
1118 if ((q->rate2quantum = gopt->rate2quantum) < 1) in htb_init()
1119 q->rate2quantum = 1; in htb_init()
1120 q->defcls = gopt->defcls; in htb_init()
1125 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_init()
1130 TC_H_MAKE(sch->handle, 0), extack); in htb_init()
1132 return -ENOMEM; in htb_init()
1136 q->direct_qdiscs[ntx] = qdisc; in htb_init()
1137 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in htb_init()
1140 sch->flags |= TCQ_F_MQROOT; in htb_init()
1144 .parent_classid = TC_H_MAJ(sch->handle) >> 16, in htb_init()
1145 .classid = TC_H_MIN(q->defcls), in htb_init()
1152 /* Defer this assignment, so that htb_destroy skips offload-related in htb_init()
1155 q->offload = true; in htb_init()
1166 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_attach_offload()
1167 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx]; in htb_attach_offload()
1169 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); in htb_attach_offload()
1173 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) { in htb_attach_offload()
1180 kfree(q->direct_qdiscs); in htb_attach_offload()
1181 q->direct_qdiscs = NULL; in htb_attach_offload()
1190 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in htb_attach_software()
1204 if (q->offload) in htb_attach()
1216 if (q->offload) in htb_dump()
1217 sch->flags |= TCQ_F_OFFLOADED; in htb_dump()
1219 sch->flags &= ~TCQ_F_OFFLOADED; in htb_dump()
1221 sch->qstats.overlimits = q->overlimits; in htb_dump()
1222 /* It's safe not to acquire the qdisc lock. As we hold RTNL, in htb_dump()
1226 gopt.direct_pkts = q->direct_pkts; in htb_dump()
1228 gopt.rate2quantum = q->rate2quantum; in htb_dump()
1229 gopt.defcls = q->defcls; in htb_dump()
1236 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) in htb_dump()
1238 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump()
1245 return -1; in htb_dump()
1256 /* It's safe not to acquire the qdisc lock. As we hold RTNL, in htb_dump_class()
1259 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; in htb_dump_class()
1260 tcm->tcm_handle = cl->common.classid; in htb_dump_class()
1261 if (!cl->level && cl->leaf.q) in htb_dump_class()
1262 tcm->tcm_info = cl->leaf.q->handle; in htb_dump_class()
1270 psched_ratecfg_getrate(&opt.rate, &cl->rate); in htb_dump_class()
1271 opt.buffer = PSCHED_NS2TICKS(cl->buffer); in htb_dump_class()
1272 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); in htb_dump_class()
1273 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); in htb_dump_class()
1274 opt.quantum = cl->quantum; in htb_dump_class()
1275 opt.prio = cl->prio; in htb_dump_class()
1276 opt.level = cl->level; in htb_dump_class()
1279 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump_class()
1281 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1282 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, in htb_dump_class()
1285 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1286 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, in htb_dump_class()
1294 return -1; in htb_dump_class()
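The dump and stats paths convert between nanoseconds and psched ticks with PSCHED_TICKS2NS()/PSCHED_NS2TICKS(), which are plain shifts by PSCHED_SHIFT. A tiny sketch of the round trip; the shift value of 6 matches recent kernels but should be treated as an assumption here.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the conversion helpers; PSCHED_SHIFT = 6 is an assumption
 * taken from recent kernels (1 tick = 64 ns).
 */
#define PSCHED_SHIFT        6
#define PSCHED_TICKS2NS(x)  ((int64_t)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)  ((x) >> PSCHED_SHIFT)

int main(void)
{
        int64_t ticks = 1600;                   /* assumed tick value    */
        int64_t ns = PSCHED_TICKS2NS(ticks);    /* what the qdisc stores */

        printf("%lld ticks = %lld ns, back to %lld ticks\n",
               (long long)ticks, (long long)ns, (long long)PSCHED_NS2TICKS(ns));
        return 0;
}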
1304 gnet_stats_basic_sync_init(&cl->bstats); in htb_offload_aggregate_stats()
1306 for (i = 0; i < q->clhash.hashsize; i++) { in htb_offload_aggregate_stats()
1307 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { in htb_offload_aggregate_stats()
1310 while (p && p->level < cl->level) in htb_offload_aggregate_stats()
1311 p = p->parent; in htb_offload_aggregate_stats()
1316 bytes += u64_stats_read(&c->bstats_bias.bytes); in htb_offload_aggregate_stats()
1317 packets += u64_stats_read(&c->bstats_bias.packets); in htb_offload_aggregate_stats()
1318 if (c->level == 0) { in htb_offload_aggregate_stats()
1319 bytes += u64_stats_read(&c->leaf.q->bstats.bytes); in htb_offload_aggregate_stats()
1320 packets += u64_stats_read(&c->leaf.q->bstats.packets); in htb_offload_aggregate_stats()
1324 _bstats_update(&cl->bstats, bytes, packets); in htb_offload_aggregate_stats()
1333 .drops = cl->drops, in htb_dump_class_stats()
1334 .overlimits = cl->overlimits, in htb_dump_class_stats()
1338 if (!cl->level && cl->leaf.q) in htb_dump_class_stats()
1339 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); in htb_dump_class_stats()
1341 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), in htb_dump_class_stats()
1343 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), in htb_dump_class_stats()
1346 if (q->offload) { in htb_dump_class_stats()
1347 if (!cl->level) { in htb_dump_class_stats()
1348 if (cl->leaf.q) in htb_dump_class_stats()
1349 cl->bstats = cl->leaf.q->bstats; in htb_dump_class_stats()
1351 gnet_stats_basic_sync_init(&cl->bstats); in htb_dump_class_stats()
1352 _bstats_update(&cl->bstats, in htb_dump_class_stats()
1353 u64_stats_read(&cl->bstats_bias.bytes), in htb_dump_class_stats()
1354 u64_stats_read(&cl->bstats_bias.packets)); in htb_dump_class_stats()
1360 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || in htb_dump_class_stats()
1361 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in htb_dump_class_stats()
1363 return -1; in htb_dump_class_stats()
1365 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in htb_dump_class_stats()
1376 if (!q->offload) in htb_select_queue()
1377 return sch->dev_queue; in htb_select_queue()
1381 .classid = TC_H_MIN(tcm->tcm_parent), in htb_select_queue()
1384 if (err || offload_opt.qid >= dev->num_tx_queues) in htb_select_queue()
1392 struct net_device *dev = dev_queue->dev; in htb_graft_helper()
1395 if (dev->flags & IFF_UP) in htb_graft_helper()
1399 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in htb_graft_helper()
1400 if (dev->flags & IFF_UP) in htb_graft_helper()
1410 queue = cl->leaf.offload_queue; in htb_offload_get_queue()
1411 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_get_queue()
1412 WARN_ON(cl->leaf.q->dev_queue != queue); in htb_offload_get_queue()
1429 if (dev->flags & IFF_UP) in htb_offload_move_qdisc()
1432 WARN_ON(qdisc != cl_old->leaf.q); in htb_offload_move_qdisc()
1435 if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_move_qdisc()
1436 cl_old->leaf.q->dev_queue = queue_new; in htb_offload_move_qdisc()
1437 cl_old->leaf.offload_queue = queue_new; in htb_offload_move_qdisc()
1442 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q); in htb_offload_move_qdisc()
1443 if (dev->flags & IFF_UP) in htb_offload_move_qdisc()
1445 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN)); in htb_offload_move_qdisc()
1452 struct netdev_queue *dev_queue = sch->dev_queue; in htb_graft()
1457 if (cl->level) in htb_graft()
1458 return -EINVAL; in htb_graft()
1460 if (q->offload) in htb_graft()
1465 cl->common.classid, extack); in htb_graft()
1467 return -ENOBUFS; in htb_graft()
1470 if (q->offload) { in htb_graft()
1472 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_graft()
1477 *old = qdisc_replace(sch, new, &cl->leaf.q); in htb_graft()
1479 if (q->offload) { in htb_graft()
1490 return !cl->level ? cl->leaf.q : NULL; in htb_leaf()
1502 if (!cl->parent) in htb_parent_last_child()
1505 if (cl->parent->children > 1) in htb_parent_last_child()
1515 struct htb_class *parent = cl->parent; in htb_parent_to_leaf()
1517 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1519 if (parent->cmode != HTB_CAN_SEND) in htb_parent_to_leaf()
1520 htb_safe_rb_erase(&parent->pq_node, in htb_parent_to_leaf()
1521 &q->hlevel[parent->level].wait_pq); in htb_parent_to_leaf()
1523 parent->level = 0; in htb_parent_to_leaf()
1524 memset(&parent->inner, 0, sizeof(parent->inner)); in htb_parent_to_leaf()
1525 parent->leaf.q = new_q ? new_q : &noop_qdisc; in htb_parent_to_leaf()
1526 parent->tokens = parent->buffer; in htb_parent_to_leaf()
1527 parent->ctokens = parent->cbuffer; in htb_parent_to_leaf()
1528 parent->t_c = ktime_get_ns(); in htb_parent_to_leaf()
1529 parent->cmode = HTB_CAN_SEND; in htb_parent_to_leaf()
1530 if (q->offload) in htb_parent_to_leaf()
1531 parent->leaf.offload_queue = cl->leaf.offload_queue; in htb_parent_to_leaf()
1540 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_parent_to_leaf_offload()
1544 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); in htb_parent_to_leaf_offload()
1553 struct Qdisc *q = cl->leaf.q; in htb_destroy_class_offload()
1557 if (cl->level) in htb_destroy_class_offload()
1558 return -EINVAL; in htb_destroy_class_offload()
1568 /* Last qdisc grafted should be the same as cl->leaf.q when in htb_destroy_class_offload()
1574 if (cl->parent) { in htb_destroy_class_offload()
1575 _bstats_update(&cl->parent->bstats_bias, in htb_destroy_class_offload()
1576 u64_stats_read(&q->bstats.bytes), in htb_destroy_class_offload()
1577 u64_stats_read(&q->bstats.packets)); in htb_destroy_class_offload()
1584 .classid = cl->common.classid, in htb_destroy_class_offload()
1599 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) { in htb_destroy_class_offload()
1600 u32 classid = TC_H_MAJ(sch->handle) | in htb_destroy_class_offload()
1612 if (!cl->level) { in htb_destroy_class()
1613 WARN_ON(!cl->leaf.q); in htb_destroy_class()
1614 qdisc_put(cl->leaf.q); in htb_destroy_class()
1616 gen_kill_estimator(&cl->rate_est); in htb_destroy_class()
1617 tcf_block_put(cl->block); in htb_destroy_class()
1631 cancel_work_sync(&q->work); in htb_destroy()
1632 qdisc_watchdog_cancel(&q->watchdog); in htb_destroy()
1638 tcf_block_put(q->block); in htb_destroy()
1640 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1641 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_destroy()
1642 tcf_block_put(cl->block); in htb_destroy()
1643 cl->block = NULL; in htb_destroy()
1650 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1651 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1655 if (!q->offload) { in htb_destroy()
1662 if (cl->level) in htb_destroy()
1670 qdisc_class_hash_remove(&q->clhash, in htb_destroy()
1671 &cl->common); in htb_destroy()
1672 if (cl->parent) in htb_destroy()
1673 cl->parent->children--; in htb_destroy()
1682 qdisc_class_hash_destroy(&q->clhash); in htb_destroy()
1683 __qdisc_reset_queue(&q->direct_queue); in htb_destroy()
1685 if (q->offload) { in htb_destroy()
1692 if (!q->direct_qdiscs) in htb_destroy()
1694 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) in htb_destroy()
1695 qdisc_put(q->direct_qdiscs[i]); in htb_destroy()
1696 kfree(q->direct_qdiscs); in htb_destroy()
1712 if (cl->children || qdisc_class_in_use(&cl->common)) { in htb_delete()
1714 return -EBUSY; in htb_delete()
1717 if (!cl->level && htb_parent_last_child(cl)) in htb_delete()
1720 if (q->offload) { in htb_delete()
1728 struct netdev_queue *dev_queue = sch->dev_queue; in htb_delete()
1730 if (q->offload) in htb_delete()
1734 cl->parent->common.classid, in htb_delete()
1736 if (q->offload) { in htb_delete()
1745 if (!cl->level) in htb_delete()
1746 qdisc_purge_queue(cl->leaf.q); in htb_delete()
1749 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1750 if (cl->parent) in htb_delete()
1751 cl->parent->children--; in htb_delete()
1753 if (cl->prio_activity) in htb_delete()
1756 if (cl->cmode != HTB_CAN_SEND) in htb_delete()
1757 htb_safe_rb_erase(&cl->pq_node, in htb_delete()
1758 &q->hlevel[cl->level].wait_pq); in htb_delete()
1773 int err = -EINVAL; in htb_change_class()
1794 err = -EINVAL; in htb_change_class()
1801 if (!hopt->rate.rate || !hopt->ceil.rate) in htb_change_class()
1804 if (q->offload) { in htb_change_class()
1806 if (hopt->rate.overhead || hopt->ceil.overhead) { in htb_change_class()
1810 if (hopt->rate.mpu || hopt->ceil.mpu) { in htb_change_class()
1817 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) in htb_change_class()
1818 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB], in htb_change_class()
1821 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) in htb_change_class()
1822 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB], in htb_change_class()
1848 if (!classid || TC_H_MAJ(classid ^ sch->handle) || in htb_change_class()
1853 if (parent && parent->parent && parent->parent->level < 2) { in htb_change_class()
1857 err = -ENOBUFS; in htb_change_class()
1862 gnet_stats_basic_sync_init(&cl->bstats); in htb_change_class()
1863 gnet_stats_basic_sync_init(&cl->bstats_bias); in htb_change_class()
1865 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in htb_change_class()
1871 err = gen_new_estimator(&cl->bstats, NULL, in htb_change_class()
1872 &cl->rate_est, in htb_change_class()
1880 cl->children = 0; in htb_change_class()
1881 RB_CLEAR_NODE(&cl->pq_node); in htb_change_class()
1884 RB_CLEAR_NODE(&cl->node[prio]); in htb_change_class()
1886 cl->common.classid = classid; in htb_change_class()
1895 * -- thanks to Karlis Peisenieks in htb_change_class()
1897 if (!q->offload) { in htb_change_class()
1898 dev_queue = sch->dev_queue; in htb_change_class()
1899 } else if (!(parent && !parent->level)) { in htb_change_class()
1903 .classid = cl->common.classid, in htb_change_class()
1905 TC_H_MIN(parent->common.classid) : in htb_change_class()
1907 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
1908 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
1909 .prio = hopt->prio, in htb_change_class()
1910 .quantum = hopt->quantum, in htb_change_class()
1923 WARN_ON(old_q != parent->leaf.q); in htb_change_class()
1926 .classid = cl->common.classid, in htb_change_class()
1928 TC_H_MIN(parent->common.classid), in htb_change_class()
1929 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
1930 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
1931 .prio = hopt->prio, in htb_change_class()
1932 .quantum = hopt->quantum, in htb_change_class()
1942 _bstats_update(&parent->bstats_bias, in htb_change_class()
1943 u64_stats_read(&old_q->bstats.bytes), in htb_change_class()
1944 u64_stats_read(&old_q->bstats.packets)); in htb_change_class()
1949 if (q->offload) { in htb_change_class()
1952 /* One ref for cl->leaf.q, the other for in htb_change_class()
1953 * dev_queue->qdisc. in htb_change_class()
1959 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); in htb_change_class()
1962 if (parent && !parent->level) { in htb_change_class()
1964 qdisc_purge_queue(parent->leaf.q); in htb_change_class()
1965 parent_qdisc = parent->leaf.q; in htb_change_class()
1966 if (parent->prio_activity) in htb_change_class()
1970 if (parent->cmode != HTB_CAN_SEND) { in htb_change_class()
1971 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); in htb_change_class()
1972 parent->cmode = HTB_CAN_SEND; in htb_change_class()
1974 parent->level = (parent->parent ? parent->parent->level in htb_change_class()
1975 : TC_HTB_MAXDEPTH) - 1; in htb_change_class()
1976 memset(&parent->inner, 0, sizeof(parent->inner)); in htb_change_class()
1980 cl->leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1981 if (q->offload) in htb_change_class()
1982 cl->leaf.offload_queue = dev_queue; in htb_change_class()
1984 cl->parent = parent; in htb_change_class()
1987 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1988 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1989 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ in htb_change_class()
1990 cl->t_c = ktime_get_ns(); in htb_change_class()
1991 cl->cmode = HTB_CAN_SEND; in htb_change_class()
1994 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1996 parent->children++; in htb_change_class()
1997 if (cl->leaf.q != &noop_qdisc) in htb_change_class()
1998 qdisc_hash_add(cl->leaf.q, true); in htb_change_class()
2001 err = gen_replace_estimator(&cl->bstats, NULL, in htb_change_class()
2002 &cl->rate_est, in htb_change_class()
2010 if (q->offload) { in htb_change_class()
2015 .classid = cl->common.classid, in htb_change_class()
2016 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
2017 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
2018 .prio = hopt->prio, in htb_change_class()
2019 .quantum = hopt->quantum, in htb_change_class()
2036 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); in htb_change_class()
2037 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); in htb_change_class()
2040 * is really a leaf before changing cl->leaf! in htb_change_class()
2042 if (!cl->level) { in htb_change_class()
2043 u64 quantum = cl->rate.rate_bytes_ps; in htb_change_class()
2045 do_div(quantum, q->rate2quantum); in htb_change_class()
2046 cl->quantum = min_t(u64, quantum, INT_MAX); in htb_change_class()
2048 if (!hopt->quantum && cl->quantum < 1000) { in htb_change_class()
2049 warn = -1; in htb_change_class()
2050 cl->quantum = 1000; in htb_change_class()
2052 if (!hopt->quantum && cl->quantum > 200000) { in htb_change_class()
2054 cl->quantum = 200000; in htb_change_class()
2056 if (hopt->quantum) in htb_change_class()
2057 cl->quantum = hopt->quantum; in htb_change_class()
2058 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) in htb_change_class()
2059 cl->prio = TC_HTB_NUMPRIO - 1; in htb_change_class()
2062 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
2063 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
2071 cl->common.classid, (warn == -1 ? "small" : "big")); in htb_change_class()
2073 qdisc_class_hash_grow(sch, &q->clhash); in htb_change_class()
2079 gen_kill_estimator(&cl->rate_est); in htb_change_class()
2081 tcf_block_put(cl->block); in htb_change_class()
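For a leaf class with no explicit quantum, htb_change_class() derives it as rate_bytes_ps / rate2quantum and clamps it to the 1000..200000 byte range, printing the "quantum of class X is small/big" warning when it had to clamp. A sketch of that derivation with an assumed rate and the default rate2quantum of 10.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t rate_bytes_ps = 1250000;  /* 10 Mbit/s - assumed example     */
        uint32_t rate2quantum = 10;        /* HTB default divisor             */
        uint32_t user_quantum = 0;         /* 0: no quantum given by the user */

        uint64_t quantum = rate_bytes_ps / rate2quantum;
        const char *warn = NULL;

        if (!user_quantum && quantum < 1000) {
                quantum = 1000;            /* too small: clamp upward         */
                warn = "small";
        }
        if (!user_quantum && quantum > 200000) {
                quantum = 200000;          /* too big: clamp downward         */
                warn = "big";
        }
        if (user_quantum)
                quantum = user_quantum;

        if (warn)
                printf("quantum clamped, it was too %s\n", warn);
        printf("quantum = %llu bytes per DRR turn\n", (unsigned long long)quantum);
        return 0;
}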
2093 return cl ? cl->block : q->block; in htb_tcf_block()
2101 /*if (cl && !cl->level) return 0; in htb_bind_filter()
2105 * ---- in htb_bind_filter()
2106 * 19.6.2002 As Werner explained it is ok - bind filter is just in htb_bind_filter()
2107 * another way to "lock" the class - unlike "get" this lock can in htb_bind_filter()
2111 qdisc_class_get(&cl->common); in htb_bind_filter()
2119 qdisc_class_put(&cl->common); in htb_unbind_filter()
2128 if (arg->stop) in htb_walk()
2131 for (i = 0; i < q->clhash.hashsize; i++) { in htb_walk()
2132 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()