// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* 1 band FIFO pseudo-"scheduler" */

static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
{
        if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
                   READ_ONCE(sch->limit)))
                return qdisc_enqueue_tail(skb, sch);

        return qdisc_drop(skb, sch, to_free);
}

static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
{
        if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
                return qdisc_enqueue_tail(skb, sch);

        return qdisc_drop(skb, sch, to_free);
}

static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                              struct sk_buff **to_free)
{
        unsigned int prev_backlog;

        if (unlikely(READ_ONCE(sch->limit) == 0))
                return qdisc_drop(skb, sch, to_free);

        if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
                return qdisc_enqueue_tail(skb, sch);

        prev_backlog = sch->qstats.backlog;
        /* queue full, remove one skb to fulfill the limit */
        __qdisc_queue_drop_head(sch, &sch->q, to_free);
        qdisc_qstats_drop(sch);
        qdisc_enqueue_tail(skb, sch);

        qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_CN;
}

static void fifo_offload_init(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_fifo_qopt_offload qopt;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        qopt.command = TC_FIFO_REPLACE;
        qopt.handle = sch->handle;
        qopt.parent = sch->parent;
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static void fifo_offload_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_fifo_qopt_offload qopt;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        qopt.command = TC_FIFO_DESTROY;
        qopt.handle = sch->handle;
        qopt.parent = sch->parent;
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static int fifo_offload_dump(struct Qdisc *sch)
{
        struct tc_fifo_qopt_offload qopt;

        qopt.command = TC_FIFO_STATS;
        qopt.handle = sch->handle;
        qopt.parent = sch->parent;
        qopt.stats.bstats = &sch->bstats;
        qopt.stats.qstats = &sch->qstats;

        return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
}

static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        bool bypass;
        bool is_bfifo = sch->ops == &bfifo_qdisc_ops;

        if (opt == NULL) {
                u32 limit = qdisc_dev(sch)->tx_queue_len;

                if (is_bfifo)
                        limit *= psched_mtu(qdisc_dev(sch));

                WRITE_ONCE(sch->limit, limit);
        } else {
                struct tc_fifo_qopt *ctl = nla_data(opt);

                if (nla_len(opt) < sizeof(*ctl))
                        return -EINVAL;

                WRITE_ONCE(sch->limit, ctl->limit);
        }

        if (is_bfifo)
                bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
        else
                bypass = sch->limit >= 1;

        if (bypass)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;

        return 0;
}
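/*
 * Illustrative note (an editor's sketch, not part of the original file):
 * the limit stored by __fifo_init() above is counted in packets for
 * pfifo/pfifo_head_drop and in bytes for bfifo.  From user space it is
 * normally configured via iproute2's tc; "eth0" below is an example
 * device:
 *
 *	# packet-counted FIFO, at most 100 packets queued
 *	tc qdisc add dev eth0 root pfifo limit 100
 *
 *	# byte-counted FIFO, at most 64 kB queued
 *	tc qdisc add dev eth0 root bfifo limit 64kb
 *
 * When no TCA_OPTIONS attribute is supplied, the limit falls back to the
 * device tx_queue_len, scaled by the device MTU for bfifo.
 */
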
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        int err;

        err = __fifo_init(sch, opt, extack);
        if (err)
                return err;

        fifo_offload_init(sch);
        return 0;
}

static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        return __fifo_init(sch, opt, extack);
}

static void fifo_destroy(struct Qdisc *sch)
{
        fifo_offload_destroy(sch);
}

static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}

static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        int err;

        err = fifo_offload_dump(sch);
        if (err)
                return err;

        return __fifo_dump(sch, skb);
}

static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        return __fifo_dump(sch, skb);
}

struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
        .id = "pfifo",
        .priv_size = 0,
        .enqueue = pfifo_enqueue,
        .dequeue = qdisc_dequeue_head,
        .peek = qdisc_peek_head,
        .init = fifo_init,
        .destroy = fifo_destroy,
        .reset = qdisc_reset_queue,
        .change = fifo_init,
        .dump = fifo_dump,
        .owner = THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);

struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
        .id = "bfifo",
        .priv_size = 0,
        .enqueue = bfifo_enqueue,
        .dequeue = qdisc_dequeue_head,
        .peek = qdisc_peek_head,
        .init = fifo_init,
        .destroy = fifo_destroy,
        .reset = qdisc_reset_queue,
        .change = fifo_init,
        .dump = fifo_dump,
        .owner = THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);

struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
        .id = "pfifo_head_drop",
        .priv_size = 0,
        .enqueue = pfifo_tail_enqueue,
        .dequeue = qdisc_dequeue_head,
        .peek = qdisc_peek_head,
        .init = fifo_hd_init,
        .reset = qdisc_reset_queue,
        .change = fifo_hd_init,
        .dump = fifo_hd_dump,
        .owner = THIS_MODULE,
};

/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
        struct nlattr *nla;
        int ret = -ENOMEM;

        /* Hack to avoid sending change message to non-FIFO */
        if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
                return 0;

        if (!q->ops->change)
                return 0;

        nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
        if (nla) {
                nla->nla_type = RTM_NEWQDISC;
                nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
                ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

                ret = q->ops->change(q, nla, NULL);
                kfree(nla);
        }
        return ret;
}
EXPORT_SYMBOL(fifo_set_limit);

struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
                               unsigned int limit,
                               struct netlink_ext_ack *extack)
{
        struct Qdisc *q;
        int err = -ENOMEM;

        q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
                              extack);
        if (q) {
                err = fifo_set_limit(q, limit);
                if (err < 0) {
                        qdisc_put(q);
                        q = NULL;
                }
        }

        return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
MODULE_DESCRIPTION("Single queue packet and byte based First In First Out (P/BFIFO) scheduler");
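
/*
 * Usage sketch (an editor's illustration, not part of the original file):
 * a classful qdisc that embeds a FIFO child, e.g. TBF, typically creates
 * it with fifo_create_dflt() and resizes it later with fifo_set_limit().
 * The surrounding variables (sch, limit, extack, new_limit) are assumed
 * to exist in the caller:
 *
 *	struct Qdisc *child;
 *	int err;
 *
 *	child = fifo_create_dflt(sch, &bfifo_qdisc_ops, limit, extack);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *
 *	... later, when the byte limit changes:
 *	err = fifo_set_limit(child, new_limit);
 */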