// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

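/* mqprio is a root-only, classful qdisc that maps skb priorities to
 * hardware traffic classes and runs one child qdisc per TX queue.
 * Illustrative userspace usage (tc from iproute2; the device name and
 * values below are an example, not taken from this file):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 hw 0
 */
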
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

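/* Validate the legacy struct tc_mqprio_qopt from TCA_OPTIONS: range-check
 * num_tc and the prio-to-tc map, clamp the offload flag, and (for the
 * software path) check that the per-tc offset/count ranges fit within
 * real_num_tx_queues without overlapping.
 */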
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; a count reaching
		 * real_num_tx_queues means the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

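/* mqprio's TCA_OPTIONS is not a conventional nest: a fixed-size
 * struct tc_mqprio_qopt comes first and any netlink attributes follow
 * it, so parsing starts NLA_ALIGN(len) bytes into the payload.
 */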
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

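/* Parse the hardware-mode attributes (mode, shaper, per-tc min/max
 * rates) that may trail the legacy qopt inside TCA_OPTIONS.
 */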
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int i, rem, err;

	err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
			 sizeof(*qopt));
	if (err < 0)
		return err;

	if (!qopt->hw) {
		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
		return -EINVAL;
	}

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	return 0;
}

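/* Validate the request, pre-allocate one default child qdisc per TX
 * queue, then hand the mapping to the driver (hardware offload) or
 * program the netdev traffic-class state directly.
 */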
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
		if (err)
			return err;
	}

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc; otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;

	return 0;
}

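/* Graft the pre-allocated children onto their TX queues; the array is
 * only needed until attach, so it is freed here.
 */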
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

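/* Swap the child qdisc of a single TX queue. The device is quiesced
 * around the graft so the old qdisc is not mid-transmit when replaced.
 */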
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

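/* Emit the configured per-tc rates as nested
 * TCA_MQPRIO_{MIN,MAX}_RATE64 attributes, one u64 per traffic class.
 */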
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

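/* Fold the per-queue child statistics into the root counters, then
 * dump the legacy qopt followed by the hardware-mode attributes.
 */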
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return rtnl_dereference(dev_queue->qdisc_sleeping);
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

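/* Example of the layout above, assuming a device with 8 TX queues and
 * 3 traffic classes: minors 1..8 address the per-queue qdiscs, while
 * minors TC_H_MIN_PRIORITY..TC_H_MIN_PRIORITY + 2 address the virtual
 * traffic-class classes.
 */
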
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

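/* For a traffic-class class, aggregate the stats of every queue qdisc
 * in that tc's queue range; for a per-queue class, copy the child's
 * stats directly.
 */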
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);
		/* Drop the lock here; it is reclaimed before touching
		 * statistics. This is required because the d->lock we
		 * hold here is the lock on dev_queue->qdisc_sleeping,
		 * which is also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = rtnl_dereference(dev_queue->qdisc_sleeping);
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

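/* Walk the class space in two passes: the virtual per-tc classes
 * first, then the per-queue classes that back real qdiscs.
 */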
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");