// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

#include "sch_mqprio_lib.h"

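/* Private data for the mqprio root qdisc.
 *
 * @qdiscs:     shadow array of per-TX-queue child qdiscs, handed over to the
 *              queues in mqprio_attach()
 * @mode:       TC_MQPRIO_MODE_DCB or TC_MQPRIO_MODE_CHANNEL
 * @shaper:     TC_MQPRIO_SHAPER_DCB or TC_MQPRIO_SHAPER_BW_RATE
 * @hw_offload: offload level actually accepted by the driver
 * @flags:      TC_MQPRIO_F_* bits recording which optional attributes were
 *              provided by the user
 * @min_rate:   per-TC minimum rates, used with the bw_rlimit shaper
 * @max_rate:   per-TC maximum rates, used with the bw_rlimit shaper
 * @fp:         per-TC frame preemption state, TC_FP_EXPRESS or
 *              TC_FP_PREEMPTIBLE
 */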
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
	u32 fp[TC_QOPT_MAX_QUEUE];
};

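/* Build a struct tc_mqprio_qopt_offload from the validated configuration and
 * hand it to the driver via ndo_setup_tc(). The driver may adjust qopt.hw,
 * which is recorded as the offload level actually in effect.
 */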
static int mqprio_enable_offload(struct Qdisc *sch,
				 const struct tc_mqprio_qopt *qopt,
				 struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt_offload mqprio = {
		.qopt = *qopt,
		.extack = extack,
	};
	int err, i;

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
		if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
			return -EINVAL;
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		mqprio.flags = priv->flags;
		if (priv->flags & TC_MQPRIO_F_MODE)
			mqprio.mode = priv->mode;
		if (priv->flags & TC_MQPRIO_F_SHAPER)
			mqprio.shaper = priv->shaper;
		if (priv->flags & TC_MQPRIO_F_MIN_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.min_rate[i] = priv->min_rate[i];
		if (priv->flags & TC_MQPRIO_F_MAX_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.max_rate[i] = priv->max_rate[i];
		break;
	default:
		return -EINVAL;
	}

	mqprio_fp_to_offload(priv->fp, &mqprio);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					    &mqprio);
	if (err)
		return err;

	priv->hw_offload = mqprio.qopt.hw;

	return 0;
}

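/* Ask the driver to tear down the offloaded configuration by passing a
 * zeroed offload structure.
 */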
static void mqprio_disable_offload(struct Qdisc *sch)
{
	struct tc_mqprio_qopt_offload mqprio = { { 0 } };
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
	case TC_MQPRIO_MODE_CHANNEL:
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					      &mqprio);
		break;
	}
}

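/* Release the shadow array of child qdiscs (still present only if
 * mqprio_attach() never ran) and undo the traffic class setup, in hardware
 * when it was offloaded.
 */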
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
		mqprio_disable_offload(sch);
	else
		netdev_set_num_tc(dev, 0);
}

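/* Validate the fixed struct tc_mqprio_qopt header of TCA_OPTIONS. */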
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			    const struct tc_mqprio_caps *caps,
			    struct netlink_ext_ack *extack)
{
	int err;

	/* Limit qopt->hw to the maximum supported offload value.  Drivers have
	 * the option of overriding this later if they don't support a given
	 * offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, we will leave 3 options to the
	 * device driver:
	 * - populate the queue counts itself (and ignore what was requested)
	 * - validate the provided queue counts by itself (and apply them)
	 * - request queue count validation here (and apply them)
	 */
	err = mqprio_validate_qopt(dev, qopt,
				   !qopt->hw || caps->validate_queue_counts,
				   false, extack);
	if (err)
		return err;

	/* If ndo_setup_tc is not present then hardware doesn't support offload
	 * and we should return an error.
	 */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support hardware offload");
		return -EINVAL;
	}

	return 0;
}

static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
							 TC_QOPT_MAX_QUEUE),
	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
};

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_TC_ENTRY]	= { .type = NLA_NESTED },
};

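/* Parse a single TCA_MQPRIO_TC_ENTRY nest: a mandatory TC index plus an
 * optional frame preemption setting for that TC.
 */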
static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
				 struct nlattr *opt,
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
	int err, tc;

	err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
			       mqprio_tc_entry_policy, extack);
	if (err < 0)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
		NL_SET_ERR_MSG(extack, "TC entry index missing");
		return -EINVAL;
	}

	tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
	if (*seen_tcs & BIT(tc)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
				    "Duplicate tc entry");
		return -EINVAL;
	}

	*seen_tcs |= BIT(tc);

	if (tb[TCA_MQPRIO_TC_ENTRY_FP])
		fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);

	return 0;
}

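/* Apply all TCA_MQPRIO_TC_ENTRY nests to the per-TC frame preemption table,
 * rejecting preemptible TCs on devices without MAC Merge layer support.
 */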
static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
				   int nlattr_opt_len,
				   struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool have_preemption = false;
	unsigned long seen_tcs = 0;
	u32 fp[TC_QOPT_MAX_QUEUE];
	struct nlattr *n;
	int tc, rem;
	int err = 0;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		fp[tc] = priv->fp[tc];

	nla_for_each_attr_type(n, TCA_MQPRIO_TC_ENTRY, nlattr_opt,
			       nlattr_opt_len, rem) {
		err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
		if (err)
			goto out;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		priv->fp[tc] = fp[tc];
		if (fp[tc] == TC_FP_PREEMPTIBLE)
			have_preemption = true;
	}

	if (have_preemption && !ethtool_dev_mm_supported(dev)) {
		NL_SET_ERR_MSG(extack, "Device does not support preemption");
		return -EOPNOTSUPP;
	}
out:
	return err;
}

/* Parse the other netlink attributes that represent the payload of
 * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
 */
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
	int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
	struct nlattr *attr;
	int i, rem, err;

	if (nlattr_opt_len >= nla_attr_size(0)) {
		err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
					   nlattr_opt_len, mqprio_policy,
					   NULL);
		if (err < 0)
			return err;
	}

	if (!qopt->hw) {
		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
		return -EINVAL;
	}

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = nla_get_u64(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = nla_get_u64(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	if (tb[TCA_MQPRIO_TC_ENTRY]) {
		err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
					      extack);
		if (err)
			return err;
	}

	return 0;
}

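/* Set up the root qdisc: validate the user-supplied configuration, create one
 * child qdisc per TX queue, then either offload the traffic class mapping to
 * the device (hw != 0) or program it in software.
 *
 * An illustrative iproute2 invocation (interface name and queue layout are
 * examples only):
 *
 *   tc qdisc add dev eth0 root handle 100: mqprio num_tc 3 \
 *           map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0
 */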
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct tc_mqprio_caps caps;
	int len, tc;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		priv->fp[tc] = TC_FP_EXPRESS;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
				 &caps, sizeof(caps));

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt, &caps, extack))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
		if (err)
			return err;
	}

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping then run ndo_setup_tc, otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		err = mqprio_enable_offload(sch, qopt, extack);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

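/* Graft the pre-allocated child qdiscs onto their TX queues. Ownership moves
 * to the queues, so the shadow array can be freed.
 */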
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

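/* Map a 1-based queue class id back to its netdev_queue. */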
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

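/* Replace the child qdisc of one TX queue, quiescing the device around the
 * swap while it is up.
 */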
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

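/* Emit the nested per-TC min/max rate attributes when the bw_rlimit shaper
 * parameters were configured.
 */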
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

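/* Emit one TCA_MQPRIO_TC_ENTRY nest per TC, reporting its frame preemption
 * state.
 */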
static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
				  struct sk_buff *skb)
{
	struct nlattr *n;
	int tc;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
		if (!n)
			return -EMSGSIZE;

		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc]))
			goto nla_put_failure;

		nla_nest_end(skb, n);
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, n);
	return -EMSGSIZE;
}

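/* Aggregate statistics from all child qdiscs and dump the reconstructed
 * configuration to user space.
 */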
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	mqprio_qopt_reconstruct(dev, &opt);
	opt.hw = priv->hw_offload;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	if (mqprio_dump_tc_entries(priv, skb))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return rtnl_dereference(dev_queue->qdisc_sleeping);
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

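/* Queue classes report their TC class as parent; TC classes are direct
 * children of the root.
 */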
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

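/* For a TC class, sum the statistics of every queue in its range; for a
 * queue class, copy the statistics of the backing child qdisc.
 */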
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);
		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = rtnl_dereference(dev_queue->qdisc_sleeping);
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

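/* Resolve the netdev_queue addressed by a netlink request's parent class id,
 * so a child qdisc can be attached to the right queue.
 */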
static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("mqprio");

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Classful multiqueue prio qdisc");