/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
#define NET_CLS_ALIAS_PREFIX "net-cls-"
#define MODULE_ALIAS_NET_CLS(kind)	MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
		     int police, struct tcf_proto *tp, u32 handle, bool used_action_miss);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);

static inline bool tcf_block_bypass_sw(struct tcf_block *block)
{
	return block && block->bypass_wanted;
}
#endif

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
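
/*
 * Illustrative use of tc_cls_stats_dump() (a sketch only; the "foo" names
 * are hypothetical): a classifier's ->walk() callback typically iterates
 * its filters and lets the helper maintain the skip/count bookkeeping:
 *
 *	static void foo_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *			     bool rtnl_held)
 *	{
 *		struct foo_filter *f;
 *
 *		list_for_each_entry(f, &foo_head(tp)->flist, link)
 *			if (!tc_cls_stats_dump(tp, arg, f))
 *				break;
 *	}
 */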

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}
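
/*
 * Sketch of a ->bind_class() implementation built on the helper above
 * (illustrative only; the "foo" names are hypothetical):
 *
 *	static void foo_bind_class(void *fh, u32 classid, unsigned long cl,
 *				   void *q, unsigned long base)
 *	{
 *		struct foo_filter *f = fh;
 *
 *		tc_cls_bind_class(classid, cl, q, &f->res, base);
 *	}
 */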

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker	ns_tracker;
	struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS
	return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
#else
	return -EOPNOTSUPP;
#endif
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case; otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}
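
/*
 * Illustrative pattern (a sketch of how classifiers commonly pair the two
 * helpers above; the "foo" names are hypothetical): when a filter is
 * deleted, destruction of its actions is deferred to a workqueue unless
 * the netns is already going away:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *	else
 *		__foo_delete_filter(f);
 *
 * The work handler then calls tcf_exts_put_net() before freeing the filter.
 */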

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)

static inline bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 struct flow_stats *stats,
			 bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		if (use_act_stats || tc_act_in_hw(a)) {
			if (!tcf_action_update_hw_stats(a))
				continue;
		}

		preempt_disable();
		tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
					stats->lastused, true);
		preempt_enable();

		a->used_hw_stats = stats->used_hw_stats;
		a->used_hw_stats_valid = stats->used_hw_stats_valid;
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
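
/*
 * Illustrative call site (a sketch, not mandated by this header): a
 * classifier's ->classify() typically finishes a successful match with
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;	(treat the filter as unmatched)
 *	return err;
 */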

static inline int
tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
		 struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions + act_index,
			       exts->nr_actions - act_index, res);
#else
	return TC_ACT_OK;
#endif
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is certain.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
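
/*
 * Illustrative use (sketch): a classifier that supports ematches typically
 * evaluates the tree on its fast path before accepting a match, e.g.
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */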

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
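
/*
 * For example (illustrative): TCA_CLS_FLAGS_SKIP_HW alone and
 * TCA_CLS_FLAGS_SKIP_SW alone are both valid, but their combination is
 * rejected, since a filter skipped in both software and hardware would
 * never run anywhere.
 */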

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	bool use_act_stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

/* This structure holds the cookie that user space passes to the kernel
 * for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u32 quantum;
	u64 rate;
	u64 ceil;
	u8 prio;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif

#endif