1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * net/core/fib_rules.c		Generic Routing Rules
4   *
5   * Authors:	Thomas Graf <tgraf@suug.ch>
6   */
7  
8  #include <linux/types.h>
9  #include <linux/kernel.h>
10  #include <linux/slab.h>
11  #include <linux/list.h>
12  #include <linux/module.h>
13  #include <net/net_namespace.h>
14  #include <net/inet_dscp.h>
15  #include <net/sock.h>
16  #include <net/fib_rules.h>
17  #include <net/ip_tunnels.h>
18  #include <linux/indirect_call_wrapper.h>
19  
20  #if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
21  #ifdef CONFIG_IP_MULTIPLE_TABLES
22  #define INDIRECT_CALL_MT(f, f2, f1, ...) \
23  	INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
24  #else
25  #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
26  #endif
27  #elif defined(CONFIG_IP_MULTIPLE_TABLES)
28  #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
29  #else
30  #define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__)
31  #endif
32  
33  static const struct fib_kuid_range fib_kuid_range_unset = {
34  	KUIDT_INIT(0),
35  	KUIDT_INIT(~0),
36  };
37  
fib_rule_matchall(const struct fib_rule * rule)38  bool fib_rule_matchall(const struct fib_rule *rule)
39  {
40  	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
41  	    rule->flags)
42  		return false;
43  	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
44  		return false;
45  	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
46  	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
47  		return false;
48  	if (fib_rule_port_range_set(&rule->sport_range))
49  		return false;
50  	if (fib_rule_port_range_set(&rule->dport_range))
51  		return false;
52  	return true;
53  }
54  EXPORT_SYMBOL_GPL(fib_rule_matchall);
55  
fib_default_rule_add(struct fib_rules_ops * ops,u32 pref,u32 table)56  int fib_default_rule_add(struct fib_rules_ops *ops,
57  			 u32 pref, u32 table)
58  {
59  	struct fib_rule *r;
60  
61  	r = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
62  	if (r == NULL)
63  		return -ENOMEM;
64  
65  	refcount_set(&r->refcnt, 1);
66  	r->action = FR_ACT_TO_TBL;
67  	r->pref = pref;
68  	r->table = table;
69  	r->proto = RTPROT_KERNEL;
70  	r->fr_net = ops->fro_net;
71  	r->uid_range = fib_kuid_range_unset;
72  
73  	r->suppress_prefixlen = -1;
74  	r->suppress_ifgroup = -1;
75  
76  	/* The lock is not required here, the list in unreachable
77  	 * at the moment this function is called */
78  	list_add_tail(&r->list, &ops->rules_list);
79  	return 0;
80  }
81  EXPORT_SYMBOL(fib_default_rule_add);
82  
fib_default_rule_pref(struct fib_rules_ops * ops)83  static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
84  {
85  	struct list_head *pos;
86  	struct fib_rule *rule;
87  
88  	if (!list_empty(&ops->rules_list)) {
89  		pos = ops->rules_list.next;
90  		if (pos->next != &ops->rules_list) {
91  			rule = list_entry(pos->next, struct fib_rule, list);
92  			if (rule->pref)
93  				return rule->pref - 1;
94  		}
95  	}
96  
97  	return 0;
98  }
99  
100  static void notify_rule_change(int event, struct fib_rule *rule,
101  			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
102  			       u32 pid);
103  
lookup_rules_ops(struct net * net,int family)104  static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
105  {
106  	struct fib_rules_ops *ops;
107  
108  	rcu_read_lock();
109  	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
110  		if (ops->family == family) {
111  			if (!try_module_get(ops->owner))
112  				ops = NULL;
113  			rcu_read_unlock();
114  			return ops;
115  		}
116  	}
117  	rcu_read_unlock();
118  
119  	return NULL;
120  }
121  
rules_ops_put(struct fib_rules_ops * ops)122  static void rules_ops_put(struct fib_rules_ops *ops)
123  {
124  	if (ops)
125  		module_put(ops->owner);
126  }
127  
flush_route_cache(struct fib_rules_ops * ops)128  static void flush_route_cache(struct fib_rules_ops *ops)
129  {
130  	if (ops->flush_cache)
131  		ops->flush_cache(ops);
132  }
133  
__fib_rules_register(struct fib_rules_ops * ops)134  static int __fib_rules_register(struct fib_rules_ops *ops)
135  {
136  	int err = -EEXIST;
137  	struct fib_rules_ops *o;
138  	struct net *net;
139  
140  	net = ops->fro_net;
141  
142  	if (ops->rule_size < sizeof(struct fib_rule))
143  		return -EINVAL;
144  
145  	if (ops->match == NULL || ops->configure == NULL ||
146  	    ops->compare == NULL || ops->fill == NULL ||
147  	    ops->action == NULL)
148  		return -EINVAL;
149  
150  	spin_lock(&net->rules_mod_lock);
151  	list_for_each_entry(o, &net->rules_ops, list)
152  		if (ops->family == o->family)
153  			goto errout;
154  
155  	list_add_tail_rcu(&ops->list, &net->rules_ops);
156  	err = 0;
157  errout:
158  	spin_unlock(&net->rules_mod_lock);
159  
160  	return err;
161  }
162  
163  struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops * tmpl,struct net * net)164  fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
165  {
166  	struct fib_rules_ops *ops;
167  	int err;
168  
169  	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
170  	if (ops == NULL)
171  		return ERR_PTR(-ENOMEM);
172  
173  	INIT_LIST_HEAD(&ops->rules_list);
174  	ops->fro_net = net;
175  
176  	err = __fib_rules_register(ops);
177  	if (err) {
178  		kfree(ops);
179  		ops = ERR_PTR(err);
180  	}
181  
182  	return ops;
183  }
184  EXPORT_SYMBOL_GPL(fib_rules_register);
185  
fib_rules_cleanup_ops(struct fib_rules_ops * ops)186  static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
187  {
188  	struct fib_rule *rule, *tmp;
189  
190  	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
191  		list_del_rcu(&rule->list);
192  		if (ops->delete)
193  			ops->delete(rule);
194  		fib_rule_put(rule);
195  	}
196  }
197  
fib_rules_unregister(struct fib_rules_ops * ops)198  void fib_rules_unregister(struct fib_rules_ops *ops)
199  {
200  	struct net *net = ops->fro_net;
201  
202  	spin_lock(&net->rules_mod_lock);
203  	list_del_rcu(&ops->list);
204  	spin_unlock(&net->rules_mod_lock);
205  
206  	fib_rules_cleanup_ops(ops);
207  	kfree_rcu(ops, rcu);
208  }
209  EXPORT_SYMBOL_GPL(fib_rules_unregister);
210  
uid_range_set(struct fib_kuid_range * range)211  static int uid_range_set(struct fib_kuid_range *range)
212  {
213  	return uid_valid(range->start) && uid_valid(range->end);
214  }
215  
nla_get_kuid_range(struct nlattr ** tb)216  static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
217  {
218  	struct fib_rule_uid_range *in;
219  	struct fib_kuid_range out;
220  
221  	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);
222  
223  	out.start = make_kuid(current_user_ns(), in->start);
224  	out.end = make_kuid(current_user_ns(), in->end);
225  
226  	return out;
227  }
228  
nla_put_uid_range(struct sk_buff * skb,struct fib_kuid_range * range)229  static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
230  {
231  	struct fib_rule_uid_range out = {
232  		from_kuid_munged(current_user_ns(), range->start),
233  		from_kuid_munged(current_user_ns(), range->end)
234  	};
235  
236  	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
237  }
238  
nla_get_port_range(struct nlattr * pattr,struct fib_rule_port_range * port_range)239  static int nla_get_port_range(struct nlattr *pattr,
240  			      struct fib_rule_port_range *port_range)
241  {
242  	const struct fib_rule_port_range *pr = nla_data(pattr);
243  
244  	if (!fib_rule_port_range_valid(pr))
245  		return -EINVAL;
246  
247  	port_range->start = pr->start;
248  	port_range->end = pr->end;
249  
250  	return 0;
251  }
252  
nla_put_port_range(struct sk_buff * skb,int attrtype,struct fib_rule_port_range * range)253  static int nla_put_port_range(struct sk_buff *skb, int attrtype,
254  			      struct fib_rule_port_range *range)
255  {
256  	return nla_put(skb, attrtype, sizeof(*range), range);
257  }
258  
fib_rule_match(struct fib_rule * rule,struct fib_rules_ops * ops,struct flowi * fl,int flags,struct fib_lookup_arg * arg)259  static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
260  			  struct flowi *fl, int flags,
261  			  struct fib_lookup_arg *arg)
262  {
263  	int ret = 0;
264  
265  	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
266  		goto out;
267  
268  	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
269  		goto out;
270  
271  	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
272  		goto out;
273  
274  	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
275  		goto out;
276  
277  	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
278  		goto out;
279  
280  	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
281  	    uid_gt(fl->flowi_uid, rule->uid_range.end))
282  		goto out;
283  
284  	ret = INDIRECT_CALL_MT(ops->match,
285  			       fib6_rule_match,
286  			       fib4_rule_match,
287  			       rule, fl, flags);
288  out:
289  	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
290  }
291  
fib_rules_lookup(struct fib_rules_ops * ops,struct flowi * fl,int flags,struct fib_lookup_arg * arg)292  int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
293  		     int flags, struct fib_lookup_arg *arg)
294  {
295  	struct fib_rule *rule;
296  	int err;
297  
298  	rcu_read_lock();
299  
300  	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
301  jumped:
302  		if (!fib_rule_match(rule, ops, fl, flags, arg))
303  			continue;
304  
305  		if (rule->action == FR_ACT_GOTO) {
306  			struct fib_rule *target;
307  
308  			target = rcu_dereference(rule->ctarget);
309  			if (target == NULL) {
310  				continue;
311  			} else {
312  				rule = target;
313  				goto jumped;
314  			}
315  		} else if (rule->action == FR_ACT_NOP)
316  			continue;
317  		else
318  			err = INDIRECT_CALL_MT(ops->action,
319  					       fib6_rule_action,
320  					       fib4_rule_action,
321  					       rule, fl, flags, arg);
322  
323  		if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
324  							      fib6_rule_suppress,
325  							      fib4_rule_suppress,
326  							      rule, flags, arg))
327  			continue;
328  
329  		if (err != -EAGAIN) {
330  			if ((arg->flags & FIB_LOOKUP_NOREF) ||
331  			    likely(refcount_inc_not_zero(&rule->refcnt))) {
332  				arg->rule = rule;
333  				goto out;
334  			}
335  			break;
336  		}
337  	}
338  
339  	err = -ESRCH;
340  out:
341  	rcu_read_unlock();
342  
343  	return err;
344  }
345  EXPORT_SYMBOL_GPL(fib_rules_lookup);
346  
call_fib_rule_notifier(struct notifier_block * nb,enum fib_event_type event_type,struct fib_rule * rule,int family,struct netlink_ext_ack * extack)347  static int call_fib_rule_notifier(struct notifier_block *nb,
348  				  enum fib_event_type event_type,
349  				  struct fib_rule *rule, int family,
350  				  struct netlink_ext_ack *extack)
351  {
352  	struct fib_rule_notifier_info info = {
353  		.info.family = family,
354  		.info.extack = extack,
355  		.rule = rule,
356  	};
357  
358  	return call_fib_notifier(nb, event_type, &info.info);
359  }
360  
call_fib_rule_notifiers(struct net * net,enum fib_event_type event_type,struct fib_rule * rule,struct fib_rules_ops * ops,struct netlink_ext_ack * extack)361  static int call_fib_rule_notifiers(struct net *net,
362  				   enum fib_event_type event_type,
363  				   struct fib_rule *rule,
364  				   struct fib_rules_ops *ops,
365  				   struct netlink_ext_ack *extack)
366  {
367  	struct fib_rule_notifier_info info = {
368  		.info.family = ops->family,
369  		.info.extack = extack,
370  		.rule = rule,
371  	};
372  
373  	ops->fib_rules_seq++;
374  	return call_fib_notifiers(net, event_type, &info.info);
375  }
376  
377  /* Called with rcu_read_lock() */
fib_rules_dump(struct net * net,struct notifier_block * nb,int family,struct netlink_ext_ack * extack)378  int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
379  		   struct netlink_ext_ack *extack)
380  {
381  	struct fib_rules_ops *ops;
382  	struct fib_rule *rule;
383  	int err = 0;
384  
385  	ops = lookup_rules_ops(net, family);
386  	if (!ops)
387  		return -EAFNOSUPPORT;
388  	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
389  		err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
390  					     rule, family, extack);
391  		if (err)
392  			break;
393  	}
394  	rules_ops_put(ops);
395  
396  	return err;
397  }
398  EXPORT_SYMBOL_GPL(fib_rules_dump);
399  
fib_rules_seq_read(struct net * net,int family)400  unsigned int fib_rules_seq_read(struct net *net, int family)
401  {
402  	unsigned int fib_rules_seq;
403  	struct fib_rules_ops *ops;
404  
405  	ASSERT_RTNL();
406  
407  	ops = lookup_rules_ops(net, family);
408  	if (!ops)
409  		return 0;
410  	fib_rules_seq = ops->fib_rules_seq;
411  	rules_ops_put(ops);
412  
413  	return fib_rules_seq;
414  }
415  EXPORT_SYMBOL_GPL(fib_rules_seq_read);
416  
rule_find(struct fib_rules_ops * ops,struct fib_rule_hdr * frh,struct nlattr ** tb,struct fib_rule * rule,bool user_priority)417  static struct fib_rule *rule_find(struct fib_rules_ops *ops,
418  				  struct fib_rule_hdr *frh,
419  				  struct nlattr **tb,
420  				  struct fib_rule *rule,
421  				  bool user_priority)
422  {
423  	struct fib_rule *r;
424  
425  	list_for_each_entry(r, &ops->rules_list, list) {
426  		if (rule->action && r->action != rule->action)
427  			continue;
428  
429  		if (rule->table && r->table != rule->table)
430  			continue;
431  
432  		if (user_priority && r->pref != rule->pref)
433  			continue;
434  
435  		if (rule->iifname[0] &&
436  		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
437  			continue;
438  
439  		if (rule->oifname[0] &&
440  		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
441  			continue;
442  
443  		if (rule->mark && r->mark != rule->mark)
444  			continue;
445  
446  		if (rule->suppress_ifgroup != -1 &&
447  		    r->suppress_ifgroup != rule->suppress_ifgroup)
448  			continue;
449  
450  		if (rule->suppress_prefixlen != -1 &&
451  		    r->suppress_prefixlen != rule->suppress_prefixlen)
452  			continue;
453  
454  		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
455  			continue;
456  
457  		if (rule->tun_id && r->tun_id != rule->tun_id)
458  			continue;
459  
460  		if (r->fr_net != rule->fr_net)
461  			continue;
462  
463  		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
464  			continue;
465  
466  		if (uid_range_set(&rule->uid_range) &&
467  		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
468  		    !uid_eq(r->uid_range.end, rule->uid_range.end)))
469  			continue;
470  
471  		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
472  			continue;
473  
474  		if (rule->proto && r->proto != rule->proto)
475  			continue;
476  
477  		if (fib_rule_port_range_set(&rule->sport_range) &&
478  		    !fib_rule_port_range_compare(&r->sport_range,
479  						 &rule->sport_range))
480  			continue;
481  
482  		if (fib_rule_port_range_set(&rule->dport_range) &&
483  		    !fib_rule_port_range_compare(&r->dport_range,
484  						 &rule->dport_range))
485  			continue;
486  
487  		if (!ops->compare(r, frh, tb))
488  			continue;
489  		return r;
490  	}
491  
492  	return NULL;
493  }
494  
#ifdef CONFIG_NET_L3_MASTER_DEV
/* Parse FRA_L3MDEV into @nlrule; only the value 1 is valid.
 * Returns 0 on success, -1 (with extack message) otherwise.
 */
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	nlrule->l3mdev = nla_get_u8(nla);
	if (nlrule->l3mdev != 1) {
		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
		return -1;
	}

	return 0;
}
#else
/* Stub: reject FRA_L3MDEV when L3 master device support is compiled out. */
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
	return -1;
}
#endif
515  
fib_nl2rule(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack,struct fib_rules_ops * ops,struct nlattr * tb[],struct fib_rule ** rule,bool * user_priority)516  static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
517  		       struct netlink_ext_ack *extack,
518  		       struct fib_rules_ops *ops,
519  		       struct nlattr *tb[],
520  		       struct fib_rule **rule,
521  		       bool *user_priority)
522  {
523  	struct net *net = sock_net(skb->sk);
524  	struct fib_rule_hdr *frh = nlmsg_data(nlh);
525  	struct fib_rule *nlrule = NULL;
526  	int err = -EINVAL;
527  
528  	if (frh->src_len)
529  		if (!tb[FRA_SRC] ||
530  		    frh->src_len > (ops->addr_size * 8) ||
531  		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
532  			NL_SET_ERR_MSG(extack, "Invalid source address");
533  			goto errout;
534  	}
535  
536  	if (frh->dst_len)
537  		if (!tb[FRA_DST] ||
538  		    frh->dst_len > (ops->addr_size * 8) ||
539  		    nla_len(tb[FRA_DST]) != ops->addr_size) {
540  			NL_SET_ERR_MSG(extack, "Invalid dst address");
541  			goto errout;
542  	}
543  
544  	nlrule = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
545  	if (!nlrule) {
546  		err = -ENOMEM;
547  		goto errout;
548  	}
549  	refcount_set(&nlrule->refcnt, 1);
550  	nlrule->fr_net = net;
551  
552  	if (tb[FRA_PRIORITY]) {
553  		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
554  		*user_priority = true;
555  	} else {
556  		nlrule->pref = fib_default_rule_pref(ops);
557  	}
558  
559  	nlrule->proto = tb[FRA_PROTOCOL] ?
560  		nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;
561  
562  	if (tb[FRA_IIFNAME]) {
563  		struct net_device *dev;
564  
565  		nlrule->iifindex = -1;
566  		nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
567  		dev = __dev_get_by_name(net, nlrule->iifname);
568  		if (dev)
569  			nlrule->iifindex = dev->ifindex;
570  	}
571  
572  	if (tb[FRA_OIFNAME]) {
573  		struct net_device *dev;
574  
575  		nlrule->oifindex = -1;
576  		nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
577  		dev = __dev_get_by_name(net, nlrule->oifname);
578  		if (dev)
579  			nlrule->oifindex = dev->ifindex;
580  	}
581  
582  	if (tb[FRA_FWMARK]) {
583  		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
584  		if (nlrule->mark)
585  			/* compatibility: if the mark value is non-zero all bits
586  			 * are compared unless a mask is explicitly specified.
587  			 */
588  			nlrule->mark_mask = 0xFFFFFFFF;
589  	}
590  
591  	if (tb[FRA_FWMASK])
592  		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
593  
594  	if (tb[FRA_TUN_ID])
595  		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
596  
597  	if (tb[FRA_L3MDEV] &&
598  	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
599  		goto errout_free;
600  
601  	nlrule->action = frh->action;
602  	nlrule->flags = frh->flags;
603  	nlrule->table = frh_get_table(frh, tb);
604  	if (tb[FRA_SUPPRESS_PREFIXLEN])
605  		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
606  	else
607  		nlrule->suppress_prefixlen = -1;
608  
609  	if (tb[FRA_SUPPRESS_IFGROUP])
610  		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
611  	else
612  		nlrule->suppress_ifgroup = -1;
613  
614  	if (tb[FRA_GOTO]) {
615  		if (nlrule->action != FR_ACT_GOTO) {
616  			NL_SET_ERR_MSG(extack, "Unexpected goto");
617  			goto errout_free;
618  		}
619  
620  		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
621  		/* Backward jumps are prohibited to avoid endless loops */
622  		if (nlrule->target <= nlrule->pref) {
623  			NL_SET_ERR_MSG(extack, "Backward goto not supported");
624  			goto errout_free;
625  		}
626  	} else if (nlrule->action == FR_ACT_GOTO) {
627  		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
628  		goto errout_free;
629  	}
630  
631  	if (nlrule->l3mdev && nlrule->table) {
632  		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
633  		goto errout_free;
634  	}
635  
636  	if (tb[FRA_UID_RANGE]) {
637  		if (current_user_ns() != net->user_ns) {
638  			err = -EPERM;
639  			NL_SET_ERR_MSG(extack, "No permission to set uid");
640  			goto errout_free;
641  		}
642  
643  		nlrule->uid_range = nla_get_kuid_range(tb);
644  
645  		if (!uid_range_set(&nlrule->uid_range) ||
646  		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
647  			NL_SET_ERR_MSG(extack, "Invalid uid range");
648  			goto errout_free;
649  		}
650  	} else {
651  		nlrule->uid_range = fib_kuid_range_unset;
652  	}
653  
654  	if (tb[FRA_IP_PROTO])
655  		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);
656  
657  	if (tb[FRA_SPORT_RANGE]) {
658  		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
659  					 &nlrule->sport_range);
660  		if (err) {
661  			NL_SET_ERR_MSG(extack, "Invalid sport range");
662  			goto errout_free;
663  		}
664  	}
665  
666  	if (tb[FRA_DPORT_RANGE]) {
667  		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
668  					 &nlrule->dport_range);
669  		if (err) {
670  			NL_SET_ERR_MSG(extack, "Invalid dport range");
671  			goto errout_free;
672  		}
673  	}
674  
675  	*rule = nlrule;
676  
677  	return 0;
678  
679  errout_free:
680  	kfree(nlrule);
681  errout:
682  	return err;
683  }
684  
rule_exists(struct fib_rules_ops * ops,struct fib_rule_hdr * frh,struct nlattr ** tb,struct fib_rule * rule)685  static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
686  		       struct nlattr **tb, struct fib_rule *rule)
687  {
688  	struct fib_rule *r;
689  
690  	list_for_each_entry(r, &ops->rules_list, list) {
691  		if (r->action != rule->action)
692  			continue;
693  
694  		if (r->table != rule->table)
695  			continue;
696  
697  		if (r->pref != rule->pref)
698  			continue;
699  
700  		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
701  			continue;
702  
703  		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
704  			continue;
705  
706  		if (r->mark != rule->mark)
707  			continue;
708  
709  		if (r->suppress_ifgroup != rule->suppress_ifgroup)
710  			continue;
711  
712  		if (r->suppress_prefixlen != rule->suppress_prefixlen)
713  			continue;
714  
715  		if (r->mark_mask != rule->mark_mask)
716  			continue;
717  
718  		if (r->tun_id != rule->tun_id)
719  			continue;
720  
721  		if (r->fr_net != rule->fr_net)
722  			continue;
723  
724  		if (r->l3mdev != rule->l3mdev)
725  			continue;
726  
727  		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
728  		    !uid_eq(r->uid_range.end, rule->uid_range.end))
729  			continue;
730  
731  		if (r->ip_proto != rule->ip_proto)
732  			continue;
733  
734  		if (r->proto != rule->proto)
735  			continue;
736  
737  		if (!fib_rule_port_range_compare(&r->sport_range,
738  						 &rule->sport_range))
739  			continue;
740  
741  		if (!fib_rule_port_range_compare(&r->dport_range,
742  						 &rule->dport_range))
743  			continue;
744  
745  		if (!ops->compare(r, frh, tb))
746  			continue;
747  		return 1;
748  	}
749  	return 0;
750  }
751  
/* Netlink attribute policy for RTM_NEWRULE/RTM_DELRULE.  Attributes
 * above FRA_DPORT_RANGE are validated strictly; older ones keep
 * deprecated (lenient) parsing for backwards compatibility.
 */
static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
	[FRA_UNSPEC]	= { .strict_start_type = FRA_DPORT_RANGE + 1 },
	[FRA_IIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_OIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_PRIORITY]	= { .type = NLA_U32 },
	[FRA_FWMARK]	= { .type = NLA_U32 },
	[FRA_FLOW]	= { .type = NLA_U32 },
	[FRA_TUN_ID]	= { .type = NLA_U64 },
	[FRA_FWMASK]	= { .type = NLA_U32 },
	[FRA_TABLE]     = { .type = NLA_U32 },
	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 },
	[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 },
	[FRA_GOTO]	= { .type = NLA_U32 },
	[FRA_L3MDEV]	= { .type = NLA_U8 },
	[FRA_UID_RANGE]	= { .len = sizeof(struct fib_rule_uid_range) },
	[FRA_PROTOCOL]  = { .type = NLA_U8 },
	[FRA_IP_PROTO]  = { .type = NLA_U8 },
	[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DSCP]	= NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2),
};
773  
fib_nl_newrule(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)774  int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
775  		   struct netlink_ext_ack *extack)
776  {
777  	struct net *net = sock_net(skb->sk);
778  	struct fib_rule_hdr *frh = nlmsg_data(nlh);
779  	struct fib_rules_ops *ops = NULL;
780  	struct fib_rule *rule = NULL, *r, *last = NULL;
781  	struct nlattr *tb[FRA_MAX + 1];
782  	int err = -EINVAL, unresolved = 0;
783  	bool user_priority = false;
784  
785  	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
786  		NL_SET_ERR_MSG(extack, "Invalid msg length");
787  		goto errout;
788  	}
789  
790  	ops = lookup_rules_ops(net, frh->family);
791  	if (!ops) {
792  		err = -EAFNOSUPPORT;
793  		NL_SET_ERR_MSG(extack, "Rule family not supported");
794  		goto errout;
795  	}
796  
797  	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
798  				     fib_rule_policy, extack);
799  	if (err < 0) {
800  		NL_SET_ERR_MSG(extack, "Error parsing msg");
801  		goto errout;
802  	}
803  
804  	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
805  	if (err)
806  		goto errout;
807  
808  	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
809  	    rule_exists(ops, frh, tb, rule)) {
810  		err = -EEXIST;
811  		goto errout_free;
812  	}
813  
814  	err = ops->configure(rule, skb, frh, tb, extack);
815  	if (err < 0)
816  		goto errout_free;
817  
818  	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
819  				      extack);
820  	if (err < 0)
821  		goto errout_free;
822  
823  	list_for_each_entry(r, &ops->rules_list, list) {
824  		if (r->pref == rule->target) {
825  			RCU_INIT_POINTER(rule->ctarget, r);
826  			break;
827  		}
828  	}
829  
830  	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
831  		unresolved = 1;
832  
833  	list_for_each_entry(r, &ops->rules_list, list) {
834  		if (r->pref > rule->pref)
835  			break;
836  		last = r;
837  	}
838  
839  	if (last)
840  		list_add_rcu(&rule->list, &last->list);
841  	else
842  		list_add_rcu(&rule->list, &ops->rules_list);
843  
844  	if (ops->unresolved_rules) {
845  		/*
846  		 * There are unresolved goto rules in the list, check if
847  		 * any of them are pointing to this new rule.
848  		 */
849  		list_for_each_entry(r, &ops->rules_list, list) {
850  			if (r->action == FR_ACT_GOTO &&
851  			    r->target == rule->pref &&
852  			    rtnl_dereference(r->ctarget) == NULL) {
853  				rcu_assign_pointer(r->ctarget, rule);
854  				if (--ops->unresolved_rules == 0)
855  					break;
856  			}
857  		}
858  	}
859  
860  	if (rule->action == FR_ACT_GOTO)
861  		ops->nr_goto_rules++;
862  
863  	if (unresolved)
864  		ops->unresolved_rules++;
865  
866  	if (rule->tun_id)
867  		ip_tunnel_need_metadata();
868  
869  	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
870  	flush_route_cache(ops);
871  	rules_ops_put(ops);
872  	return 0;
873  
874  errout_free:
875  	kfree(rule);
876  errout:
877  	rules_ops_put(ops);
878  	return err;
879  }
880  EXPORT_SYMBOL_GPL(fib_nl_newrule);
881  
fib_nl_delrule(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)882  int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
883  		   struct netlink_ext_ack *extack)
884  {
885  	struct net *net = sock_net(skb->sk);
886  	struct fib_rule_hdr *frh = nlmsg_data(nlh);
887  	struct fib_rules_ops *ops = NULL;
888  	struct fib_rule *rule = NULL, *r, *nlrule = NULL;
889  	struct nlattr *tb[FRA_MAX+1];
890  	int err = -EINVAL;
891  	bool user_priority = false;
892  
893  	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
894  		NL_SET_ERR_MSG(extack, "Invalid msg length");
895  		goto errout;
896  	}
897  
898  	ops = lookup_rules_ops(net, frh->family);
899  	if (ops == NULL) {
900  		err = -EAFNOSUPPORT;
901  		NL_SET_ERR_MSG(extack, "Rule family not supported");
902  		goto errout;
903  	}
904  
905  	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
906  				     fib_rule_policy, extack);
907  	if (err < 0) {
908  		NL_SET_ERR_MSG(extack, "Error parsing msg");
909  		goto errout;
910  	}
911  
912  	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
913  	if (err)
914  		goto errout;
915  
916  	rule = rule_find(ops, frh, tb, nlrule, user_priority);
917  	if (!rule) {
918  		err = -ENOENT;
919  		goto errout;
920  	}
921  
922  	if (rule->flags & FIB_RULE_PERMANENT) {
923  		err = -EPERM;
924  		goto errout;
925  	}
926  
927  	if (ops->delete) {
928  		err = ops->delete(rule);
929  		if (err)
930  			goto errout;
931  	}
932  
933  	if (rule->tun_id)
934  		ip_tunnel_unneed_metadata();
935  
936  	list_del_rcu(&rule->list);
937  
938  	if (rule->action == FR_ACT_GOTO) {
939  		ops->nr_goto_rules--;
940  		if (rtnl_dereference(rule->ctarget) == NULL)
941  			ops->unresolved_rules--;
942  	}
943  
944  	/*
945  	 * Check if this rule is a target to any of them. If so,
946  	 * adjust to the next one with the same preference or
947  	 * disable them. As this operation is eventually very
948  	 * expensive, it is only performed if goto rules, except
949  	 * current if it is goto rule, have actually been added.
950  	 */
951  	if (ops->nr_goto_rules > 0) {
952  		struct fib_rule *n;
953  
954  		n = list_next_entry(rule, list);
955  		if (&n->list == &ops->rules_list || n->pref != rule->pref)
956  			n = NULL;
957  		list_for_each_entry(r, &ops->rules_list, list) {
958  			if (rtnl_dereference(r->ctarget) != rule)
959  				continue;
960  			rcu_assign_pointer(r->ctarget, n);
961  			if (!n)
962  				ops->unresolved_rules++;
963  		}
964  	}
965  
966  	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
967  				NULL);
968  	notify_rule_change(RTM_DELRULE, rule, ops, nlh,
969  			   NETLINK_CB(skb).portid);
970  	fib_rule_put(rule);
971  	flush_route_cache(ops);
972  	rules_ops_put(ops);
973  	kfree(nlrule);
974  	return 0;
975  
976  errout:
977  	kfree(nlrule);
978  	rules_ops_put(ops);
979  	return err;
980  }
981  EXPORT_SYMBOL_GPL(fib_nl_delrule);
982  
fib_rule_nlmsg_size(struct fib_rules_ops * ops,struct fib_rule * rule)983  static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
984  					 struct fib_rule *rule)
985  {
986  	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
987  			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
988  			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
989  			 + nla_total_size(4) /* FRA_PRIORITY */
990  			 + nla_total_size(4) /* FRA_TABLE */
991  			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
992  			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
993  			 + nla_total_size(4) /* FRA_FWMARK */
994  			 + nla_total_size(4) /* FRA_FWMASK */
995  			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
996  			 + nla_total_size(sizeof(struct fib_kuid_range))
997  			 + nla_total_size(1) /* FRA_PROTOCOL */
998  			 + nla_total_size(1) /* FRA_IP_PROTO */
999  			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
1000  			 + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */
1001  
1002  	if (ops->nlmsg_payload)
1003  		payload += ops->nlmsg_payload(rule);
1004  
1005  	return payload;
1006  }
1007  
fib_nl_fill_rule(struct sk_buff * skb,struct fib_rule * rule,u32 pid,u32 seq,int type,int flags,struct fib_rules_ops * ops)1008  static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
1009  			    u32 pid, u32 seq, int type, int flags,
1010  			    struct fib_rules_ops *ops)
1011  {
1012  	struct nlmsghdr *nlh;
1013  	struct fib_rule_hdr *frh;
1014  
1015  	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
1016  	if (nlh == NULL)
1017  		return -EMSGSIZE;
1018  
1019  	frh = nlmsg_data(nlh);
1020  	frh->family = ops->family;
1021  	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
1022  	if (nla_put_u32(skb, FRA_TABLE, rule->table))
1023  		goto nla_put_failure;
1024  	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
1025  		goto nla_put_failure;
1026  	frh->res1 = 0;
1027  	frh->res2 = 0;
1028  	frh->action = rule->action;
1029  	frh->flags = rule->flags;
1030  
1031  	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
1032  		goto nla_put_failure;
1033  
1034  	if (rule->action == FR_ACT_GOTO &&
1035  	    rcu_access_pointer(rule->ctarget) == NULL)
1036  		frh->flags |= FIB_RULE_UNRESOLVED;
1037  
1038  	if (rule->iifname[0]) {
1039  		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
1040  			goto nla_put_failure;
1041  		if (rule->iifindex == -1)
1042  			frh->flags |= FIB_RULE_IIF_DETACHED;
1043  	}
1044  
1045  	if (rule->oifname[0]) {
1046  		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
1047  			goto nla_put_failure;
1048  		if (rule->oifindex == -1)
1049  			frh->flags |= FIB_RULE_OIF_DETACHED;
1050  	}
1051  
1052  	if ((rule->pref &&
1053  	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
1054  	    (rule->mark &&
1055  	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
1056  	    ((rule->mark_mask || rule->mark) &&
1057  	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
1058  	    (rule->target &&
1059  	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
1060  	    (rule->tun_id &&
1061  	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
1062  	    (rule->l3mdev &&
1063  	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
1064  	    (uid_range_set(&rule->uid_range) &&
1065  	     nla_put_uid_range(skb, &rule->uid_range)) ||
1066  	    (fib_rule_port_range_set(&rule->sport_range) &&
1067  	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
1068  	    (fib_rule_port_range_set(&rule->dport_range) &&
1069  	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
1070  	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
1071  		goto nla_put_failure;
1072  
1073  	if (rule->suppress_ifgroup != -1) {
1074  		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
1075  			goto nla_put_failure;
1076  	}
1077  
1078  	if (ops->fill(rule, skb, frh) < 0)
1079  		goto nla_put_failure;
1080  
1081  	nlmsg_end(skb, nlh);
1082  	return 0;
1083  
1084  nla_put_failure:
1085  	nlmsg_cancel(skb, nlh);
1086  	return -EMSGSIZE;
1087  }
1088  
dump_rules(struct sk_buff * skb,struct netlink_callback * cb,struct fib_rules_ops * ops)1089  static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
1090  		      struct fib_rules_ops *ops)
1091  {
1092  	int idx = 0;
1093  	struct fib_rule *rule;
1094  	int err = 0;
1095  
1096  	rcu_read_lock();
1097  	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
1098  		if (idx < cb->args[1])
1099  			goto skip;
1100  
1101  		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
1102  				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
1103  				       NLM_F_MULTI, ops);
1104  		if (err)
1105  			break;
1106  skip:
1107  		idx++;
1108  	}
1109  	rcu_read_unlock();
1110  	cb->args[1] = idx;
1111  	rules_ops_put(ops);
1112  
1113  	return err;
1114  }
1115  
fib_valid_dumprule_req(const struct nlmsghdr * nlh,struct netlink_ext_ack * extack)1116  static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
1117  				   struct netlink_ext_ack *extack)
1118  {
1119  	struct fib_rule_hdr *frh;
1120  
1121  	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
1122  		NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
1123  		return -EINVAL;
1124  	}
1125  
1126  	frh = nlmsg_data(nlh);
1127  	if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
1128  	    frh->res1 || frh->res2 || frh->action || frh->flags) {
1129  		NL_SET_ERR_MSG(extack,
1130  			       "Invalid values in header for fib rule dump request");
1131  		return -EINVAL;
1132  	}
1133  
1134  	if (nlmsg_attrlen(nlh, sizeof(*frh))) {
1135  		NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
1136  		return -EINVAL;
1137  	}
1138  
1139  	return 0;
1140  }
1141  
fib_nl_dumprule(struct sk_buff * skb,struct netlink_callback * cb)1142  static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
1143  {
1144  	const struct nlmsghdr *nlh = cb->nlh;
1145  	struct net *net = sock_net(skb->sk);
1146  	struct fib_rules_ops *ops;
1147  	int err, idx = 0, family;
1148  
1149  	if (cb->strict_check) {
1150  		err = fib_valid_dumprule_req(nlh, cb->extack);
1151  
1152  		if (err < 0)
1153  			return err;
1154  	}
1155  
1156  	family = rtnl_msg_family(nlh);
1157  	if (family != AF_UNSPEC) {
1158  		/* Protocol specific dump request */
1159  		ops = lookup_rules_ops(net, family);
1160  		if (ops == NULL)
1161  			return -EAFNOSUPPORT;
1162  
1163  		return dump_rules(skb, cb, ops);
1164  	}
1165  
1166  	err = 0;
1167  	rcu_read_lock();
1168  	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
1169  		if (idx < cb->args[0] || !try_module_get(ops->owner))
1170  			goto skip;
1171  
1172  		err = dump_rules(skb, cb, ops);
1173  		if (err < 0)
1174  			break;
1175  
1176  		cb->args[1] = 0;
1177  skip:
1178  		idx++;
1179  	}
1180  	rcu_read_unlock();
1181  	cb->args[0] = idx;
1182  
1183  	return err;
1184  }
1185  
notify_rule_change(int event,struct fib_rule * rule,struct fib_rules_ops * ops,struct nlmsghdr * nlh,u32 pid)1186  static void notify_rule_change(int event, struct fib_rule *rule,
1187  			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
1188  			       u32 pid)
1189  {
1190  	struct net *net;
1191  	struct sk_buff *skb;
1192  	int err = -ENOMEM;
1193  
1194  	net = ops->fro_net;
1195  	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
1196  	if (skb == NULL)
1197  		goto errout;
1198  
1199  	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
1200  	if (err < 0) {
1201  		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
1202  		WARN_ON(err == -EMSGSIZE);
1203  		kfree_skb(skb);
1204  		goto errout;
1205  	}
1206  
1207  	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
1208  	return;
1209  errout:
1210  	rtnl_set_sk_err(net, ops->nlgroup, err);
1211  }
1212  
attach_rules(struct list_head * rules,struct net_device * dev)1213  static void attach_rules(struct list_head *rules, struct net_device *dev)
1214  {
1215  	struct fib_rule *rule;
1216  
1217  	list_for_each_entry(rule, rules, list) {
1218  		if (rule->iifindex == -1 &&
1219  		    strcmp(dev->name, rule->iifname) == 0)
1220  			rule->iifindex = dev->ifindex;
1221  		if (rule->oifindex == -1 &&
1222  		    strcmp(dev->name, rule->oifname) == 0)
1223  			rule->oifindex = dev->ifindex;
1224  	}
1225  }
1226  
detach_rules(struct list_head * rules,struct net_device * dev)1227  static void detach_rules(struct list_head *rules, struct net_device *dev)
1228  {
1229  	struct fib_rule *rule;
1230  
1231  	list_for_each_entry(rule, rules, list) {
1232  		if (rule->iifindex == dev->ifindex)
1233  			rule->iifindex = -1;
1234  		if (rule->oifindex == dev->ifindex)
1235  			rule->oifindex = -1;
1236  	}
1237  }
1238  
1239  
fib_rules_event(struct notifier_block * this,unsigned long event,void * ptr)1240  static int fib_rules_event(struct notifier_block *this, unsigned long event,
1241  			   void *ptr)
1242  {
1243  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1244  	struct net *net = dev_net(dev);
1245  	struct fib_rules_ops *ops;
1246  
1247  	ASSERT_RTNL();
1248  
1249  	switch (event) {
1250  	case NETDEV_REGISTER:
1251  		list_for_each_entry(ops, &net->rules_ops, list)
1252  			attach_rules(&ops->rules_list, dev);
1253  		break;
1254  
1255  	case NETDEV_CHANGENAME:
1256  		list_for_each_entry(ops, &net->rules_ops, list) {
1257  			detach_rules(&ops->rules_list, dev);
1258  			attach_rules(&ops->rules_list, dev);
1259  		}
1260  		break;
1261  
1262  	case NETDEV_UNREGISTER:
1263  		list_for_each_entry(ops, &net->rules_ops, list)
1264  			detach_rules(&ops->rules_list, dev);
1265  		break;
1266  	}
1267  
1268  	return NOTIFY_DONE;
1269  }
1270  
/* Registered in fib_rules_init() to track netdevice events. */
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
1274  
fib_rules_net_init(struct net * net)1275  static int __net_init fib_rules_net_init(struct net *net)
1276  {
1277  	INIT_LIST_HEAD(&net->rules_ops);
1278  	spin_lock_init(&net->rules_mod_lock);
1279  	return 0;
1280  }
1281  
fib_rules_net_exit(struct net * net)1282  static void __net_exit fib_rules_net_exit(struct net *net)
1283  {
1284  	WARN_ON_ONCE(!list_empty(&net->rules_ops));
1285  }
1286  
/* Registered in fib_rules_init() for per-netns init/exit. */
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
	.exit = fib_rules_net_exit,
};
1291  
fib_rules_init(void)1292  static int __init fib_rules_init(void)
1293  {
1294  	int err;
1295  	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
1296  	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
1297  	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule,
1298  		      RTNL_FLAG_DUMP_UNLOCKED);
1299  
1300  	err = register_pernet_subsys(&fib_rules_net_ops);
1301  	if (err < 0)
1302  		goto fail;
1303  
1304  	err = register_netdevice_notifier(&fib_rules_notifier);
1305  	if (err < 0)
1306  		goto fail_unregister;
1307  
1308  	return 0;
1309  
1310  fail_unregister:
1311  	unregister_pernet_subsys(&fib_rules_net_ops);
1312  fail:
1313  	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
1314  	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
1315  	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
1316  	return err;
1317  }
1318  
1319  subsys_initcall(fib_rules_init);
1320