1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *	Multicast support for IPv6
4   *	Linux INET6 implementation
5   *
6   *	Authors:
7   *	Pedro Roque		<roque@di.fc.ul.pt>
8   *
9   *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10   */
11  
12  /* Changes:
13   *
14   *	yoshfuji	: fix format of router-alert option
15   *	YOSHIFUJI Hideaki @USAGI:
16   *		Fixed source address for MLD message based on
17   *		<draft-ietf-magma-mld-source-05.txt>.
18   *	YOSHIFUJI Hideaki @USAGI:
19   *		- Ignore Queries for invalid addresses.
20   *		- MLD for link-local addresses.
21   *	David L Stevens <dlstevens@us.ibm.com>:
22   *		- MLDv2 support
23   */
24  
25  #include <linux/module.h>
26  #include <linux/errno.h>
27  #include <linux/types.h>
28  #include <linux/string.h>
29  #include <linux/socket.h>
30  #include <linux/sockios.h>
31  #include <linux/jiffies.h>
32  #include <linux/net.h>
33  #include <linux/in.h>
34  #include <linux/in6.h>
35  #include <linux/netdevice.h>
36  #include <linux/if_arp.h>
37  #include <linux/route.h>
38  #include <linux/init.h>
39  #include <linux/proc_fs.h>
40  #include <linux/seq_file.h>
41  #include <linux/slab.h>
42  #include <linux/pkt_sched.h>
43  #include <net/mld.h>
44  #include <linux/workqueue.h>
45  
46  #include <linux/netfilter.h>
47  #include <linux/netfilter_ipv6.h>
48  
49  #include <net/net_namespace.h>
50  #include <net/sock.h>
51  #include <net/snmp.h>
52  
53  #include <net/ipv6.h>
54  #include <net/protocol.h>
55  #include <net/if_inet6.h>
56  #include <net/ndisc.h>
57  #include <net/addrconf.h>
58  #include <net/ip6_route.h>
59  #include <net/inet_common.h>
60  
61  #include <net/ip6_checksum.h>
62  
63  /* Ensure that struct in6_addr is aligned on a 32-bit word boundary. */
64  static int __mld2_query_bugs[] __attribute__((__unused__)) = {
65  	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
66  	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
67  	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
68  };
69  
70  static struct workqueue_struct *mld_wq;
71  static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
72  
73  static void igmp6_join_group(struct ifmcaddr6 *ma);
74  static void igmp6_leave_group(struct ifmcaddr6 *ma);
75  static void mld_mca_work(struct work_struct *work);
76  
77  static void mld_ifc_event(struct inet6_dev *idev);
78  static bool mld_in_v1_mode(const struct inet6_dev *idev);
79  static int sf_setstate(struct ifmcaddr6 *pmc);
80  static void sf_markstate(struct ifmcaddr6 *pmc);
81  static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
82  static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
83  			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
84  			  int delta);
85  static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
86  			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
87  			  int delta);
88  static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
89  			    struct inet6_dev *idev);
90  static int __ipv6_dev_mc_inc(struct net_device *dev,
91  			     const struct in6_addr *addr, unsigned int mode);
92  
93  #define MLD_QRV_DEFAULT		2
94  /* RFC3810, 9.2. Query Interval */
95  #define MLD_QI_DEFAULT		(125 * HZ)
96  /* RFC3810, 9.3. Query Response Interval */
97  #define MLD_QRI_DEFAULT		(10 * HZ)
98  
99  /* RFC3810, 8.1 Query Version Distinctions */
100  #define MLD_V1_QUERY_LEN	24
101  #define MLD_V2_QUERY_LEN_MIN	28
102  
103  #define IPV6_MLD_MAX_MSF	64
104  
105  int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
106  int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
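
/* Illustrative sketch, not part of the kernel source: how the two query
 * length constants above classify an incoming Query in __mld_query_work()
 * below, per RFC3810 8.1.  A 24-octet ICMPv6 payload is an MLDv1 Query,
 * 28 octets or more is an MLDv2 Query, and 25-27 octets is malformed:
 *
 *	if (len < MLD_V1_QUERY_LEN)
 *		drop it;
 *	else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev))
 *		process as MLDv1;
 *	else if (len >= MLD_V2_QUERY_LEN_MIN)
 *		process as MLDv2;
 *	else
 *		drop it;
 */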
107  
108  /*
109   *	socket join on multicast group
110   */
111  #define mc_dereference(e, idev) \
112  	rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
113  
114  #define sock_dereference(e, sk) \
115  	rcu_dereference_protected(e, lockdep_sock_is_held(sk))
116  
117  #define for_each_pmc_socklock(np, sk, pmc)			\
118  	for (pmc = sock_dereference((np)->ipv6_mc_list, sk);	\
119  	     pmc;						\
120  	     pmc = sock_dereference(pmc->next, sk))
121  
122  #define for_each_pmc_rcu(np, pmc)				\
123  	for (pmc = rcu_dereference((np)->ipv6_mc_list);		\
124  	     pmc;						\
125  	     pmc = rcu_dereference(pmc->next))
126  
127  #define for_each_psf_mclock(mc, psf)				\
128  	for (psf = mc_dereference((mc)->mca_sources, mc->idev);	\
129  	     psf;						\
130  	     psf = mc_dereference(psf->sf_next, mc->idev))
131  
132  #define for_each_psf_rcu(mc, psf)				\
133  	for (psf = rcu_dereference((mc)->mca_sources);		\
134  	     psf;						\
135  	     psf = rcu_dereference(psf->sf_next))
136  
137  #define for_each_psf_tomb(mc, psf)				\
138  	for (psf = mc_dereference((mc)->mca_tomb, mc->idev);	\
139  	     psf;						\
140  	     psf = mc_dereference(psf->sf_next, mc->idev))
141  
142  #define for_each_mc_mclock(idev, mc)				\
143  	for (mc = mc_dereference((idev)->mc_list, idev);	\
144  	     mc;						\
145  	     mc = mc_dereference(mc->next, idev))
146  
147  #define for_each_mc_rcu(idev, mc)				\
148  	for (mc = rcu_dereference((idev)->mc_list);             \
149  	     mc;                                                \
150  	     mc = rcu_dereference(mc->next))
151  
152  #define for_each_mc_tomb(idev, mc)				\
153  	for (mc = mc_dereference((idev)->mc_tomb, idev);	\
154  	     mc;						\
155  	     mc = mc_dereference(mc->next, idev))
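
/* The accessors above encode this file's locking model: the _mclock
 * iterators must run under idev->mc_lock (checked via lockdep), the
 * _socklock iterators under the socket lock, and the _rcu iterators
 * inside any RCU read-side section.  A minimal reader sketch, assuming
 * the caller already holds rcu_read_lock() as ipv6_chk_mcast_addr()
 * below does:
 *
 *	struct ifmcaddr6 *mc;
 *
 *	for_each_mc_rcu(idev, mc) {
 *		if (ipv6_addr_equal(&mc->mca_addr, group))
 *			break;
 *	}
 *
 * and mc then stays valid until the matching rcu_read_unlock().
 */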
156  
157  static int unsolicited_report_interval(struct inet6_dev *idev)
158  {
159  	int iv;
160  
161  	if (mld_in_v1_mode(idev))
162  		iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
163  	else
164  		iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
165  
166  	return iv > 0 ? iv : 1;
167  }
168  
169  static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
170  			       const struct in6_addr *addr, unsigned int mode)
171  {
172  	struct net_device *dev = NULL;
173  	struct ipv6_mc_socklist *mc_lst;
174  	struct ipv6_pinfo *np = inet6_sk(sk);
175  	struct net *net = sock_net(sk);
176  	int err;
177  
178  	ASSERT_RTNL();
179  
180  	if (!ipv6_addr_is_multicast(addr))
181  		return -EINVAL;
182  
183  	for_each_pmc_socklock(np, sk, mc_lst) {
184  		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
185  		    ipv6_addr_equal(&mc_lst->addr, addr))
186  			return -EADDRINUSE;
187  	}
188  
189  	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
190  
191  	if (!mc_lst)
192  		return -ENOMEM;
193  
194  	mc_lst->next = NULL;
195  	mc_lst->addr = *addr;
196  
197  	if (ifindex == 0) {
198  		struct rt6_info *rt;
199  		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
200  		if (rt) {
201  			dev = rt->dst.dev;
202  			ip6_rt_put(rt);
203  		}
204  	} else
205  		dev = __dev_get_by_index(net, ifindex);
206  
207  	if (!dev) {
208  		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
209  		return -ENODEV;
210  	}
211  
212  	mc_lst->ifindex = dev->ifindex;
213  	mc_lst->sfmode = mode;
214  	RCU_INIT_POINTER(mc_lst->sflist, NULL);
215  
216  	/*
217  	 *	now add/increase the group membership on the device
218  	 */
219  
220  	err = __ipv6_dev_mc_inc(dev, addr, mode);
221  
222  	if (err) {
223  		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
224  		return err;
225  	}
226  
227  	mc_lst->next = np->ipv6_mc_list;
228  	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
229  
230  	return 0;
231  }
232  
233  int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
234  {
235  	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
236  }
237  EXPORT_SYMBOL(ipv6_sock_mc_join);
238  
239  int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
240  			  const struct in6_addr *addr, unsigned int mode)
241  {
242  	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
243  }
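
/* For reference, a hedged userspace sketch of what ends up in
 * ipv6_sock_mc_join(): joining a group with the protocol-independent
 * MCAST_JOIN_GROUP option (fd, ifindex and the group address below are
 * illustrative only):
 *
 *	struct group_req greq = { .gr_interface = ifindex };
 *	struct sockaddr_in6 *g = (struct sockaddr_in6 *)&greq.gr_group;
 *
 *	g->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "ff02::1:3", &g->sin6_addr);
 *	setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_GROUP, &greq, sizeof(greq));
 *
 * This arrives here with mode == MCAST_EXCLUDE and an empty source
 * list, i.e. "accept traffic from every source".
 */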
244  
245  /*
246   *	socket leave on multicast group
247   */
248  int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
249  {
250  	struct ipv6_pinfo *np = inet6_sk(sk);
251  	struct ipv6_mc_socklist *mc_lst;
252  	struct ipv6_mc_socklist __rcu **lnk;
253  	struct net *net = sock_net(sk);
254  
255  	ASSERT_RTNL();
256  
257  	if (!ipv6_addr_is_multicast(addr))
258  		return -EINVAL;
259  
260  	for (lnk = &np->ipv6_mc_list;
261  	     (mc_lst = sock_dereference(*lnk, sk)) != NULL;
262  	      lnk = &mc_lst->next) {
263  		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
264  		    ipv6_addr_equal(&mc_lst->addr, addr)) {
265  			struct net_device *dev;
266  
267  			*lnk = mc_lst->next;
268  
269  			dev = __dev_get_by_index(net, mc_lst->ifindex);
270  			if (dev) {
271  				struct inet6_dev *idev = __in6_dev_get(dev);
272  
273  				ip6_mc_leave_src(sk, mc_lst, idev);
274  				if (idev)
275  					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
276  			} else {
277  				ip6_mc_leave_src(sk, mc_lst, NULL);
278  			}
279  
280  			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
281  			kfree_rcu(mc_lst, rcu);
282  			return 0;
283  		}
284  	}
285  
286  	return -EADDRNOTAVAIL;
287  }
288  EXPORT_SYMBOL(ipv6_sock_mc_drop);
289  
290  static struct inet6_dev *ip6_mc_find_dev_rtnl(struct net *net,
291  					      const struct in6_addr *group,
292  					      int ifindex)
293  {
294  	struct net_device *dev = NULL;
295  	struct inet6_dev *idev = NULL;
296  
297  	if (ifindex == 0) {
298  		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
299  
300  		if (rt) {
301  			dev = rt->dst.dev;
302  			ip6_rt_put(rt);
303  		}
304  	} else {
305  		dev = __dev_get_by_index(net, ifindex);
306  	}
307  
308  	if (!dev)
309  		return NULL;
310  	idev = __in6_dev_get(dev);
311  	if (!idev)
312  		return NULL;
313  	if (idev->dead)
314  		return NULL;
315  	return idev;
316  }
317  
318  void __ipv6_sock_mc_close(struct sock *sk)
319  {
320  	struct ipv6_pinfo *np = inet6_sk(sk);
321  	struct ipv6_mc_socklist *mc_lst;
322  	struct net *net = sock_net(sk);
323  
324  	ASSERT_RTNL();
325  
326  	while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
327  		struct net_device *dev;
328  
329  		np->ipv6_mc_list = mc_lst->next;
330  
331  		dev = __dev_get_by_index(net, mc_lst->ifindex);
332  		if (dev) {
333  			struct inet6_dev *idev = __in6_dev_get(dev);
334  
335  			ip6_mc_leave_src(sk, mc_lst, idev);
336  			if (idev)
337  				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
338  		} else {
339  			ip6_mc_leave_src(sk, mc_lst, NULL);
340  		}
341  
342  		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
343  		kfree_rcu(mc_lst, rcu);
344  	}
345  }
346  
347  void ipv6_sock_mc_close(struct sock *sk)
348  {
349  	struct ipv6_pinfo *np = inet6_sk(sk);
350  
351  	if (!rcu_access_pointer(np->ipv6_mc_list))
352  		return;
353  
354  	rtnl_lock();
355  	lock_sock(sk);
356  	__ipv6_sock_mc_close(sk);
357  	release_sock(sk);
358  	rtnl_unlock();
359  }
360  
361  int ip6_mc_source(int add, int omode, struct sock *sk,
362  	struct group_source_req *pgsr)
363  {
364  	struct in6_addr *source, *group;
365  	struct ipv6_mc_socklist *pmc;
366  	struct inet6_dev *idev;
367  	struct ipv6_pinfo *inet6 = inet6_sk(sk);
368  	struct ip6_sf_socklist *psl;
369  	struct net *net = sock_net(sk);
370  	int i, j, rv;
371  	int leavegroup = 0;
372  	int err;
373  
374  	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
375  	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
376  
377  	if (!ipv6_addr_is_multicast(group))
378  		return -EINVAL;
379  
380  	idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
381  	if (!idev)
382  		return -ENODEV;
383  
384  	err = -EADDRNOTAVAIL;
385  
386  	mutex_lock(&idev->mc_lock);
387  	for_each_pmc_socklock(inet6, sk, pmc) {
388  		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
389  			continue;
390  		if (ipv6_addr_equal(&pmc->addr, group))
391  			break;
392  	}
393  	if (!pmc) {		/* must have a prior join */
394  		err = -EINVAL;
395  		goto done;
396  	}
397  	/* if a source filter was set, must be the same mode as before */
398  	if (rcu_access_pointer(pmc->sflist)) {
399  		if (pmc->sfmode != omode) {
400  			err = -EINVAL;
401  			goto done;
402  		}
403  	} else if (pmc->sfmode != omode) {
404  		/* allow mode switches for empty-set filters */
405  		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
406  		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
407  		pmc->sfmode = omode;
408  	}
409  
410  	psl = sock_dereference(pmc->sflist, sk);
411  	if (!add) {
412  		if (!psl)
413  			goto done;	/* err = -EADDRNOTAVAIL */
414  		rv = !0;
415  		for (i = 0; i < psl->sl_count; i++) {
416  			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
417  			if (rv == 0)
418  				break;
419  		}
420  		if (rv)		/* source not found */
421  			goto done;	/* err = -EADDRNOTAVAIL */
422  
423  		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
424  		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
425  			leavegroup = 1;
426  			goto done;
427  		}
428  
429  		/* update the interface filter */
430  		ip6_mc_del_src(idev, group, omode, 1, source, 1);
431  
432  		for (j = i+1; j < psl->sl_count; j++)
433  			psl->sl_addr[j-1] = psl->sl_addr[j];
434  		psl->sl_count--;
435  		err = 0;
436  		goto done;
437  	}
438  	/* else, add a new source to the filter */
439  
440  	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
441  		err = -ENOBUFS;
442  		goto done;
443  	}
444  	if (!psl || psl->sl_count == psl->sl_max) {
445  		struct ip6_sf_socklist *newpsl;
446  		int count = IP6_SFBLOCK;
447  
448  		if (psl)
449  			count += psl->sl_max;
450  		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
451  				      GFP_KERNEL);
452  		if (!newpsl) {
453  			err = -ENOBUFS;
454  			goto done;
455  		}
456  		newpsl->sl_max = count;
457  		newpsl->sl_count = count - IP6_SFBLOCK;
458  		if (psl) {
459  			for (i = 0; i < psl->sl_count; i++)
460  				newpsl->sl_addr[i] = psl->sl_addr[i];
461  			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
462  				   &sk->sk_omem_alloc);
463  		}
464  		rcu_assign_pointer(pmc->sflist, newpsl);
465  		kfree_rcu(psl, rcu);
466  		psl = newpsl;
467  	}
468  	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
469  	for (i = 0; i < psl->sl_count; i++) {
470  		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
471  		if (rv == 0) /* the address is already in the list; that's an error */
472  			goto done;
473  	}
474  	for (j = psl->sl_count-1; j >= i; j--)
475  		psl->sl_addr[j+1] = psl->sl_addr[j];
476  	psl->sl_addr[i] = *source;
477  	psl->sl_count++;
478  	err = 0;
479  	/* update the interface list */
480  	ip6_mc_add_src(idev, group, omode, 1, source, 1);
481  done:
482  	mutex_unlock(&idev->mc_lock);
483  	if (leavegroup)
484  		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
485  	return err;
486  }
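
/* A hedged userspace sketch of the add-source path above: an SSM join
 * via MCAST_JOIN_SOURCE_GROUP reaches ip6_mc_source() with add == 1 and
 * omode == MCAST_INCLUDE (addresses and fd are illustrative only):
 *
 *	struct group_source_req gsr = { .gsr_interface = ifindex };
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	grp->sin6_family = src->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "ff3e::8000:1", &grp->sin6_addr);
 *	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *	setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *		   &gsr, sizeof(gsr));
 *
 * Conversely, deleting the last source of an INCLUDE filter is treated
 * as a full leave via the (INCLUDE, empty) == LEAVE_GROUP case above.
 */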
487  
488  int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
489  		    struct sockaddr_storage *list)
490  {
491  	const struct in6_addr *group;
492  	struct ipv6_mc_socklist *pmc;
493  	struct inet6_dev *idev;
494  	struct ipv6_pinfo *inet6 = inet6_sk(sk);
495  	struct ip6_sf_socklist *newpsl, *psl;
496  	struct net *net = sock_net(sk);
497  	int leavegroup = 0;
498  	int i, err;
499  
500  	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
501  
502  	if (!ipv6_addr_is_multicast(group))
503  		return -EINVAL;
504  	if (gsf->gf_fmode != MCAST_INCLUDE &&
505  	    gsf->gf_fmode != MCAST_EXCLUDE)
506  		return -EINVAL;
507  
508  	idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
509  	if (!idev)
510  		return -ENODEV;
511  
512  	err = 0;
513  
514  	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
515  		leavegroup = 1;
516  		goto done;
517  	}
518  
519  	for_each_pmc_socklock(inet6, sk, pmc) {
520  		if (pmc->ifindex != gsf->gf_interface)
521  			continue;
522  		if (ipv6_addr_equal(&pmc->addr, group))
523  			break;
524  	}
525  	if (!pmc) {		/* must have a prior join */
526  		err = -EINVAL;
527  		goto done;
528  	}
529  	if (gsf->gf_numsrc) {
530  		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
531  						      gsf->gf_numsrc),
532  				      GFP_KERNEL);
533  		if (!newpsl) {
534  			err = -ENOBUFS;
535  			goto done;
536  		}
537  		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
538  		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
539  			struct sockaddr_in6 *psin6;
540  
541  			psin6 = (struct sockaddr_in6 *)list;
542  			newpsl->sl_addr[i] = psin6->sin6_addr;
543  		}
544  		mutex_lock(&idev->mc_lock);
545  		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
546  				     newpsl->sl_count, newpsl->sl_addr, 0);
547  		if (err) {
548  			mutex_unlock(&idev->mc_lock);
549  			sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
550  							     newpsl->sl_max));
551  			goto done;
552  		}
553  		mutex_unlock(&idev->mc_lock);
554  	} else {
555  		newpsl = NULL;
556  		mutex_lock(&idev->mc_lock);
557  		ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
558  		mutex_unlock(&idev->mc_lock);
559  	}
560  
561  	mutex_lock(&idev->mc_lock);
562  	psl = sock_dereference(pmc->sflist, sk);
563  	if (psl) {
564  		ip6_mc_del_src(idev, group, pmc->sfmode,
565  			       psl->sl_count, psl->sl_addr, 0);
566  		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
567  			   &sk->sk_omem_alloc);
568  	} else {
569  		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
570  	}
571  	rcu_assign_pointer(pmc->sflist, newpsl);
572  	mutex_unlock(&idev->mc_lock);
573  	kfree_rcu(psl, rcu);
574  	pmc->sfmode = gsf->gf_fmode;
575  	err = 0;
576  done:
577  	if (leavegroup)
578  		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
579  	return err;
580  }
581  
582  int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
583  		  sockptr_t optval, size_t ss_offset)
584  {
585  	struct ipv6_pinfo *inet6 = inet6_sk(sk);
586  	const struct in6_addr *group;
587  	struct ipv6_mc_socklist *pmc;
588  	struct ip6_sf_socklist *psl;
589  	unsigned int count;
590  	int i, copycount;
591  
592  	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
593  
594  	if (!ipv6_addr_is_multicast(group))
595  		return -EINVAL;
596  
597  	/* changes to the ipv6_mc_list require the socket lock and
598  	 * rtnl lock. We have the socket lock, so reading the list is safe.
599  	 */
600  
601  	for_each_pmc_socklock(inet6, sk, pmc) {
602  		if (pmc->ifindex != gsf->gf_interface)
603  			continue;
604  		if (ipv6_addr_equal(group, &pmc->addr))
605  			break;
606  	}
607  	if (!pmc)		/* must have a prior join */
608  		return -EADDRNOTAVAIL;
609  
610  	gsf->gf_fmode = pmc->sfmode;
611  	psl = sock_dereference(pmc->sflist, sk);
612  	count = psl ? psl->sl_count : 0;
613  
614  	copycount = min(count, gsf->gf_numsrc);
615  	gsf->gf_numsrc = count;
616  	for (i = 0; i < copycount; i++) {
617  		struct sockaddr_in6 *psin6;
618  		struct sockaddr_storage ss;
619  
620  		psin6 = (struct sockaddr_in6 *)&ss;
621  		memset(&ss, 0, sizeof(ss));
622  		psin6->sin6_family = AF_INET6;
623  		psin6->sin6_addr = psl->sl_addr[i];
624  		if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
625  			return -EFAULT;
626  		ss_offset += sizeof(ss);
627  	}
628  	return 0;
629  }
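
/* Usage contract of the getter above, as exercised by the MCAST_MSFILTER
 * getsockopt() path: the caller advertises how many sockaddr_storage
 * slots it allocated in gsf->gf_numsrc, the kernel copies out
 * min(count, gf_numsrc) entries starting at ss_offset and rewrites
 * gf_numsrc to the full source count, so userspace can detect
 * truncation and retry with a larger buffer.
 */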
630  
631  bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
632  		    const struct in6_addr *src_addr)
633  {
634  	const struct ipv6_pinfo *np = inet6_sk(sk);
635  	const struct ipv6_mc_socklist *mc;
636  	const struct ip6_sf_socklist *psl;
637  	bool rv = true;
638  
639  	rcu_read_lock();
640  	for_each_pmc_rcu(np, mc) {
641  		if (ipv6_addr_equal(&mc->addr, mc_addr))
642  			break;
643  	}
644  	if (!mc) {
645  		rcu_read_unlock();
646  		return inet6_test_bit(MC6_ALL, sk);
647  	}
648  	psl = rcu_dereference(mc->sflist);
649  	if (!psl) {
650  		rv = mc->sfmode == MCAST_EXCLUDE;
651  	} else {
652  		int i;
653  
654  		for (i = 0; i < psl->sl_count; i++) {
655  			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
656  				break;
657  		}
658  		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
659  			rv = false;
660  		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
661  			rv = false;
662  	}
663  	rcu_read_unlock();
664  
665  	return rv;
666  }
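
/* Summary of the decision above, read straight from the code: with no
 * per-socket source filter, delivery depends only on the filter mode
 * (EXCLUDE accepts everything, INCLUDE with no sources accepts nothing).
 * With a filter installed:
 *
 *	mode		source in sflist	delivered?
 *	INCLUDE		yes			yes
 *	INCLUDE		no			no
 *	EXCLUDE		yes			no
 *	EXCLUDE		no			yes
 *
 * Sockets that never joined mc_addr fall back to the MC6_ALL bit.
 */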
667  
668  /* called with mc_lock */
669  static void igmp6_group_added(struct ifmcaddr6 *mc)
670  {
671  	struct net_device *dev = mc->idev->dev;
672  	char buf[MAX_ADDR_LEN];
673  
674  	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
675  	    IPV6_ADDR_SCOPE_LINKLOCAL)
676  		return;
677  
678  	if (!(mc->mca_flags&MAF_LOADED)) {
679  		mc->mca_flags |= MAF_LOADED;
680  		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
681  			dev_mc_add(dev, buf);
682  	}
683  
684  	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
685  		return;
686  
687  	if (mld_in_v1_mode(mc->idev)) {
688  		igmp6_join_group(mc);
689  		return;
690  	}
691  	/* else v2 */
692  
693  	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
694  	 * should not send filter-mode change record as the mode
695  	 * should be from IN() to IN(A).
696  	 */
697  	if (mc->mca_sfmode == MCAST_EXCLUDE)
698  		mc->mca_crcount = mc->idev->mc_qrv;
699  
700  	mld_ifc_event(mc->idev);
701  }
702  
703  /* called with mc_lock */
704  static void igmp6_group_dropped(struct ifmcaddr6 *mc)
705  {
706  	struct net_device *dev = mc->idev->dev;
707  	char buf[MAX_ADDR_LEN];
708  
709  	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
710  	    IPV6_ADDR_SCOPE_LINKLOCAL)
711  		return;
712  
713  	if (mc->mca_flags&MAF_LOADED) {
714  		mc->mca_flags &= ~MAF_LOADED;
715  		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
716  			dev_mc_del(dev, buf);
717  	}
718  
719  	if (mc->mca_flags & MAF_NOREPORT)
720  		return;
721  
722  	if (!mc->idev->dead)
723  		igmp6_leave_group(mc);
724  
725  	if (cancel_delayed_work(&mc->mca_work))
726  		refcount_dec(&mc->mca_refcnt);
727  }
728  
729  /*
730   * deleted ifmcaddr6 manipulation
731   * called with mc_lock
732   */
733  static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
734  {
735  	struct ifmcaddr6 *pmc;
736  
737  	/* this is an "ifmcaddr6" for convenience; only the fields below
738  	 * are actually used. In particular, the refcnt and users are not
739  	 * used for management of the delete list. Using the same structure
740  	 * for deleted items allows change reports to use common code with
741  	 * non-deleted or query-response MCA's.
742  	 */
743  	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
744  	if (!pmc)
745  		return;
746  
747  	pmc->idev = im->idev;
748  	in6_dev_hold(idev);
749  	pmc->mca_addr = im->mca_addr;
750  	pmc->mca_crcount = idev->mc_qrv;
751  	pmc->mca_sfmode = im->mca_sfmode;
752  	if (pmc->mca_sfmode == MCAST_INCLUDE) {
753  		struct ip6_sf_list *psf;
754  
755  		rcu_assign_pointer(pmc->mca_tomb,
756  				   mc_dereference(im->mca_tomb, idev));
757  		rcu_assign_pointer(pmc->mca_sources,
758  				   mc_dereference(im->mca_sources, idev));
759  		RCU_INIT_POINTER(im->mca_tomb, NULL);
760  		RCU_INIT_POINTER(im->mca_sources, NULL);
761  
762  		for_each_psf_mclock(pmc, psf)
763  			psf->sf_crcount = pmc->mca_crcount;
764  	}
765  
766  	rcu_assign_pointer(pmc->next, idev->mc_tomb);
767  	rcu_assign_pointer(idev->mc_tomb, pmc);
768  }
769  
770  /* called with mc_lock */
771  static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
772  {
773  	struct ip6_sf_list *psf, *sources, *tomb;
774  	struct in6_addr *pmca = &im->mca_addr;
775  	struct ifmcaddr6 *pmc, *pmc_prev;
776  
777  	pmc_prev = NULL;
778  	for_each_mc_tomb(idev, pmc) {
779  		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
780  			break;
781  		pmc_prev = pmc;
782  	}
783  	if (pmc) {
784  		if (pmc_prev)
785  			rcu_assign_pointer(pmc_prev->next, pmc->next);
786  		else
787  			rcu_assign_pointer(idev->mc_tomb, pmc->next);
788  	}
789  
790  	if (pmc) {
791  		im->idev = pmc->idev;
792  		if (im->mca_sfmode == MCAST_INCLUDE) {
793  			tomb = rcu_replace_pointer(im->mca_tomb,
794  						   mc_dereference(pmc->mca_tomb, pmc->idev),
795  						   lockdep_is_held(&im->idev->mc_lock));
796  			rcu_assign_pointer(pmc->mca_tomb, tomb);
797  
798  			sources = rcu_replace_pointer(im->mca_sources,
799  						      mc_dereference(pmc->mca_sources, pmc->idev),
800  						      lockdep_is_held(&im->idev->mc_lock));
801  			rcu_assign_pointer(pmc->mca_sources, sources);
802  			for_each_psf_mclock(im, psf)
803  				psf->sf_crcount = idev->mc_qrv;
804  		} else {
805  			im->mca_crcount = idev->mc_qrv;
806  		}
807  		in6_dev_put(pmc->idev);
808  		ip6_mc_clear_src(pmc);
809  		kfree_rcu(pmc, rcu);
810  	}
811  }
812  
813  /* called with mc_lock */
814  static void mld_clear_delrec(struct inet6_dev *idev)
815  {
816  	struct ifmcaddr6 *pmc, *nextpmc;
817  
818  	pmc = mc_dereference(idev->mc_tomb, idev);
819  	RCU_INIT_POINTER(idev->mc_tomb, NULL);
820  
821  	for (; pmc; pmc = nextpmc) {
822  		nextpmc = mc_dereference(pmc->next, idev);
823  		ip6_mc_clear_src(pmc);
824  		in6_dev_put(pmc->idev);
825  		kfree_rcu(pmc, rcu);
826  	}
827  
828  	/* clear dead sources, too */
829  	for_each_mc_mclock(idev, pmc) {
830  		struct ip6_sf_list *psf, *psf_next;
831  
832  		psf = mc_dereference(pmc->mca_tomb, idev);
833  		RCU_INIT_POINTER(pmc->mca_tomb, NULL);
834  		for (; psf; psf = psf_next) {
835  			psf_next = mc_dereference(psf->sf_next, idev);
836  			kfree_rcu(psf, rcu);
837  		}
838  	}
839  }
840  
841  static void mld_clear_query(struct inet6_dev *idev)
842  {
843  	struct sk_buff *skb;
844  
845  	spin_lock_bh(&idev->mc_query_lock);
846  	while ((skb = __skb_dequeue(&idev->mc_query_queue)))
847  		kfree_skb(skb);
848  	spin_unlock_bh(&idev->mc_query_lock);
849  }
850  
851  static void mld_clear_report(struct inet6_dev *idev)
852  {
853  	struct sk_buff *skb;
854  
855  	spin_lock_bh(&idev->mc_report_lock);
856  	while ((skb = __skb_dequeue(&idev->mc_report_queue)))
857  		kfree_skb(skb);
858  	spin_unlock_bh(&idev->mc_report_lock);
859  }
860  
861  static void mca_get(struct ifmcaddr6 *mc)
862  {
863  	refcount_inc(&mc->mca_refcnt);
864  }
865  
866  static void ma_put(struct ifmcaddr6 *mc)
867  {
868  	if (refcount_dec_and_test(&mc->mca_refcnt)) {
869  		in6_dev_put(mc->idev);
870  		kfree_rcu(mc, rcu);
871  	}
872  }
873  
874  /* called with mc_lock */
875  static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
876  				   const struct in6_addr *addr,
877  				   unsigned int mode)
878  {
879  	struct ifmcaddr6 *mc;
880  
881  	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
882  	if (!mc)
883  		return NULL;
884  
885  	INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);
886  
887  	mc->mca_addr = *addr;
888  	mc->idev = idev; /* reference taken by caller */
889  	mc->mca_users = 1;
890  	/* mca_stamp should be updated upon changes */
891  	mc->mca_cstamp = mc->mca_tstamp = jiffies;
892  	refcount_set(&mc->mca_refcnt, 1);
893  
894  	mc->mca_sfmode = mode;
895  	mc->mca_sfcount[mode] = 1;
896  
897  	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
898  	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
899  		mc->mca_flags |= MAF_NOREPORT;
900  
901  	return mc;
902  }
903  
904  /*
905   *	device multicast group inc (add if not found)
906   */
907  static int __ipv6_dev_mc_inc(struct net_device *dev,
908  			     const struct in6_addr *addr, unsigned int mode)
909  {
910  	struct ifmcaddr6 *mc;
911  	struct inet6_dev *idev;
912  
913  	ASSERT_RTNL();
914  
915  	/* we need to take a reference on idev */
916  	idev = in6_dev_get(dev);
917  
918  	if (!idev)
919  		return -EINVAL;
920  
921  	if (idev->dead) {
922  		in6_dev_put(idev);
923  		return -ENODEV;
924  	}
925  
926  	mutex_lock(&idev->mc_lock);
927  	for_each_mc_mclock(idev, mc) {
928  		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
929  			mc->mca_users++;
930  			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
931  			mutex_unlock(&idev->mc_lock);
932  			in6_dev_put(idev);
933  			return 0;
934  		}
935  	}
936  
937  	mc = mca_alloc(idev, addr, mode);
938  	if (!mc) {
939  		mutex_unlock(&idev->mc_lock);
940  		in6_dev_put(idev);
941  		return -ENOMEM;
942  	}
943  
944  	rcu_assign_pointer(mc->next, idev->mc_list);
945  	rcu_assign_pointer(idev->mc_list, mc);
946  
947  	mca_get(mc);
948  
949  	mld_del_delrec(idev, mc);
950  	igmp6_group_added(mc);
951  	mutex_unlock(&idev->mc_lock);
952  	ma_put(mc);
953  	return 0;
954  }
955  
956  int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
957  {
958  	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
959  }
960  EXPORT_SYMBOL(ipv6_dev_mc_inc);
961  
962  /*
963   * device multicast group del
964   */
965  int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
966  {
967  	struct ifmcaddr6 *ma, __rcu **map;
968  
969  	ASSERT_RTNL();
970  
971  	mutex_lock(&idev->mc_lock);
972  	for (map = &idev->mc_list;
973  	     (ma = mc_dereference(*map, idev));
974  	     map = &ma->next) {
975  		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
976  			if (--ma->mca_users == 0) {
977  				*map = ma->next;
978  
979  				igmp6_group_dropped(ma);
980  				ip6_mc_clear_src(ma);
981  				mutex_unlock(&idev->mc_lock);
982  
983  				ma_put(ma);
984  				return 0;
985  			}
986  			mutex_unlock(&idev->mc_lock);
987  			return 0;
988  		}
989  	}
990  
991  	mutex_unlock(&idev->mc_lock);
992  	return -ENOENT;
993  }
994  
995  int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
996  {
997  	struct inet6_dev *idev;
998  	int err;
999  
1000  	ASSERT_RTNL();
1001  
1002  	idev = __in6_dev_get(dev);
1003  	if (!idev)
1004  		err = -ENODEV;
1005  	else
1006  		err = __ipv6_dev_mc_dec(idev, addr);
1007  
1008  	return err;
1009  }
1010  EXPORT_SYMBOL(ipv6_dev_mc_dec);
1011  
1012  /*
1013   *	check if the interface/address pair is valid
1014   */
1015  bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
1016  			 const struct in6_addr *src_addr)
1017  {
1018  	struct inet6_dev *idev;
1019  	struct ifmcaddr6 *mc;
1020  	bool rv = false;
1021  
1022  	rcu_read_lock();
1023  	idev = __in6_dev_get(dev);
1024  	if (idev) {
1025  		for_each_mc_rcu(idev, mc) {
1026  			if (ipv6_addr_equal(&mc->mca_addr, group))
1027  				break;
1028  		}
1029  		if (mc) {
1030  			if (src_addr && !ipv6_addr_any(src_addr)) {
1031  				struct ip6_sf_list *psf;
1032  
1033  				for_each_psf_rcu(mc, psf) {
1034  					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1035  						break;
1036  				}
1037  				if (psf)
1038  					rv = psf->sf_count[MCAST_INCLUDE] ||
1039  						psf->sf_count[MCAST_EXCLUDE] !=
1040  						mc->mca_sfcount[MCAST_EXCLUDE];
1041  				else
1042  					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
1043  			} else
1044  				rv = true; /* don't filter unspecified source */
1045  		}
1046  	}
1047  	rcu_read_unlock();
1048  	return rv;
1049  }
1050  
1051  /* called with mc_lock */
1052  static void mld_gq_start_work(struct inet6_dev *idev)
1053  {
1054  	unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
1055  
1056  	idev->mc_gq_running = 1;
1057  	if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1058  		in6_dev_hold(idev);
1059  }
1060  
1061  /* called with mc_lock */
1062  static void mld_gq_stop_work(struct inet6_dev *idev)
1063  {
1064  	idev->mc_gq_running = 0;
1065  	if (cancel_delayed_work(&idev->mc_gq_work))
1066  		__in6_dev_put(idev);
1067  }
1068  
1069  /* called with mc_lock */
1070  static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1071  {
1072  	unsigned long tv = get_random_u32_below(delay);
1073  
1074  	if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1075  		in6_dev_hold(idev);
1076  }
1077  
1078  /* called with mc_lock */
1079  static void mld_ifc_stop_work(struct inet6_dev *idev)
1080  {
1081  	idev->mc_ifc_count = 0;
1082  	if (cancel_delayed_work(&idev->mc_ifc_work))
1083  		__in6_dev_put(idev);
1084  }
1085  
1086  /* called with mc_lock */
1087  static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1088  {
1089  	unsigned long tv = get_random_u32_below(delay);
1090  
1091  	if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1092  		in6_dev_hold(idev);
1093  }
1094  
1095  static void mld_dad_stop_work(struct inet6_dev *idev)
1096  {
1097  	if (cancel_delayed_work(&idev->mc_dad_work))
1098  		__in6_dev_put(idev);
1099  }
1100  
1101  static void mld_query_stop_work(struct inet6_dev *idev)
1102  {
1103  	spin_lock_bh(&idev->mc_query_lock);
1104  	if (cancel_delayed_work(&idev->mc_query_work))
1105  		__in6_dev_put(idev);
1106  	spin_unlock_bh(&idev->mc_query_lock);
1107  }
1108  
1109  static void mld_report_stop_work(struct inet6_dev *idev)
1110  {
1111  	if (cancel_delayed_work_sync(&idev->mc_report_work))
1112  		__in6_dev_put(idev);
1113  }
1114  
1115  /*
1116   * IGMP handling (alias multicast ICMPv6 messages)
1117   * called with mc_lock
1118   */
1119  static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1120  {
1121  	unsigned long delay = resptime;
1122  
1123  	/* Do not start work for these addresses */
1124  	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1125  	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1126  		return;
1127  
1128  	if (cancel_delayed_work(&ma->mca_work)) {
1129  		refcount_dec(&ma->mca_refcnt);
1130  		delay = ma->mca_work.timer.expires - jiffies;
1131  	}
1132  
1133  	if (delay >= resptime)
1134  		delay = get_random_u32_below(resptime);
1135  
1136  	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
1137  		refcount_inc(&ma->mca_refcnt);
1138  	ma->mca_flags |= MAF_TIMER_RUNNING;
1139  }
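
/* Per RFC2710 4. and RFC3810 6.3, the pending report is delayed by a
 * uniformly random interval in [0, Maximum Response Delay].  The
 * cancel-and-resched dance above keeps the earliest deadline: if work
 * was already queued with less time remaining than the new resptime,
 * that shorter delay is reused instead of re-randomizing over the
 * longer window.
 */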
1140  
1141  /* mark EXCLUDE-mode sources
1142   * called with mc_lock
1143   */
1144  static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1145  			     const struct in6_addr *srcs)
1146  {
1147  	struct ip6_sf_list *psf;
1148  	int i, scount;
1149  
1150  	scount = 0;
1151  	for_each_psf_mclock(pmc, psf) {
1152  		if (scount == nsrcs)
1153  			break;
1154  		for (i = 0; i < nsrcs; i++) {
1155  			/* skip inactive filters */
1156  			if (psf->sf_count[MCAST_INCLUDE] ||
1157  			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
1158  			    psf->sf_count[MCAST_EXCLUDE])
1159  				break;
1160  			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1161  				scount++;
1162  				break;
1163  			}
1164  		}
1165  	}
1166  	pmc->mca_flags &= ~MAF_GSQUERY;
1167  	if (scount == nsrcs)	/* all sources excluded */
1168  		return false;
1169  	return true;
1170  }
1171  
1172  /* called with mc_lock */
1173  static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1174  			    const struct in6_addr *srcs)
1175  {
1176  	struct ip6_sf_list *psf;
1177  	int i, scount;
1178  
1179  	if (pmc->mca_sfmode == MCAST_EXCLUDE)
1180  		return mld_xmarksources(pmc, nsrcs, srcs);
1181  
1182  	/* mark INCLUDE-mode sources */
1183  
1184  	scount = 0;
1185  	for_each_psf_mclock(pmc, psf) {
1186  		if (scount == nsrcs)
1187  			break;
1188  		for (i = 0; i < nsrcs; i++) {
1189  			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1190  				psf->sf_gsresp = 1;
1191  				scount++;
1192  				break;
1193  			}
1194  		}
1195  	}
1196  	if (!scount) {
1197  		pmc->mca_flags &= ~MAF_GSQUERY;
1198  		return false;
1199  	}
1200  	pmc->mca_flags |= MAF_GSQUERY;
1201  	return true;
1202  }
1203  
1204  static int mld_force_mld_version(const struct inet6_dev *idev)
1205  {
1206  	const struct net *net = dev_net(idev->dev);
1207  	int all_force;
1208  
1209  	all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
1210  	/* Normally, both are 0 here. If version enforcement is in use,
1211  	 * the per-device setting has lower precedence than the 'all'
1212  	 * device setting (.../conf/all/force_mld_version).
1213  	 */
1214  	return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
1215  }
1216  
1217  static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1218  {
1219  	return mld_force_mld_version(idev) == 2;
1220  }
1221  
1222  static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1223  {
1224  	return mld_force_mld_version(idev) == 1;
1225  }
1226  
1227  static bool mld_in_v1_mode(const struct inet6_dev *idev)
1228  {
1229  	if (mld_in_v2_mode_only(idev))
1230  		return false;
1231  	if (mld_in_v1_mode_only(idev))
1232  		return true;
1233  	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1234  		return true;
1235  
1236  	return false;
1237  }
1238  
1239  static void mld_set_v1_mode(struct inet6_dev *idev)
1240  {
1241  	/* RFC3810, relevant sections:
1242  	 *  - 9.1. Robustness Variable
1243  	 *  - 9.2. Query Interval
1244  	 *  - 9.3. Query Response Interval
1245  	 *  - 9.12. Older Version Querier Present Timeout
1246  	 */
1247  	unsigned long switchback;
1248  
1249  	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1250  
1251  	idev->mc_v1_seen = jiffies + switchback;
1252  }
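
/* Worked example with the defaults above: QRV = 2, QI = 125 s and
 * QRI = 10 s give an Older Version Querier Present Timeout of
 * 2 * 125 + 10 = 260 s, so MLDv1 compatibility mode expires a little
 * over four minutes after the last MLDv1 Query was heard.
 */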
1253  
1254  static void mld_update_qrv(struct inet6_dev *idev,
1255  			   const struct mld2_query *mlh2)
1256  {
1257  	/* RFC3810, relevant sections:
1258  	 *  - 5.1.8. QRV (Querier's Robustness Variable)
1259  	 *  - 9.1. Robustness Variable
1260  	 */
1261  
1262  	/* The value of the Robustness Variable MUST NOT be zero,
1263  	 * and SHOULD NOT be one. Catch this here if we ever run
1264  	 * into such a case in future.
1265  	 */
1266  	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1267  	WARN_ON(idev->mc_qrv == 0);
1268  
1269  	if (mlh2->mld2q_qrv > 0)
1270  		idev->mc_qrv = mlh2->mld2q_qrv;
1271  
1272  	if (unlikely(idev->mc_qrv < min_qrv)) {
1273  		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1274  				     idev->mc_qrv, min_qrv);
1275  		idev->mc_qrv = min_qrv;
1276  	}
1277  }
1278  
1279  static void mld_update_qi(struct inet6_dev *idev,
1280  			  const struct mld2_query *mlh2)
1281  {
1282  	/* RFC3810, relevant sections:
1283  	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
1284  	 *  - 9.2. Query Interval
1285  	 *  - 9.12. Older Version Querier Present Timeout
1286  	 *    (the [Query Interval] in the last Query received)
1287  	 */
1288  	unsigned long mc_qqi;
1289  
1290  	if (mlh2->mld2q_qqic < 128) {
1291  		mc_qqi = mlh2->mld2q_qqic;
1292  	} else {
1293  		unsigned long mc_man, mc_exp;
1294  
1295  		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1296  		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1297  
1298  		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1299  	}
1300  
1301  	idev->mc_qi = mc_qqi * HZ;
1302  }
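
/* Worked example of the exponential QQIC encoding handled above, using
 * the MLDV2_QQIC_EXP()/MLDV2_QQIC_MAN() field layout from <net/mld.h>:
 * QQIC = 0x9a yields exp = 1 and mant = 0xa, hence
 * QQI = (0xa | 0x10) << (1 + 3) = 26 << 4 = 416 seconds.  Codes below
 * 128 are taken literally, e.g. QQIC = 125 means 125 seconds.
 */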
1303  
1304  static void mld_update_qri(struct inet6_dev *idev,
1305  			   const struct mld2_query *mlh2)
1306  {
1307  	/* RFC3810, relevant sections:
1308  	 *  - 5.1.3. Maximum Response Code
1309  	 *  - 9.3. Query Response Interval
1310  	 */
1311  	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1312  }
1313  
1314  static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1315  			  unsigned long *max_delay, bool v1_query)
1316  {
1317  	unsigned long mldv1_md;
1318  
1319  	/* Ignore v1 queries */
1320  	if (mld_in_v2_mode_only(idev))
1321  		return -EINVAL;
1322  
1323  	mldv1_md = ntohs(mld->mld_maxdelay);
1324  
1325  	/* When we are in MLDv1 fallback and an MLDv2 router starts up
1326  	 * unaware of the current MLDv1 operation, the MRC == MRD mapping
1327  	 * only works as long as the exponential encoding is not being
1328  	 * used (MLDv1 knows nothing of it).
1329  	 *
1330  	 * According to the RFC author, the MLDv2 implementations
1331  	 * he's aware of all use a MRC < 32768 on start up queries.
1332  	 *
1333  	 * Thus, should we *ever* encounter something else larger
1334  	 * than that, just assume the maximum possible within our
1335  	 * reach.
1336  	 */
1337  	if (!v1_query)
1338  		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1339  
1340  	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1341  
1342  	/* MLDv1 router present: we need to go into v1 mode *only*
1343  	 * when an MLDv1 query is received as per section 9.12. of
1344  	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1345  	 * queries MUST be of exactly 24 octets.
1346  	 */
1347  	if (v1_query)
1348  		mld_set_v1_mode(idev);
1349  
1350  	/* cancel MLDv2 report work */
1351  	mld_gq_stop_work(idev);
1352  	/* cancel the interface change work */
1353  	mld_ifc_stop_work(idev);
1354  	/* clear deleted report items */
1355  	mld_clear_delrec(idev);
1356  
1357  	return 0;
1358  }
1359  
1360  static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1361  			   unsigned long *max_delay)
1362  {
1363  	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1364  
1365  	mld_update_qrv(idev, mld);
1366  	mld_update_qi(idev, mld);
1367  	mld_update_qri(idev, mld);
1368  
1369  	idev->mc_maxdelay = *max_delay;
1370  
1371  	return;
1372  }
1373  
1374  /* called with rcu_read_lock() */
1375  void igmp6_event_query(struct sk_buff *skb)
1376  {
1377  	struct inet6_dev *idev = __in6_dev_get(skb->dev);
1378  
1379  	if (!idev || idev->dead)
1380  		goto out;
1381  
1382  	spin_lock_bh(&idev->mc_query_lock);
1383  	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
1384  		__skb_queue_tail(&idev->mc_query_queue, skb);
1385  		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
1386  			in6_dev_hold(idev);
1387  		skb = NULL;
1388  	}
1389  	spin_unlock_bh(&idev->mc_query_lock);
1390  out:
1391  	kfree_skb(skb);
1392  }
1393  
1394  static void __mld_query_work(struct sk_buff *skb)
1395  {
1396  	struct mld2_query *mlh2 = NULL;
1397  	const struct in6_addr *group;
1398  	unsigned long max_delay;
1399  	struct inet6_dev *idev;
1400  	struct ifmcaddr6 *ma;
1401  	struct mld_msg *mld;
1402  	int group_type;
1403  	int mark = 0;
1404  	int len, err;
1405  
1406  	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1407  		goto kfree_skb;
1408  
1409  	/* compute payload length excluding extension headers */
1410  	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1411  	len -= skb_network_header_len(skb);
1412  
1413  	/* RFC3810 6.2
1414  	 * Upon reception of an MLD message that contains a Query, the node
1415  	 * checks if the source address of the message is a valid link-local
1416  	 * address, if the Hop Limit is set to 1, and if the Router Alert
1417  	 * option is present in the Hop-By-Hop Options header of the IPv6
1418  	 * packet.  If any of these checks fails, the packet is dropped.
1419  	 */
1420  	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1421  	    ipv6_hdr(skb)->hop_limit != 1 ||
1422  	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1423  	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1424  		goto kfree_skb;
1425  
1426  	idev = in6_dev_get(skb->dev);
1427  	if (!idev)
1428  		goto kfree_skb;
1429  
1430  	mld = (struct mld_msg *)icmp6_hdr(skb);
1431  	group = &mld->mld_mca;
1432  	group_type = ipv6_addr_type(group);
1433  
1434  	if (group_type != IPV6_ADDR_ANY &&
1435  	    !(group_type&IPV6_ADDR_MULTICAST))
1436  		goto out;
1437  
1438  	if (len < MLD_V1_QUERY_LEN) {
1439  		goto out;
1440  	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1441  		err = mld_process_v1(idev, mld, &max_delay,
1442  				     len == MLD_V1_QUERY_LEN);
1443  		if (err < 0)
1444  			goto out;
1445  	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
1446  		int srcs_offset = sizeof(struct mld2_query) -
1447  				  sizeof(struct icmp6hdr);
1448  
1449  		if (!pskb_may_pull(skb, srcs_offset))
1450  			goto out;
1451  
1452  		mlh2 = (struct mld2_query *)skb_transport_header(skb);
1453  
1454  		mld_process_v2(idev, mlh2, &max_delay);
1455  
1456  		if (group_type == IPV6_ADDR_ANY) { /* general query */
1457  			if (mlh2->mld2q_nsrcs)
1458  				goto out; /* no sources allowed */
1459  
1460  			mld_gq_start_work(idev);
1461  			goto out;
1462  		}
1463  		/* mark sources to include, if group & source-specific */
1464  		if (mlh2->mld2q_nsrcs != 0) {
1465  			if (!pskb_may_pull(skb, srcs_offset +
1466  			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1467  				goto out;
1468  
1469  			mlh2 = (struct mld2_query *)skb_transport_header(skb);
1470  			mark = 1;
1471  		}
1472  	} else {
1473  		goto out;
1474  	}
1475  
1476  	if (group_type == IPV6_ADDR_ANY) {
1477  		for_each_mc_mclock(idev, ma) {
1478  			igmp6_group_queried(ma, max_delay);
1479  		}
1480  	} else {
1481  		for_each_mc_mclock(idev, ma) {
1482  			if (!ipv6_addr_equal(group, &ma->mca_addr))
1483  				continue;
1484  			if (ma->mca_flags & MAF_TIMER_RUNNING) {
1485  				/* gsquery <- gsquery && mark */
1486  				if (!mark)
1487  					ma->mca_flags &= ~MAF_GSQUERY;
1488  			} else {
1489  				/* gsquery <- mark */
1490  				if (mark)
1491  					ma->mca_flags |= MAF_GSQUERY;
1492  				else
1493  					ma->mca_flags &= ~MAF_GSQUERY;
1494  			}
1495  			if (!(ma->mca_flags & MAF_GSQUERY) ||
1496  			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1497  				igmp6_group_queried(ma, max_delay);
1498  			break;
1499  		}
1500  	}
1501  
1502  out:
1503  	in6_dev_put(idev);
1504  kfree_skb:
1505  	consume_skb(skb);
1506  }
1507  
1508  static void mld_query_work(struct work_struct *work)
1509  {
1510  	struct inet6_dev *idev = container_of(to_delayed_work(work),
1511  					      struct inet6_dev,
1512  					      mc_query_work);
1513  	struct sk_buff_head q;
1514  	struct sk_buff *skb;
1515  	bool rework = false;
1516  	int cnt = 0;
1517  
1518  	skb_queue_head_init(&q);
1519  
1520  	spin_lock_bh(&idev->mc_query_lock);
1521  	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
1522  		__skb_queue_tail(&q, skb);
1523  
1524  		if (++cnt >= MLD_MAX_QUEUE) {
1525  			rework = true;
1526  			break;
1527  		}
1528  	}
1529  	spin_unlock_bh(&idev->mc_query_lock);
1530  
1531  	mutex_lock(&idev->mc_lock);
1532  	while ((skb = __skb_dequeue(&q)))
1533  		__mld_query_work(skb);
1534  	mutex_unlock(&idev->mc_lock);
1535  
1536  	if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
1537  		return;
1538  
1539  	in6_dev_put(idev);
1540  }
1541  
1542  /* called with rcu_read_lock() */
1543  void igmp6_event_report(struct sk_buff *skb)
1544  {
1545  	struct inet6_dev *idev = __in6_dev_get(skb->dev);
1546  
1547  	if (!idev || idev->dead)
1548  		goto out;
1549  
1550  	spin_lock_bh(&idev->mc_report_lock);
1551  	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
1552  		__skb_queue_tail(&idev->mc_report_queue, skb);
1553  		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
1554  			in6_dev_hold(idev);
1555  		skb = NULL;
1556  	}
1557  	spin_unlock_bh(&idev->mc_report_lock);
1558  out:
1559  	kfree_skb(skb);
1560  }
1561  
1562  static void __mld_report_work(struct sk_buff *skb)
1563  {
1564  	struct inet6_dev *idev;
1565  	struct ifmcaddr6 *ma;
1566  	struct mld_msg *mld;
1567  	int addr_type;
1568  
1569  	/* Our own report looped back. Ignore it. */
1570  	if (skb->pkt_type == PACKET_LOOPBACK)
1571  		goto kfree_skb;
1572  
1573  	/* send our report if the MC router may not have heard this report */
1574  	if (skb->pkt_type != PACKET_MULTICAST &&
1575  	    skb->pkt_type != PACKET_BROADCAST)
1576  		goto kfree_skb;
1577  
1578  	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1579  		goto kfree_skb;
1580  
1581  	mld = (struct mld_msg *)icmp6_hdr(skb);
1582  
1583  	/* Drop reports whose source is not link-local */
1584  	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1585  	if (addr_type != IPV6_ADDR_ANY &&
1586  	    !(addr_type&IPV6_ADDR_LINKLOCAL))
1587  		goto kfree_skb;
1588  
1589  	idev = in6_dev_get(skb->dev);
1590  	if (!idev)
1591  		goto kfree_skb;
1592  
1593  	/*
1594  	 *	Cancel the work for this group
1595  	 */
1596  
1597  	for_each_mc_mclock(idev, ma) {
1598  		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1599  			if (cancel_delayed_work(&ma->mca_work))
1600  				refcount_dec(&ma->mca_refcnt);
1601  			ma->mca_flags &= ~(MAF_LAST_REPORTER |
1602  					   MAF_TIMER_RUNNING);
1603  			break;
1604  		}
1605  	}
1606  
1607  	in6_dev_put(idev);
1608  kfree_skb:
1609  	consume_skb(skb);
1610  }
1611  
1612  static void mld_report_work(struct work_struct *work)
1613  {
1614  	struct inet6_dev *idev = container_of(to_delayed_work(work),
1615  					      struct inet6_dev,
1616  					      mc_report_work);
1617  	struct sk_buff_head q;
1618  	struct sk_buff *skb;
1619  	bool rework = false;
1620  	int cnt = 0;
1621  
1622  	skb_queue_head_init(&q);
1623  	spin_lock_bh(&idev->mc_report_lock);
1624  	while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
1625  		__skb_queue_tail(&q, skb);
1626  
1627  		if (++cnt >= MLD_MAX_QUEUE) {
1628  			rework = true;
1629  			break;
1630  		}
1631  	}
1632  	spin_unlock_bh(&idev->mc_report_lock);
1633  
1634  	mutex_lock(&idev->mc_lock);
1635  	while ((skb = __skb_dequeue(&q)))
1636  		__mld_report_work(skb);
1637  	mutex_unlock(&idev->mc_lock);
1638  
1639  	if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
1640  		return;
1641  
1642  	in6_dev_put(idev);
1643  }
1644  
1645  static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1646  		  int gdeleted, int sdeleted)
1647  {
1648  	switch (type) {
1649  	case MLD2_MODE_IS_INCLUDE:
1650  	case MLD2_MODE_IS_EXCLUDE:
1651  		if (gdeleted || sdeleted)
1652  			return false;
1653  		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1654  			if (pmc->mca_sfmode == MCAST_INCLUDE)
1655  				return true;
1656  			/* don't include if this source is excluded
1657  			 * in all filters
1658  			 */
1659  			if (psf->sf_count[MCAST_INCLUDE])
1660  				return type == MLD2_MODE_IS_INCLUDE;
1661  			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1662  				psf->sf_count[MCAST_EXCLUDE];
1663  		}
1664  		return false;
1665  	case MLD2_CHANGE_TO_INCLUDE:
1666  		if (gdeleted || sdeleted)
1667  			return false;
1668  		return psf->sf_count[MCAST_INCLUDE] != 0;
1669  	case MLD2_CHANGE_TO_EXCLUDE:
1670  		if (gdeleted || sdeleted)
1671  			return false;
1672  		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1673  		    psf->sf_count[MCAST_INCLUDE])
1674  			return false;
1675  		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1676  			psf->sf_count[MCAST_EXCLUDE];
1677  	case MLD2_ALLOW_NEW_SOURCES:
1678  		if (gdeleted || !psf->sf_crcount)
1679  			return false;
1680  		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1681  	case MLD2_BLOCK_OLD_SOURCES:
1682  		if (pmc->mca_sfmode == MCAST_INCLUDE)
1683  			return gdeleted || (psf->sf_crcount && sdeleted);
1684  		return psf->sf_crcount && !gdeleted && !sdeleted;
1685  	}
1686  	return false;
1687  }
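
/* Condensed view of the switch above, i.e. which sources each MLDv2
 * record type reports (cf. RFC3810 5.2.12):
 *
 *	MODE_IS_INCLUDE/EXCLUDE	current-state records for active sources,
 *				honouring a pending group-and-source query
 *	CHANGE_TO_INCLUDE	sources with a non-zero INCLUDE count
 *	CHANGE_TO_EXCLUDE	sources excluded by every listener
 *	ALLOW_NEW_SOURCES	sources still being retransmitted, picked
 *				from the live or deleted list depending
 *				on the filter mode
 *	BLOCK_OLD_SOURCES	the complementary retransmission set
 */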
1688  
1689  static int
1690  mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1691  {
1692  	struct ip6_sf_list *psf;
1693  	int scount = 0;
1694  
1695  	for_each_psf_mclock(pmc, psf) {
1696  		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1697  			continue;
1698  		scount++;
1699  	}
1700  	return scount;
1701  }
1702  
1703  static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
1704  		       struct net_device *dev, const struct in6_addr *saddr,
1705  		       const struct in6_addr *daddr, int proto, int len)
1706  {
1707  	struct ipv6hdr *hdr;
1708  
1709  	skb->protocol = htons(ETH_P_IPV6);
1710  	skb->dev = dev;
1711  
1712  	skb_reset_network_header(skb);
1713  	skb_put(skb, sizeof(struct ipv6hdr));
1714  	hdr = ipv6_hdr(skb);
1715  
1716  	ip6_flow_hdr(hdr, 0, 0);
1717  
1718  	hdr->payload_len = htons(len);
1719  	hdr->nexthdr = proto;
1720  	hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);
1721  
1722  	hdr->saddr = *saddr;
1723  	hdr->daddr = *daddr;
1724  }
1725  
1726  static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1727  {
1728  	u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
1729  		     2, 0, 0, IPV6_TLV_PADN, 0 };
1730  	struct net_device *dev = idev->dev;
1731  	int hlen = LL_RESERVED_SPACE(dev);
1732  	int tlen = dev->needed_tailroom;
1733  	struct net *net = dev_net(dev);
1734  	const struct in6_addr *saddr;
1735  	struct in6_addr addr_buf;
1736  	struct mld2_report *pmr;
1737  	struct sk_buff *skb;
1738  	unsigned int size;
1739  	struct sock *sk;
1740  	int err;
1741  
1742  	sk = net->ipv6.igmp_sk;
1743  	/* we assume size > sizeof(ra) here
1744  	 * Also try to not allocate high-order pages for big MTU
1745  	 */
1746  	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
1747  	skb = sock_alloc_send_skb(sk, size, 1, &err);
1748  	if (!skb)
1749  		return NULL;
1750  
1751  	skb->priority = TC_PRIO_CONTROL;
1752  	skb_reserve(skb, hlen);
1753  	skb_tailroom_reserve(skb, mtu, tlen);
1754  
1755  	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1756  		/* <draft-ietf-magma-mld-source-05.txt>:
1757  		 * use unspecified address as the source address
1758  		 * when a valid link-local address is not available.
1759  		 */
1760  		saddr = &in6addr_any;
1761  	} else
1762  		saddr = &addr_buf;
1763  
1764  	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1765  
1766  	skb_put_data(skb, ra, sizeof(ra));
1767  
1768  	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1769  	skb_put(skb, sizeof(*pmr));
1770  	pmr = (struct mld2_report *)skb_transport_header(skb);
1771  	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1772  	pmr->mld2r_resv1 = 0;
1773  	pmr->mld2r_cksum = 0;
1774  	pmr->mld2r_resv2 = 0;
1775  	pmr->mld2r_ngrec = 0;
1776  	return skb;
1777  }
1778  
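/* Finalize an MLDv2 report (payload length, ICMPv6 checksum), route
 * it, and push it through the NF_INET_LOCAL_OUT hook to the output
 * path, updating the SNMP counters on success or failure.
 */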
1779  static void mld_sendpack(struct sk_buff *skb)
1780  {
1781  	struct ipv6hdr *pip6 = ipv6_hdr(skb);
1782  	struct mld2_report *pmr =
1783  			      (struct mld2_report *)skb_transport_header(skb);
1784  	int payload_len, mldlen;
1785  	struct inet6_dev *idev;
1786  	struct net *net = dev_net(skb->dev);
1787  	int err;
1788  	struct flowi6 fl6;
1789  	struct dst_entry *dst;
1790  
1791  	rcu_read_lock();
1792  	idev = __in6_dev_get(skb->dev);
1793  	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
1794  
1795  	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1796  		sizeof(*pip6);
1797  	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1798  	pip6->payload_len = htons(payload_len);
1799  
1800  	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1801  					   IPPROTO_ICMPV6,
1802  					   csum_partial(skb_transport_header(skb),
1803  							mldlen, 0));
1804  
1805  	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1806  			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1807  			 skb->dev->ifindex);
1808  	dst = icmp6_dst_alloc(skb->dev, &fl6);
1809  
1810  	err = 0;
1811  	if (IS_ERR(dst)) {
1812  		err = PTR_ERR(dst);
1813  		dst = NULL;
1814  	}
1815  	skb_dst_set(skb, dst);
1816  	if (err)
1817  		goto err_out;
1818  
1819  	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1820  		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1821  		      dst_output);
1822  out:
1823  	if (!err) {
1824  		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1825  		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1826  	} else {
1827  		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1828  	}
1829  
1830  	rcu_read_unlock();
1831  	return;
1832  
1833  err_out:
1834  	kfree_skb(skb);
1835  	goto out;
1836  }
1837  
1838  static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1839  {
1840  	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc, type, gdel, sdel);
1841  }
1842  
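/* Start a new group record for @pmc in @skb, allocating a fresh
 * packet if needed; *ppgr is returned so the caller can fill in
 * grec_nsrcs once the sources have been appended.
 */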
1843  static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1844  	int type, struct mld2_grec **ppgr, unsigned int mtu)
1845  {
1846  	struct mld2_report *pmr;
1847  	struct mld2_grec *pgr;
1848  
1849  	if (!skb) {
1850  		skb = mld_newpack(pmc->idev, mtu);
1851  		if (!skb)
1852  			return NULL;
1853  	}
1854  	pgr = skb_put(skb, sizeof(struct mld2_grec));
1855  	pgr->grec_type = type;
1856  	pgr->grec_auxwords = 0;
1857  	pgr->grec_nsrcs = 0;
1858  	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
1859  	pmr = (struct mld2_report *)skb_transport_header(skb);
1860  	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec) + 1);
1861  	*ppgr = pgr;
1862  	return skb;
1863  }
1864  
1865  #define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
1866  
1867  /* called with mc_lock */
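/* Append a group record of @type for @pmc, splitting the report
 * across packets as the MTU requires. @gdeleted/@sdeleted select the
 * tomb (deleted) group/source lists; @crsend forces a record even
 * when no source passes the is_in() filter.
 */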
1868  static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1869  				int type, int gdeleted, int sdeleted,
1870  				int crsend)
1871  {
1872  	struct ip6_sf_list *psf, *psf_prev, *psf_next;
1873  	int scount, stotal, first, isquery, truncate;
1874  	struct ip6_sf_list __rcu **psf_list;
1875  	struct inet6_dev *idev = pmc->idev;
1876  	struct net_device *dev = idev->dev;
1877  	struct mld2_grec *pgr = NULL;
1878  	struct mld2_report *pmr;
1879  	unsigned int mtu;
1880  
1881  	if (pmc->mca_flags & MAF_NOREPORT)
1882  		return skb;
1883  
1884  	mtu = READ_ONCE(dev->mtu);
1885  	if (mtu < IPV6_MIN_MTU)
1886  		return skb;
1887  
1888  	isquery = type == MLD2_MODE_IS_INCLUDE ||
1889  		  type == MLD2_MODE_IS_EXCLUDE;
1890  	truncate = type == MLD2_MODE_IS_EXCLUDE ||
1891  		    type == MLD2_CHANGE_TO_EXCLUDE;
1892  
1893  	stotal = scount = 0;
1894  
1895  	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1896  
1897  	if (!rcu_access_pointer(*psf_list))
1898  		goto empty_source;
1899  
1900  	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1901  
1902  	/* EX and TO_EX get a fresh packet, if needed */
1903  	if (truncate) {
1904  		if (pmr && pmr->mld2r_ngrec &&
1905  		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1906  			if (skb)
1907  				mld_sendpack(skb);
1908  			skb = mld_newpack(idev, mtu);
1909  		}
1910  	}
1911  	first = 1;
1912  	psf_prev = NULL;
1913  	for (psf = mc_dereference(*psf_list, idev);
1914  	     psf;
1915  	     psf = psf_next) {
1916  		struct in6_addr *psrc;
1917  
1918  		psf_next = mc_dereference(psf->sf_next, idev);
1919  
1920  		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1921  			psf_prev = psf;
1922  			continue;
1923  		}
1924  
1925  		/* Based on RFC3810 6.1. Should not send source-list change
1926  		 * records when there is a filter mode change.
1927  		 */
1928  		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1929  		     (!gdeleted && pmc->mca_crcount)) &&
1930  		    (type == MLD2_ALLOW_NEW_SOURCES ||
1931  		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1932  			goto decrease_sf_crcount;
1933  
1934  		/* clear marks on query responses */
1935  		if (isquery)
1936  			psf->sf_gsresp = 0;
1937  
1938  		if (AVAILABLE(skb) < sizeof(*psrc) +
1939  		    first * sizeof(struct mld2_grec)) {
1940  			if (truncate && !first)
1941  				break;	 /* truncate these */
1942  			if (pgr)
1943  				pgr->grec_nsrcs = htons(scount);
1944  			if (skb)
1945  				mld_sendpack(skb);
1946  			skb = mld_newpack(idev, mtu);
1947  			first = 1;
1948  			scount = 0;
1949  		}
1950  		if (first) {
1951  			skb = add_grhead(skb, pmc, type, &pgr, mtu);
1952  			first = 0;
1953  		}
1954  		if (!skb)
1955  			return NULL;
1956  		psrc = skb_put(skb, sizeof(*psrc));
1957  		*psrc = psf->sf_addr;
1958  		scount++; stotal++;
1959  		if ((type == MLD2_ALLOW_NEW_SOURCES ||
1960  		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1961  decrease_sf_crcount:
1962  			psf->sf_crcount--;
1963  			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1964  				if (psf_prev)
1965  					rcu_assign_pointer(psf_prev->sf_next,
1966  							   mc_dereference(psf->sf_next, idev));
1967  				else
1968  					rcu_assign_pointer(*psf_list,
1969  							   mc_dereference(psf->sf_next, idev));
1970  				kfree_rcu(psf, rcu);
1971  				continue;
1972  			}
1973  		}
1974  		psf_prev = psf;
1975  	}
1976  
1977  empty_source:
1978  	if (!stotal) {
1979  		if (type == MLD2_ALLOW_NEW_SOURCES ||
1980  		    type == MLD2_BLOCK_OLD_SOURCES)
1981  			return skb;
1982  		if (pmc->mca_crcount || isquery || crsend) {
1983  			/* make sure we have room for group header */
1984  			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1985  				mld_sendpack(skb);
1986  				skb = NULL; /* add_grhead will get a new one */
1987  			}
1988  			skb = add_grhead(skb, pmc, type, &pgr, mtu);
1989  		}
1990  	}
1991  	if (pgr)
1992  		pgr->grec_nsrcs = htons(scount);
1993  
1994  	if (isquery)
1995  		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
1996  	return skb;
1997  }
1998  
1999  /* called with mc_lock */
2000  static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2001  {
2002  	struct sk_buff *skb = NULL;
2003  	int type;
2004  
2005  	if (!pmc) {
2006  		for_each_mc_mclock(idev, pmc) {
2007  			if (pmc->mca_flags & MAF_NOREPORT)
2008  				continue;
2009  			if (pmc->mca_sfcount[MCAST_EXCLUDE])
2010  				type = MLD2_MODE_IS_EXCLUDE;
2011  			else
2012  				type = MLD2_MODE_IS_INCLUDE;
2013  			skb = add_grec(skb, pmc, type, 0, 0, 0);
2014  		}
2015  	} else {
2016  		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2017  			type = MLD2_MODE_IS_EXCLUDE;
2018  		else
2019  			type = MLD2_MODE_IS_INCLUDE;
2020  		skb = add_grec(skb, pmc, type, 0, 0, 0);
2021  	}
2022  	if (skb)
2023  		mld_sendpack(skb);
2024  }
2025  
2026  /*
2027   * remove zero-count source records from a source filter list
2028   * called with mc_lock
2029   */
2030  static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
2031  {
2032  	struct ip6_sf_list *psf_prev, *psf_next, *psf;
2033  
2034  	psf_prev = NULL;
2035  	for (psf = mc_dereference(*ppsf, idev);
2036  	     psf;
2037  	     psf = psf_next) {
2038  		psf_next = mc_dereference(psf->sf_next, idev);
2039  		if (psf->sf_crcount == 0) {
2040  			if (psf_prev)
2041  				rcu_assign_pointer(psf_prev->sf_next,
2042  						   mc_dereference(psf->sf_next, idev));
2043  			else
2044  				rcu_assign_pointer(*ppsf,
2045  						   mc_dereference(psf->sf_next, idev));
2046  			kfree_rcu(psf, rcu);
2047  		} else {
2048  			psf_prev = psf;
2049  		}
2050  	}
2051  }
2052  
2053  /* called with mc_lock */
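/* Transmit filter-mode and source-list change records: deleted
 * groups on the tomb list first, then changes on the active list,
 * decrementing the per-group retransmission counters as we go.
 */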
2054  static void mld_send_cr(struct inet6_dev *idev)
2055  {
2056  	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
2057  	struct sk_buff *skb = NULL;
2058  	int type, dtype;
2059  
2060  	/* deleted MCAs */
2061  	pmc_prev = NULL;
2062  	for (pmc = mc_dereference(idev->mc_tomb, idev);
2063  	     pmc;
2064  	     pmc = pmc_next) {
2065  		pmc_next = mc_dereference(pmc->next, idev);
2066  		if (pmc->mca_sfmode == MCAST_INCLUDE) {
2067  			type = MLD2_BLOCK_OLD_SOURCES;
2068  			dtype = MLD2_BLOCK_OLD_SOURCES;
2069  			skb = add_grec(skb, pmc, type, 1, 0, 0);
2070  			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
2071  		}
2072  		if (pmc->mca_crcount) {
2073  			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
2074  				type = MLD2_CHANGE_TO_INCLUDE;
2075  				skb = add_grec(skb, pmc, type, 1, 0, 0);
2076  			}
2077  			pmc->mca_crcount--;
2078  			if (pmc->mca_crcount == 0) {
2079  				mld_clear_zeros(&pmc->mca_tomb, idev);
2080  				mld_clear_zeros(&pmc->mca_sources, idev);
2081  			}
2082  		}
2083  		if (pmc->mca_crcount == 0 &&
2084  		    !rcu_access_pointer(pmc->mca_tomb) &&
2085  		    !rcu_access_pointer(pmc->mca_sources)) {
2086  			if (pmc_prev)
2087  				rcu_assign_pointer(pmc_prev->next, pmc_next);
2088  			else
2089  				rcu_assign_pointer(idev->mc_tomb, pmc_next);
2090  			in6_dev_put(pmc->idev);
2091  			kfree_rcu(pmc, rcu);
2092  		} else
2093  			pmc_prev = pmc;
2094  	}
2095  
2096  	/* change recs */
2097  	for_each_mc_mclock(idev, pmc) {
2098  		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2099  			type = MLD2_BLOCK_OLD_SOURCES;
2100  			dtype = MLD2_ALLOW_NEW_SOURCES;
2101  		} else {
2102  			type = MLD2_ALLOW_NEW_SOURCES;
2103  			dtype = MLD2_BLOCK_OLD_SOURCES;
2104  		}
2105  		skb = add_grec(skb, pmc, type, 0, 0, 0);
2106  		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */
2107  
2108  		/* filter mode changes */
2109  		if (pmc->mca_crcount) {
2110  			if (pmc->mca_sfmode == MCAST_EXCLUDE)
2111  				type = MLD2_CHANGE_TO_EXCLUDE;
2112  			else
2113  				type = MLD2_CHANGE_TO_INCLUDE;
2114  			skb = add_grec(skb, pmc, type, 0, 0, 0);
2115  			pmc->mca_crcount--;
2116  		}
2117  	}
2118  	if (!skb)
2119  		return;
2120  	mld_sendpack(skb);
2121  }
2122  
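/* Build and send an MLDv1 message. Reports are sent to the group
 * being reported; done (reduction) messages go to the link-local
 * all-routers address (ff02::2).
 */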
2123  static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
2124  {
2125  	struct net *net = dev_net(dev);
2126  	struct sock *sk = net->ipv6.igmp_sk;
2127  	struct inet6_dev *idev;
2128  	struct sk_buff *skb;
2129  	struct mld_msg *hdr;
2130  	const struct in6_addr *snd_addr, *saddr;
2131  	struct in6_addr addr_buf;
2132  	int hlen = LL_RESERVED_SPACE(dev);
2133  	int tlen = dev->needed_tailroom;
2134  	int err, len, payload_len, full_len;
2135  	u8 ra[8] = { IPPROTO_ICMPV6, 0,
2136  		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
2137  		     IPV6_TLV_PADN, 0 };
2138  	struct flowi6 fl6;
2139  	struct dst_entry *dst;
2140  
2141  	if (type == ICMPV6_MGM_REDUCTION)
2142  		snd_addr = &in6addr_linklocal_allrouters;
2143  	else
2144  		snd_addr = addr;
2145  
2146  	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2147  	payload_len = len + sizeof(ra);
2148  	full_len = sizeof(struct ipv6hdr) + payload_len;
2149  
2150  	rcu_read_lock();
2151  	IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
2152  	rcu_read_unlock();
2153  
2154  	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
2155  
2156  	if (!skb) {
2157  		rcu_read_lock();
2158  		IP6_INC_STATS(net, __in6_dev_get(dev),
2159  			      IPSTATS_MIB_OUTDISCARDS);
2160  		rcu_read_unlock();
2161  		return;
2162  	}
2163  	skb->priority = TC_PRIO_CONTROL;
2164  	skb_reserve(skb, hlen);
2165  
2166  	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2167  		/* <draft-ietf-magma-mld-source-05.txt>:
2168  		 * use unspecified address as the source address
2169  		 * when a valid link-local address is not available.
2170  		 */
2171  		saddr = &in6addr_any;
2172  	} else
2173  		saddr = &addr_buf;
2174  
2175  	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2176  
2177  	skb_put_data(skb, ra, sizeof(ra));
2178  
2179  	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2180  	hdr->mld_type = type;
2181  	hdr->mld_mca = *addr;
2182  
2183  	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2184  					 IPPROTO_ICMPV6,
2185  					 csum_partial(hdr, len, 0));
2186  
2187  	rcu_read_lock();
2188  	idev = __in6_dev_get(skb->dev);
2189  
2190  	icmpv6_flow_init(sk, &fl6, type,
2191  			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2192  			 skb->dev->ifindex);
2193  	dst = icmp6_dst_alloc(skb->dev, &fl6);
2194  	if (IS_ERR(dst)) {
2195  		err = PTR_ERR(dst);
2196  		goto err_out;
2197  	}
2198  
2199  	skb_dst_set(skb, dst);
2200  	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2201  		      net, sk, skb, NULL, skb->dev,
2202  		      dst_output);
2203  out:
2204  	if (!err) {
2205  		ICMP6MSGOUT_INC_STATS(net, idev, type);
2206  		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2207  	} else
2208  		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2209  
2210  	rcu_read_unlock();
2211  	return;
2212  
2213  err_out:
2214  	kfree_skb(skb);
2215  	goto out;
2216  }
2217  
2218  /* called with mc_lock */
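/* Send the initial batch of unsolicited change records after DAD
 * completes; a no-op when the interface is in MLDv1 mode.
 */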
2219  static void mld_send_initial_cr(struct inet6_dev *idev)
2220  {
2221  	struct sk_buff *skb;
2222  	struct ifmcaddr6 *pmc;
2223  	int type;
2224  
2225  	if (mld_in_v1_mode(idev))
2226  		return;
2227  
2228  	skb = NULL;
2229  	for_each_mc_mclock(idev, pmc) {
2230  		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2231  			type = MLD2_CHANGE_TO_EXCLUDE;
2232  		else
2233  			type = MLD2_ALLOW_NEW_SOURCES;
2234  		skb = add_grec(skb, pmc, type, 0, 0, 1);
2235  	}
2236  	if (skb)
2237  		mld_sendpack(skb);
2238  }
2239  
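/* DAD on the link-local address finished: send the first unsolicited
 * report and schedule retransmissions until mc_qrv reports have been
 * sent, matching the RFC 3810 [Robustness Variable] recommendation.
 */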
2240  void ipv6_mc_dad_complete(struct inet6_dev *idev)
2241  {
2242  	mutex_lock(&idev->mc_lock);
2243  	idev->mc_dad_count = idev->mc_qrv;
2244  	if (idev->mc_dad_count) {
2245  		mld_send_initial_cr(idev);
2246  		idev->mc_dad_count--;
2247  		if (idev->mc_dad_count)
2248  			mld_dad_start_work(idev,
2249  					   unsolicited_report_interval(idev));
2250  	}
2251  	mutex_unlock(&idev->mc_lock);
2252  }
2253  
2254  static void mld_dad_work(struct work_struct *work)
2255  {
2256  	struct inet6_dev *idev = container_of(to_delayed_work(work),
2257  					      struct inet6_dev,
2258  					      mc_dad_work);
2259  	mutex_lock(&idev->mc_lock);
2260  	mld_send_initial_cr(idev);
2261  	if (idev->mc_dad_count) {
2262  		idev->mc_dad_count--;
2263  		if (idev->mc_dad_count)
2264  			mld_dad_start_work(idev,
2265  					   unsolicited_report_interval(idev));
2266  	}
2267  	mutex_unlock(&idev->mc_lock);
2268  	in6_dev_put(idev);
2269  }
2270  
2271  /* called with mc_lock */
2272  static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2273  	const struct in6_addr *psfsrc)
2274  {
2275  	struct ip6_sf_list *psf, *psf_prev;
2276  	int rv = 0;
2277  
2278  	psf_prev = NULL;
2279  	for_each_psf_mclock(pmc, psf) {
2280  		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2281  			break;
2282  		psf_prev = psf;
2283  	}
2284  	if (!psf || psf->sf_count[sfmode] == 0) {
2285  		/* source filter not found, or count wrong => bug */
2286  		return -ESRCH;
2287  	}
2288  	psf->sf_count[sfmode]--;
2289  	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2290  		struct inet6_dev *idev = pmc->idev;
2291  
2292  		/* no more filters for this source */
2293  		if (psf_prev)
2294  			rcu_assign_pointer(psf_prev->sf_next,
2295  					   mc_dereference(psf->sf_next, idev));
2296  		else
2297  			rcu_assign_pointer(pmc->mca_sources,
2298  					   mc_dereference(psf->sf_next, idev));
2299  
2300  		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2301  		    !mld_in_v1_mode(idev)) {
2302  			psf->sf_crcount = idev->mc_qrv;
2303  			rcu_assign_pointer(psf->sf_next,
2304  					   mc_dereference(pmc->mca_tomb, idev));
2305  			rcu_assign_pointer(pmc->mca_tomb, psf);
2306  			rv = 1;
2307  		} else {
2308  			kfree_rcu(psf, rcu);
2309  		}
2310  	}
2311  	return rv;
2312  }
2313  
2314  /* called with mc_lock */
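/* Remove @sfcount sources in @psfsrc from the @pmca group on @idev,
 * and schedule a change report if this flips the filter mode or
 * leaves deleted-source records behind.
 */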
2315  static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2316  			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
2317  			  int delta)
2318  {
2319  	struct ifmcaddr6 *pmc;
2320  	int	changerec = 0;
2321  	int	i, err;
2322  
2323  	if (!idev)
2324  		return -ENODEV;
2325  
2326  	for_each_mc_mclock(idev, pmc) {
2327  		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2328  			break;
2329  	}
2330  	if (!pmc)
2331  		return -ESRCH;
2332  
2333  	sf_markstate(pmc);
2334  	if (!delta) {
2335  		if (!pmc->mca_sfcount[sfmode])
2336  			return -EINVAL;
2337  
2338  		pmc->mca_sfcount[sfmode]--;
2339  	}
2340  	err = 0;
2341  	for (i = 0; i < sfcount; i++) {
2342  		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2343  
2344  		changerec |= rv > 0;
2345  		if (!err && rv < 0)
2346  			err = rv;
2347  	}
2348  	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2349  	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2350  	    pmc->mca_sfcount[MCAST_INCLUDE]) {
2351  		struct ip6_sf_list *psf;
2352  
2353  		/* filter mode change */
2354  		pmc->mca_sfmode = MCAST_INCLUDE;
2355  		pmc->mca_crcount = idev->mc_qrv;
2356  		idev->mc_ifc_count = pmc->mca_crcount;
2357  		for_each_psf_mclock(pmc, psf)
2358  			psf->sf_crcount = 0;
2359  		mld_ifc_event(pmc->idev);
2360  	} else if (sf_setstate(pmc) || changerec) {
2361  		mld_ifc_event(pmc->idev);
2362  	}
2363  
2364  	return err;
2365  }
2366  
2367  /*
2368   * Add multicast single-source filter to the interface list
2369   * called with mc_lock
2370   */
2371  static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2372  	const struct in6_addr *psfsrc)
2373  {
2374  	struct ip6_sf_list *psf, *psf_prev;
2375  
2376  	psf_prev = NULL;
2377  	for_each_psf_mclock(pmc, psf) {
2378  		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2379  			break;
2380  		psf_prev = psf;
2381  	}
2382  	if (!psf) {
2383  		psf = kzalloc(sizeof(*psf), GFP_KERNEL);
2384  		if (!psf)
2385  			return -ENOBUFS;
2386  
2387  		psf->sf_addr = *psfsrc;
2388  		if (psf_prev) {
2389  			rcu_assign_pointer(psf_prev->sf_next, psf);
2390  		} else {
2391  			rcu_assign_pointer(pmc->mca_sources, psf);
2392  		}
2393  	}
2394  	psf->sf_count[sfmode]++;
2395  	return 0;
2396  }
2397  
2398  /* called with mc_lock */
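/* Snapshot, per source, whether it was in the forwarding set before
 * a filter update; sf_setstate() diffs against this snapshot to
 * decide which change records to schedule.
 */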
2399  static void sf_markstate(struct ifmcaddr6 *pmc)
2400  {
2401  	struct ip6_sf_list *psf;
2402  	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2403  
2404  	for_each_psf_mclock(pmc, psf) {
2405  		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2406  			psf->sf_oldin = mca_xcount ==
2407  				psf->sf_count[MCAST_EXCLUDE] &&
2408  				!psf->sf_count[MCAST_INCLUDE];
2409  		} else {
2410  			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2411  		}
2412  	}
2413  }
2414  
2415  /* called with mc_lock */
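/* Compare each source's new state with the sf_markstate() snapshot,
 * arming retransmission counters (and tomb entries for sources that
 * went inactive). Returns nonzero if any source changed state.
 */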
2416  static int sf_setstate(struct ifmcaddr6 *pmc)
2417  {
2418  	struct ip6_sf_list *psf, *dpsf;
2419  	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2420  	int qrv = pmc->idev->mc_qrv;
2421  	int new_in, rv;
2422  
2423  	rv = 0;
2424  	for_each_psf_mclock(pmc, psf) {
2425  		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2426  			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2427  				!psf->sf_count[MCAST_INCLUDE];
2428  		} else
2429  			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2430  		if (new_in) {
2431  			if (!psf->sf_oldin) {
2432  				struct ip6_sf_list *prev = NULL;
2433  
2434  				for_each_psf_tomb(pmc, dpsf) {
2435  					if (ipv6_addr_equal(&dpsf->sf_addr,
2436  					    &psf->sf_addr))
2437  						break;
2438  					prev = dpsf;
2439  				}
2440  				if (dpsf) {
2441  					if (prev)
2442  						rcu_assign_pointer(prev->sf_next,
2443  								   mc_dereference(dpsf->sf_next,
2444  										  pmc->idev));
2445  					else
2446  						rcu_assign_pointer(pmc->mca_tomb,
2447  								   mc_dereference(dpsf->sf_next,
2448  										  pmc->idev));
2449  					kfree_rcu(dpsf, rcu);
2450  				}
2451  				psf->sf_crcount = qrv;
2452  				rv++;
2453  			}
2454  		} else if (psf->sf_oldin) {
2455  			psf->sf_crcount = 0;
2456  			/*
2457  			 * add or update "delete" records if an active filter
2458  			 * is now inactive
2459  			 */
2460  
2461  			for_each_psf_tomb(pmc, dpsf)
2462  				if (ipv6_addr_equal(&dpsf->sf_addr,
2463  				    &psf->sf_addr))
2464  					break;
2465  			if (!dpsf) {
2466  				dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
2467  				if (!dpsf)
2468  					continue;
2469  				*dpsf = *psf;
2470  				rcu_assign_pointer(dpsf->sf_next,
2471  						   mc_dereference(pmc->mca_tomb, pmc->idev));
2472  				rcu_assign_pointer(pmc->mca_tomb, dpsf);
2473  			}
2474  			dpsf->sf_crcount = qrv;
2475  			rv++;
2476  		}
2477  	}
2478  	return rv;
2479  }
2480  
2481  /*
2482   * Add multicast source filter list to the interface list
2483   * called with mc_lock
2484   */
2485  static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2486  			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
2487  			  int delta)
2488  {
2489  	struct ifmcaddr6 *pmc;
2490  	int	isexclude;
2491  	int	i, err;
2492  
2493  	if (!idev)
2494  		return -ENODEV;
2495  
2496  	for_each_mc_mclock(idev, pmc) {
2497  		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2498  			break;
2499  	}
2500  	if (!pmc)
2501  		return -ESRCH;
2502  
2503  	sf_markstate(pmc);
2504  	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2505  	if (!delta)
2506  		pmc->mca_sfcount[sfmode]++;
2507  	err = 0;
2508  	for (i = 0; i < sfcount; i++) {
2509  		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2510  		if (err)
2511  			break;
2512  	}
2513  	if (err) {
2514  		int j;
2515  
2516  		if (!delta)
2517  			pmc->mca_sfcount[sfmode]--;
2518  		for (j = 0; j < i; j++)
2519  			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2520  	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2521  		struct ip6_sf_list *psf;
2522  
2523  		/* filter mode change */
2524  		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2525  			pmc->mca_sfmode = MCAST_EXCLUDE;
2526  		else if (pmc->mca_sfcount[MCAST_INCLUDE])
2527  			pmc->mca_sfmode = MCAST_INCLUDE;
2528  		/* else no filters; keep old mode for reports */
2529  
2530  		pmc->mca_crcount = idev->mc_qrv;
2531  		idev->mc_ifc_count = pmc->mca_crcount;
2532  		for_each_psf_mclock(pmc, psf)
2533  			psf->sf_crcount = 0;
2534  		mld_ifc_event(idev);
2535  	} else if (sf_setstate(pmc)) {
2536  		mld_ifc_event(idev);
2537  	}
2538  	return err;
2539  }
2540  
2541  /* called with mc_lock */
2542  static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2543  {
2544  	struct ip6_sf_list *psf, *nextpsf;
2545  
2546  	for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2547  	     psf;
2548  	     psf = nextpsf) {
2549  		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2550  		kfree_rcu(psf, rcu);
2551  	}
2552  	RCU_INIT_POINTER(pmc->mca_tomb, NULL);
2553  	for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2554  	     psf;
2555  	     psf = nextpsf) {
2556  		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2557  		kfree_rcu(psf, rcu);
2558  	}
2559  	RCU_INIT_POINTER(pmc->mca_sources, NULL);
2560  	pmc->mca_sfmode = MCAST_EXCLUDE;
2561  	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2562  	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2563  }
2564  
2565  /* called with mc_lock */
2566  static void igmp6_join_group(struct ifmcaddr6 *ma)
2567  {
2568  	unsigned long delay;
2569  
2570  	if (ma->mca_flags & MAF_NOREPORT)
2571  		return;
2572  
2573  	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2574  
2575  	delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
2576  
2577  	if (cancel_delayed_work(&ma->mca_work)) {
2578  		refcount_dec(&ma->mca_refcnt);
2579  		delay = ma->mca_work.timer.expires - jiffies;
2580  	}
2581  
2582  	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
2583  		refcount_inc(&ma->mca_refcnt);
2584  	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2585  }
2586  
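/* Detach the socket's source filter for @iml and propagate the
 * removals to the interface's per-group source counts.
 */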
2587  static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2588  			    struct inet6_dev *idev)
2589  {
2590  	struct ip6_sf_socklist *psl;
2591  	int err;
2592  
2593  	psl = sock_dereference(iml->sflist, sk);
2594  
2595  	if (idev)
2596  		mutex_lock(&idev->mc_lock);
2597  
2598  	if (!psl) {
2599  		/* any-source empty exclude case */
2600  		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2601  	} else {
2602  		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2603  				     psl->sl_count, psl->sl_addr, 0);
2604  		RCU_INIT_POINTER(iml->sflist, NULL);
2605  		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
2606  			   &sk->sk_omem_alloc);
2607  		kfree_rcu(psl, rcu);
2608  	}
2609  
2610  	if (idev)
2611  		mutex_unlock(&idev->mc_lock);
2612  
2613  	return err;
2614  }
2615  
2616  /* called with mc_lock */
2617  static void igmp6_leave_group(struct ifmcaddr6 *ma)
2618  {
2619  	if (mld_in_v1_mode(ma->idev)) {
2620  		if (ma->mca_flags & MAF_LAST_REPORTER) {
2621  			igmp6_send(&ma->mca_addr, ma->idev->dev,
2622  				ICMPV6_MGM_REDUCTION);
2623  		}
2624  	} else {
2625  		mld_add_delrec(ma->idev, ma);
2626  		mld_ifc_event(ma->idev);
2627  	}
2628  }
2629  
2630  static void mld_gq_work(struct work_struct *work)
2631  {
2632  	struct inet6_dev *idev = container_of(to_delayed_work(work),
2633  					      struct inet6_dev,
2634  					      mc_gq_work);
2635  
2636  	mutex_lock(&idev->mc_lock);
2637  	mld_send_report(idev, NULL);
2638  	idev->mc_gq_running = 0;
2639  	mutex_unlock(&idev->mc_lock);
2640  
2641  	in6_dev_put(idev);
2642  }
2643  
2644  static void mld_ifc_work(struct work_struct *work)
2645  {
2646  	struct inet6_dev *idev = container_of(to_delayed_work(work),
2647  					      struct inet6_dev,
2648  					      mc_ifc_work);
2649  
2650  	mutex_lock(&idev->mc_lock);
2651  	mld_send_cr(idev);
2652  
2653  	if (idev->mc_ifc_count) {
2654  		idev->mc_ifc_count--;
2655  		if (idev->mc_ifc_count)
2656  			mld_ifc_start_work(idev,
2657  					   unsolicited_report_interval(idev));
2658  	}
2659  	mutex_unlock(&idev->mc_lock);
2660  	in6_dev_put(idev);
2661  }
2662  
2663  /* called with mc_lock */
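/* Arm interface change report retransmissions (mc_qrv rounds) and
 * schedule the first transmission.
 */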
2664  static void mld_ifc_event(struct inet6_dev *idev)
2665  {
2666  	if (mld_in_v1_mode(idev))
2667  		return;
2668  
2669  	idev->mc_ifc_count = idev->mc_qrv;
2670  	mld_ifc_start_work(idev, 1);
2671  }
2672  
2673  static void mld_mca_work(struct work_struct *work)
2674  {
2675  	struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
2676  					    struct ifmcaddr6, mca_work);
2677  
2678  	mutex_lock(&ma->idev->mc_lock);
2679  	if (mld_in_v1_mode(ma->idev))
2680  		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2681  	else
2682  		mld_send_report(ma->idev, ma);
2683  	ma->mca_flags |= MAF_LAST_REPORTER;
2684  	ma->mca_flags &= ~MAF_TIMER_RUNNING;
2685  	mutex_unlock(&ma->idev->mc_lock);
2686  
2687  	ma_put(ma);
2688  }
2689  
2690  /* Device changing type */
2691  
2692  void ipv6_mc_unmap(struct inet6_dev *idev)
2693  {
2694  	struct ifmcaddr6 *i;
2695  
2696  	/* Withdraw multicast list */
2697  
2698  	mutex_lock(&idev->mc_lock);
2699  	for_each_mc_mclock(idev, i)
2700  		igmp6_group_dropped(i);
2701  	mutex_unlock(&idev->mc_lock);
2702  }
2703  
2704  void ipv6_mc_remap(struct inet6_dev *idev)
2705  {
2706  	ipv6_mc_up(idev);
2707  }
2708  
2709  /* Device going down */
2710  void ipv6_mc_down(struct inet6_dev *idev)
2711  {
2712  	struct ifmcaddr6 *i;
2713  
2714  	mutex_lock(&idev->mc_lock);
2715  	/* Withdraw multicast list */
2716  	for_each_mc_mclock(idev, i)
2717  		igmp6_group_dropped(i);
2718  	mutex_unlock(&idev->mc_lock);
2719  
2720  	/* Should stop work after group drop, or we will
2721  	 * start work again in mld_ifc_event().
2722  	 */
2723  	mld_query_stop_work(idev);
2724  	mld_report_stop_work(idev);
2725  
2726  	mutex_lock(&idev->mc_lock);
2727  	mld_ifc_stop_work(idev);
2728  	mld_gq_stop_work(idev);
2729  	mutex_unlock(&idev->mc_lock);
2730  
2731  	mld_dad_stop_work(idev);
2732  }
2733  
2734  static void ipv6_mc_reset(struct inet6_dev *idev)
2735  {
2736  	idev->mc_qrv = sysctl_mld_qrv;
2737  	idev->mc_qi = MLD_QI_DEFAULT;
2738  	idev->mc_qri = MLD_QRI_DEFAULT;
2739  	idev->mc_v1_seen = 0;
2740  	idev->mc_maxdelay = unsolicited_report_interval(idev);
2741  }
2742  
2743  /* Device going up */
2744  
2745  void ipv6_mc_up(struct inet6_dev *idev)
2746  {
2747  	struct ifmcaddr6 *i;
2748  
2749  	/* Install multicast list, except for all-nodes (already installed) */
2750  
2751  	ipv6_mc_reset(idev);
2752  	mutex_lock(&idev->mc_lock);
2753  	for_each_mc_mclock(idev, i) {
2754  		mld_del_delrec(idev, i);
2755  		igmp6_group_added(i);
2756  	}
2757  	mutex_unlock(&idev->mc_lock);
2758  }
2759  
2760  /* IPv6 device initialization. */
2761  
2762  void ipv6_mc_init_dev(struct inet6_dev *idev)
2763  {
2764  	idev->mc_gq_running = 0;
2765  	INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
2766  	RCU_INIT_POINTER(idev->mc_tomb, NULL);
2767  	idev->mc_ifc_count = 0;
2768  	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
2769  	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
2770  	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
2771  	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
2772  	skb_queue_head_init(&idev->mc_query_queue);
2773  	skb_queue_head_init(&idev->mc_report_queue);
2774  	spin_lock_init(&idev->mc_query_lock);
2775  	spin_lock_init(&idev->mc_report_lock);
2776  	mutex_init(&idev->mc_lock);
2777  	ipv6_mc_reset(idev);
2778  }
2779  
2780  /*
2781   *	Device is about to be destroyed: clean up.
2782   */
2783  
2784  void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2785  {
2786  	struct ifmcaddr6 *i;
2787  
2788  	/* Deactivate works */
2789  	ipv6_mc_down(idev);
2790  	mutex_lock(&idev->mc_lock);
2791  	mld_clear_delrec(idev);
2792  	mutex_unlock(&idev->mc_lock);
2793  	mld_clear_query(idev);
2794  	mld_clear_report(idev);
2795  
2796  	/* Delete all-nodes address. */
2797  	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
2798  	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2799  	 * fail.
2800  	 */
2801  	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2802  
2803  	if (idev->cnf.forwarding)
2804  		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2805  
2806  	mutex_lock(&idev->mc_lock);
2807  	while ((i = mc_dereference(idev->mc_list, idev))) {
2808  		rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));
2809  
2810  		ip6_mc_clear_src(i);
2811  		ma_put(i);
2812  	}
2813  	mutex_unlock(&idev->mc_lock);
2814  }
2815  
2816  static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2817  {
2818  	struct ifmcaddr6 *pmc;
2819  
2820  	ASSERT_RTNL();
2821  
2822  	mutex_lock(&idev->mc_lock);
2823  	if (mld_in_v1_mode(idev)) {
2824  		for_each_mc_mclock(idev, pmc)
2825  			igmp6_join_group(pmc);
2826  	} else {
2827  		mld_send_report(idev, NULL);
2828  	}
2829  	mutex_unlock(&idev->mc_lock);
2830  }
2831  
2832  static int ipv6_mc_netdev_event(struct notifier_block *this,
2833  				unsigned long event,
2834  				void *ptr)
2835  {
2836  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2837  	struct inet6_dev *idev = __in6_dev_get(dev);
2838  
2839  	switch (event) {
2840  	case NETDEV_RESEND_IGMP:
2841  		if (idev)
2842  			ipv6_mc_rejoin_groups(idev);
2843  		break;
2844  	default:
2845  		break;
2846  	}
2847  
2848  	return NOTIFY_DONE;
2849  }
2850  
2851  static struct notifier_block igmp6_netdev_notifier = {
2852  	.notifier_call = ipv6_mc_netdev_event,
2853  };
2854  
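/* /proc/net/igmp6 lists the multicast groups joined per device;
 * /proc/net/mcfilter6 lists the per-group source filters.
 */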
2855  #ifdef CONFIG_PROC_FS
2856  struct igmp6_mc_iter_state {
2857  	struct seq_net_private p;
2858  	struct net_device *dev;
2859  	struct inet6_dev *idev;
2860  };
2861  
2862  #define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
2863  
2864  static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2865  {
2866  	struct ifmcaddr6 *im = NULL;
2867  	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2868  	struct net *net = seq_file_net(seq);
2869  
2870  	state->idev = NULL;
2871  	for_each_netdev_rcu(net, state->dev) {
2872  		struct inet6_dev *idev;
2873  		idev = __in6_dev_get(state->dev);
2874  		if (!idev)
2875  			continue;
2876  
2877  		im = rcu_dereference(idev->mc_list);
2878  		if (im) {
2879  			state->idev = idev;
2880  			break;
2881  		}
2882  	}
2883  	return im;
2884  }
2885  
2886  static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2887  {
2888  	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2889  
2890  	im = rcu_dereference(im->next);
2891  	while (!im) {
2892  		state->dev = next_net_device_rcu(state->dev);
2893  		if (!state->dev) {
2894  			state->idev = NULL;
2895  			break;
2896  		}
2897  		state->idev = __in6_dev_get(state->dev);
2898  		if (!state->idev)
2899  			continue;
2900  		im = rcu_dereference(state->idev->mc_list);
2901  	}
2902  	return im;
2903  }
2904  
2905  static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2906  {
2907  	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2908  	if (im)
2909  		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2910  			--pos;
2911  	return pos ? NULL : im;
2912  }
2913  
2914  static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2915  	__acquires(RCU)
2916  {
2917  	rcu_read_lock();
2918  	return igmp6_mc_get_idx(seq, *pos);
2919  }
2920  
2921  static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2922  {
2923  	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2924  
2925  	++*pos;
2926  	return im;
2927  }
2928  
2929  static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2930  	__releases(RCU)
2931  {
2932  	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2933  
2934  	if (likely(state->idev))
2935  		state->idev = NULL;
2936  	state->dev = NULL;
2937  	rcu_read_unlock();
2938  }
2939  
2940  static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2941  {
2942  	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2943  	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2944  
2945  	seq_printf(seq,
2946  		   "%-4d %-15s %pi6 %5d %08X %ld\n",
2947  		   state->dev->ifindex, state->dev->name,
2948  		   &im->mca_addr,
2949  		   im->mca_users, im->mca_flags,
2950  		   (im->mca_flags & MAF_TIMER_RUNNING) ?
2951  		   jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
2952  	return 0;
2953  }
2954  
2955  static const struct seq_operations igmp6_mc_seq_ops = {
2956  	.start	=	igmp6_mc_seq_start,
2957  	.next	=	igmp6_mc_seq_next,
2958  	.stop	=	igmp6_mc_seq_stop,
2959  	.show	=	igmp6_mc_seq_show,
2960  };
2961  
2962  struct igmp6_mcf_iter_state {
2963  	struct seq_net_private p;
2964  	struct net_device *dev;
2965  	struct inet6_dev *idev;
2966  	struct ifmcaddr6 *im;
2967  };
2968  
2969  #define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
2970  
2971  static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2972  {
2973  	struct ip6_sf_list *psf = NULL;
2974  	struct ifmcaddr6 *im = NULL;
2975  	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2976  	struct net *net = seq_file_net(seq);
2977  
2978  	state->idev = NULL;
2979  	state->im = NULL;
2980  	for_each_netdev_rcu(net, state->dev) {
2981  		struct inet6_dev *idev;
2982  		idev = __in6_dev_get(state->dev);
2983  		if (unlikely(idev == NULL))
2984  			continue;
2985  
2986  		im = rcu_dereference(idev->mc_list);
2987  		if (likely(im)) {
2988  			psf = rcu_dereference(im->mca_sources);
2989  			if (likely(psf)) {
2990  				state->im = im;
2991  				state->idev = idev;
2992  				break;
2993  			}
2994  		}
2995  	}
2996  	return psf;
2997  }
2998  
2999  static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
3000  {
3001  	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3002  
3003  	psf = rcu_dereference(psf->sf_next);
3004  	while (!psf) {
3005  		state->im = rcu_dereference(state->im->next);
3006  		while (!state->im) {
3007  			state->dev = next_net_device_rcu(state->dev);
3008  			if (!state->dev) {
3009  				state->idev = NULL;
3010  				goto out;
3011  			}
3012  			state->idev = __in6_dev_get(state->dev);
3013  			if (!state->idev)
3014  				continue;
3015  			state->im = rcu_dereference(state->idev->mc_list);
3016  		}
3017  		psf = rcu_dereference(state->im->mca_sources);
3018  	}
3019  out:
3020  	return psf;
3021  }
3022  
3023  static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
3024  {
3025  	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
3026  	if (psf)
3027  		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
3028  			--pos;
3029  	return pos ? NULL : psf;
3030  }
3031  
3032  static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
3033  	__acquires(RCU)
3034  {
3035  	rcu_read_lock();
3036  	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
3037  }
3038  
3039  static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3040  {
3041  	struct ip6_sf_list *psf;
3042  	if (v == SEQ_START_TOKEN)
3043  		psf = igmp6_mcf_get_first(seq);
3044  	else
3045  		psf = igmp6_mcf_get_next(seq, v);
3046  	++*pos;
3047  	return psf;
3048  }
3049  
3050  static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
3051  	__releases(RCU)
3052  {
3053  	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3054  
3055  	if (likely(state->im))
3056  		state->im = NULL;
3057  	if (likely(state->idev))
3058  		state->idev = NULL;
3059  
3060  	state->dev = NULL;
3061  	rcu_read_unlock();
3062  }
3063  
3064  static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
3065  {
3066  	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
3067  	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3068  
3069  	if (v == SEQ_START_TOKEN) {
3070  		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
3071  	} else {
3072  		seq_printf(seq,
3073  			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
3074  			   state->dev->ifindex, state->dev->name,
3075  			   &state->im->mca_addr,
3076  			   &psf->sf_addr,
3077  			   psf->sf_count[MCAST_INCLUDE],
3078  			   psf->sf_count[MCAST_EXCLUDE]);
3079  	}
3080  	return 0;
3081  }
3082  
3083  static const struct seq_operations igmp6_mcf_seq_ops = {
3084  	.start	=	igmp6_mcf_seq_start,
3085  	.next	=	igmp6_mcf_seq_next,
3086  	.stop	=	igmp6_mcf_seq_stop,
3087  	.show	=	igmp6_mcf_seq_show,
3088  };
3089  
3090  static int __net_init igmp6_proc_init(struct net *net)
3091  {
3092  	int err;
3093  
3094  	err = -ENOMEM;
3095  	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
3096  			sizeof(struct igmp6_mc_iter_state)))
3097  		goto out;
3098  	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
3099  			&igmp6_mcf_seq_ops,
3100  			sizeof(struct igmp6_mcf_iter_state)))
3101  		goto out_proc_net_igmp6;
3102  
3103  	err = 0;
3104  out:
3105  	return err;
3106  
3107  out_proc_net_igmp6:
3108  	remove_proc_entry("igmp6", net->proc_net);
3109  	goto out;
3110  }
3111  
3112  static void __net_exit igmp6_proc_exit(struct net *net)
3113  {
3114  	remove_proc_entry("mcfilter6", net->proc_net);
3115  	remove_proc_entry("igmp6", net->proc_net);
3116  }
3117  #else
3118  static inline int igmp6_proc_init(struct net *net)
3119  {
3120  	return 0;
3121  }
3122  static inline void igmp6_proc_exit(struct net *net)
3123  {
3124  }
3125  #endif
3126  
3127  static int __net_init igmp6_net_init(struct net *net)
3128  {
3129  	int err;
3130  
3131  	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
3132  				   SOCK_RAW, IPPROTO_ICMPV6, net);
3133  	if (err < 0) {
3134  		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
3135  		       err);
3136  		goto out;
3137  	}
3138  
3139  	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
3140  	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;
3141  
3142  	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
3143  				   SOCK_RAW, IPPROTO_ICMPV6, net);
3144  	if (err < 0) {
3145  		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
3146  		       err);
3147  		goto out_sock_create;
3148  	}
3149  
3150  	err = igmp6_proc_init(net);
3151  	if (err)
3152  		goto out_sock_create_autojoin;
3153  
3154  	return 0;
3155  
3156  out_sock_create_autojoin:
3157  	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3158  out_sock_create:
3159  	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3160  out:
3161  	return err;
3162  }
3163  
3164  static void __net_exit igmp6_net_exit(struct net *net)
3165  {
3166  	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3167  	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3168  	igmp6_proc_exit(net);
3169  }
3170  
3171  static struct pernet_operations igmp6_net_ops = {
3172  	.init = igmp6_net_init,
3173  	.exit = igmp6_net_exit,
3174  };
3175  
3176  int __init igmp6_init(void)
3177  {
3178  	int err;
3179  
3180  	err = register_pernet_subsys(&igmp6_net_ops);
3181  	if (err)
3182  		return err;
3183  
3184  	mld_wq = create_workqueue("mld");
3185  	if (!mld_wq) {
3186  		unregister_pernet_subsys(&igmp6_net_ops);
3187  		return -ENOMEM;
3188  	}
3189  
3190  	return err;
3191  }
3192  
3193  int __init igmp6_late_init(void)
3194  {
3195  	return register_netdevice_notifier(&igmp6_netdev_notifier);
3196  }
3197  
3198  void igmp6_cleanup(void)
3199  {
3200  	unregister_pernet_subsys(&igmp6_net_ops);
3201  	destroy_workqueue(mld_wq);
3202  }
3203  
3204  void igmp6_late_cleanup(void)
3205  {
3206  	unregister_netdevice_notifier(&igmp6_netdev_notifier);
3207  }
3208