// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>

#include "util.h"

/*
 * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
 */
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);

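/*
 * Every ipc namespace is charged to the owning user namespace's
 * UCOUNT_IPC_NAMESPACES ucount; the limit is the user.max_ipc_namespaces
 * sysctl. inc_ucount() returns NULL once that limit would be exceeded.
 */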
static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}

static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}

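/*
 * Allocate and set up a new ipc namespace owned by @user_ns. Each setup
 * step has a matching teardown under the fail_* labels, so a mid-way
 * failure unwinds whatever was already initialized and returns an ERR_PTR().
 */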
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
 again:
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts) {
		/*
		 * IPC namespaces are freed asynchronously, by free_ipc_work.
		 * If frees were pending, flush_work will wait, and
		 * return true. Fail the allocation if no frees are pending.
		 */
		if (flush_work(&free_ipc_work))
			goto again;
		goto fail;
	}

	err = -ENOMEM;
	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
	if (ns == NULL)
		goto fail_dec;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto fail_free;
	ns->ns.ops = &ipcns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = mq_init_ns(ns);
	if (err)
		goto fail_put;

	err = -ENOMEM;
	if (!setup_mq_sysctls(ns))
		goto fail_put;

	if (!setup_ipc_sysctls(ns))
		goto fail_mq;

	err = msg_init_ns(ns);
	if (err)
		goto fail_ipc;

	sem_init_ns(ns);
	shm_init_ns(ns);

	return ns;

fail_ipc:
	retire_ipc_sysctls(ns);
fail_mq:
	retire_mq_sysctls(ns);

fail_put:
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}

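/*
 * Called on clone(2)/unshare(2): without CLONE_NEWIPC the existing
 * namespace is shared (with its refcount raised), otherwise a fresh
 * ipc namespace is created for the caller.
 */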
struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(user_ns, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns:   the namespace to remove the ipcs from
 * @ids:  the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rwsem);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		rcu_read_lock();
		ipc_lock_object(perm);
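		/*
		 * The free() callback is expected to drop the object lock
		 * and the RCU read lock taken above once it is done with
		 * the object.
		 */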
		free(ns, perm);
		total++;
	}
	up_write(&ids->rwsem);
}

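/*
 * Final teardown of an ipc namespace; runs from the free_ipc() work item
 * once the last reference is gone and an RCU grace period has elapsed.
 */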
static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Caller needs to wait for an RCU grace period to have passed
	 * after making the mount point inaccessible to new accesses.
	 */
	mntput(ns->mq_mnt);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);

	retire_mq_sysctls(ns);
	retire_ipc_sysctls(ns);

	dec_ipc_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
	kfree(ns);
}

static LLIST_HEAD(free_ipc_list);
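/*
 * Batch destruction of ipc namespaces: mark every queued mqueue mount as
 * short-term first, then let a single synchronize_rcu() cover all of them
 * before the namespaces are actually freed.
 */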
static void free_ipc(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&free_ipc_list);
	struct ipc_namespace *n, *t;

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		mnt_make_shortterm(n->mq_mnt);

	/* Wait for any last users to have gone away. */
	synchronize_rcu();

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		free_ipc_ns(n);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mount namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files.  That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (refcount_dec_and_lock(&ns->ns.count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);

		if (llist_add(&ns->mnt_llist, &free_ipc_list))
			schedule_work(&free_ipc_work);
	}
}

static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
{
	return container_of(ns, struct ipc_namespace, ns);
}

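/* Take a reference on @task's ipc namespace, for nsfs (/proc/<pid>/ns/ipc). */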
static struct ns_common *ipcns_get(struct task_struct *task)
{
	struct ipc_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		ns = get_ipc_ns(nsproxy->ipc_ns);
	task_unlock(task);

	return ns ? &ns->ns : NULL;
}

static void ipcns_put(struct ns_common *ns)
{
	return put_ipc_ns(to_ipc_ns(ns));
}

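/*
 * setns(2) into an ipc namespace: the caller needs CAP_SYS_ADMIN over the
 * user namespace owning the target ipc namespace as well as over its own
 * credentials' user namespace.
 */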
static int ipcns_install(struct nsset *nsset, struct ns_common *new)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct ipc_namespace *ns = to_ipc_ns(new);
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_ipc_ns(nsproxy->ipc_ns);
	nsproxy->ipc_ns = get_ipc_ns(ns);
	return 0;
}

static struct user_namespace *ipcns_owner(struct ns_common *ns)
{
	return to_ipc_ns(ns)->user_ns;
}

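/* Glue used by the generic namespace code (nsfs/setns) for CLONE_NEWIPC. */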
const struct proc_ns_operations ipcns_operations = {
	.name		= "ipc",
	.type		= CLONE_NEWIPC,
	.get		= ipcns_get,
	.put		= ipcns_put,
	.install	= ipcns_install,
	.owner		= ipcns_owner,
};