// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/eventpoll.h>
#include <linux/file.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons.
 * But still the comparison results should be suitable for sorting. So we
 * obfuscate the kernel pointer values and compare the products instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which puts the pointer into a new position in a reordered
 * space. Second, we multiply the xor result by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and, if
 * needed, it can be changed to an alternate scheme.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
	return (v ^ cookies[type][0]) * cookies[type][1];
}
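/*
 * Worked example (illustrative only; the cookie values below are made up
 * and use an 8-bit space for brevity): with cookies[type] = { 0x5a, 0x9d }
 * (0x9d is odd), pointers 0x10 and 0x14 obfuscate to
 *
 *	(0x10 ^ 0x5a) * 0x9d = 0x4a * 0x9d = 0x2d62 -> 0x62 (mod 2^8)
 *	(0x14 ^ 0x5a) * 0x9d = 0x4e * 0x9d = 0x2fd6 -> 0xd6 (mod 2^8)
 *
 * The truncated products still differ: multiplication by an odd constant
 * is a bijection modulo 2^n, so equal obfuscated values imply equal
 * original pointers, while the resulting order says nothing about the
 * real in-memory layout.
 */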

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long t1, t2;

	t1 = kptr_obfuscate((long)v1, type);
	t2 = kptr_obfuscate((long)v2, type);

	return (t1 < t2) | ((t1 > t2) << 1);
}
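/*
 * A quick check of the branchless encoding above (illustrative only):
 * t1 < t2 yields 1 | (0 << 1) = 1, t1 > t2 yields 0 | (1 << 1) = 2, and
 * t1 == t2 yields 0 | (0 << 1) = 0, matching the table in the comment.
 */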

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
	struct file *file;

	rcu_read_lock();
	file = task_lookup_fdget_rcu(task, idx);
	rcu_read_unlock();
	if (file)
		fput(file);	/* the raw pointer is only ever compared, never dereferenced */

	return file;
}

static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
	if (likely(l2 != l1))
		up_read(l2);
	up_read(l1);
}

static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
	int err;

	/*
	 * Take both semaphores in address order so that two concurrent
	 * kcmp() callers cannot deadlock on each other (ABBA), and
	 * annotate the second acquisition for lockdep when the locks
	 * differ.
	 */
	if (l2 > l1)
		swap(l1, l2);

	err = down_read_killable(l1);
	if (!err && likely(l1 != l2)) {
		err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
		if (err)
			up_read(l1);
	}

	return err;
}

#ifdef CONFIG_EPOLL
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	struct file *filp, *filp_epoll, *filp_tgt;
	struct kcmp_epoll_slot slot;

	if (copy_from_user(&slot, uslot, sizeof(slot)))
		return -EFAULT;

	filp = get_file_raw_ptr(task1, idx1);
	if (!filp)
		return -EBADF;

	filp_epoll = fget_task(task2, slot.efd);
	if (!filp_epoll)
		return -EBADF;

	filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
	fput(filp_epoll);

	if (IS_ERR(filp_tgt))
		return PTR_ERR(filp_tgt);

	return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
}
#else
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	return -EOPNOTSUPP;
}
#endif
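/*
 * Userspace sketch (illustrative only, not part of this file): comparing
 * fd1 in pid1 against a target file registered in an epoll instance owned
 * by pid2.  struct kcmp_epoll_slot and KCMP_EPOLL_TFD come from
 * <linux/kcmp.h>; the epoll_fd/target_fd values are assumptions for the
 * example.
 *
 *	struct kcmp_epoll_slot slot = {
 *		.efd  = epoll_fd,	// epoll instance in pid2
 *		.tfd  = target_fd,	// fd registered in that instance
 *		.toff = 0,		// first occurrence of tfd
 *	};
 *
 *	long ret = syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD,
 *			   fd1, (unsigned long)&slot);
 *	// ret == 0 means both refer to the same struct file.
 */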

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in the caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->exec_update_lock,
			&task2->signal->exec_update_lock);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	case KCMP_EPOLL_TFD:
		ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->exec_update_lock,
		    &task2->signal->exec_update_lock);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}
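/*
 * Userspace sketch (illustrative only, not part of this file): checking
 * whether fd1 in pid1 and fd2 in pid2 point to the same struct file,
 * e.g. after fork() or SCM_RIGHTS fd passing.  SYS_kcmp comes from
 * <sys/syscall.h> and KCMP_FILE from <linux/kcmp.h>; same_file() is a
 * made-up helper name.
 *
 *	#include <linux/kcmp.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int same_file(pid_t pid1, pid_t pid2, int fd1, int fd2)
 *	{
 *		long ret = syscall(SYS_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
 *
 *		// 0 - same file, 1/2 - different (obfuscated ordering),
 *		// -1 - error with errno set (e.g. EPERM, EBADF, ESRCH)
 *		return ret == 0;
 *	}
 */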

static __init int kcmp_cookies_init(void)
{
	int i;

	get_random_bytes(cookies, sizeof(cookies));

	/*
	 * Force each multiplier cookie to be large (top bit set) and odd
	 * (bit 0 set), so the multiplication stays a bijection modulo
	 * 2^BITS_PER_LONG as described above.
	 */
	for (i = 0; i < KCMP_TYPES; i++)
		cookies[i][1] |= (~(~0UL >>  1) | 1);

	return 0;
}
arch_initcall(kcmp_cookies_init);