// SPDX-License-Identifier: GPL-2.0
/****************************************************************************/
/*
 *  linux/fs/binfmt_flat.c
 *
 *	Copyright (C) 2000-2003 David McCullough <davidm@snapgear.com>
 *	Copyright (C) 2002 Greg Ungerer <gerg@snapgear.com>
 *	Copyright (C) 2002 SnapGear, by Paul Dale <pauli@snapgear.com>
 *	Copyright (C) 2000, 2001 Lineo, by David McCullough <davidm@lineo.com>
 *  based heavily on:
 *
 *  linux/fs/binfmt_aout.c:
 *      Copyright (C) 1991, 1992, 1996  Linus Torvalds
 *  linux/fs/binfmt_flat.c for 2.0 kernel
 *	    Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>
 *	JAN/99 -- coded full program relocation (gerg@snapgear.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/flat.h>

#ifndef flat_get_relocate_addr
#define flat_get_relocate_addr(rel)	(rel)
#endif
/****************************************************************************/

/*
 * User data (data section and bss) needs to be aligned.
 * We pick 0x20 here because it is the max value elf2flt has always
 * used in producing FLAT files, and because it seems to be large
 * enough to make all the gcc alignment related tests happy.
 */
#define FLAT_DATA_ALIGN	(0x20)

/*
 * User data (stack) also needs to be aligned.
 * Here we can be a bit looser than the data sections since this
 * only needs to meet arch ABI requirements.
 */
#define FLAT_STACK_ALIGN	max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN)

#define RELOC_FAILED 0xff00ff01		/* Relocation incorrect somewhere */
#define UNLOADED_LIB 0x7ff000ff		/* Placeholder for unused library */

#define MAX_SHARED_LIBS			(1)

#ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
#define DATA_START_OFFSET_WORDS		(0)
#define MAX_SHARED_LIBS_UPDATE		(0)
#else
#define DATA_START_OFFSET_WORDS		(MAX_SHARED_LIBS)
#define MAX_SHARED_LIBS_UPDATE		(MAX_SHARED_LIBS)
#endif

struct lib_info {
	struct {
		unsigned long start_code;		/* Start of text segment */
		unsigned long start_data;		/* Start of data segment */
		unsigned long start_brk;		/* End of data segment */
		unsigned long text_len;			/* Length of text segment */
		unsigned long entry;			/* Start address for this module */
		unsigned long build_date;		/* When this one was compiled */
		bool loaded;				/* Has this library been loaded? */
	} lib_list[MAX_SHARED_LIBS];
};
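
/*
 * Note: MAX_SHARED_LIBS is fixed at 1, so only lib_list[0] (the main
 * program) is ever filled in below; the array shape and the UNLOADED_LIB
 * placeholder are leftovers from the old flat shared-library support.
 */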

static int load_flat_binary(struct linux_binprm *);

static struct linux_binfmt flat_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_flat_binary,
};


/****************************************************************************/
/*
 * create_flat_tables() parses the env- and arg-strings in new user
 * memory, builds the argv and envp pointer tables from them, and puts
 * their addresses on the "stack", recording the new stack pointer value.
 */
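/*
 * The resulting layout, from the final stack pointer upwards, is:
 *
 *	argc
 *	argv, envp		(only with CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)
 *	argv[0] .. argv[argc - 1], NULL
 *	envp[0] .. envp[envc - 1], NULL
 *
 * with the argument and environment strings themselves sitting above the
 * tables, starting at arg_start.
 */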

static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start)
{
	char __user *p;
	unsigned long __user *sp;
	long i, len;

	p = (char __user *)arg_start;
	sp = (unsigned long __user *)current->mm->start_stack;

	sp -= bprm->envc + 1;
	sp -= bprm->argc + 1;
	if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK))
		sp -= 2; /* argvp + envp */
	sp -= 1;  /* &argc */

	current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
	sp = (unsigned long __user *)current->mm->start_stack;

	if (put_user(bprm->argc, sp++))
		return -EFAULT;
	if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) {
		unsigned long argv, envp;
		argv = (unsigned long)(sp + 2);
		envp = (unsigned long)(sp + 2 + bprm->argc + 1);
		if (put_user(argv, sp++) || put_user(envp, sp++))
			return -EFAULT;
	}

	current->mm->arg_start = (unsigned long)p;
	for (i = bprm->argc; i > 0; i--) {
		if (put_user((unsigned long)p, sp++))
			return -EFAULT;
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = (unsigned long)p;

	current->mm->env_start = (unsigned long) p;
	for (i = bprm->envc; i > 0; i--) {
		if (put_user((unsigned long)p, sp++))
			return -EFAULT;
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = (unsigned long)p;

	return 0;
}

/****************************************************************************/

#ifdef CONFIG_BINFMT_ZFLAT

#include <linux/zlib.h>

#define LBUFSIZE	4000

/* gzip flag byte */
#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
#define COMMENT      0x10 /* bit 4 set: file comment present */
#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
#define RESERVED     0xC0 /* bit 6,7:   reserved */

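/*
 * For reference, a gzip member begins with a fixed 10-byte header
 * (RFC 1952):
 *
 *	bytes 0-1	magic (0x1f, 0x8b)
 *	byte  2		compression method (8 == deflate)
 *	byte  3		flag byte (bits defined above)
 *	bytes 4-9	mtime, XFL and OS fields (ignored here)
 *
 * followed by the optional EXTRA/NAME/COMMENT fields and then the raw
 * deflate stream, which is why zlib_inflateInit2() is called with
 * -MAX_WBITS below.
 */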
static int decompress_exec(struct linux_binprm *bprm, loff_t fpos, char *dst,
		long len, int fd)
{
	unsigned char *buf;
	z_stream strm;
	int ret, retval;

	pr_debug("decompress_exec(offset=%llx,buf=%p,len=%lx)\n", fpos, dst, len);

	memset(&strm, 0, sizeof(strm));
	strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	buf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!buf) {
		retval = -ENOMEM;
		goto out_free;
	}

	/* Read in first chunk of data and parse gzip header. */
	ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);

	strm.next_in = buf;
	strm.avail_in = ret;
	strm.total_in = 0;

	retval = -ENOEXEC;

	/* Check minimum size -- gzip header */
	if (ret < 10) {
		pr_debug("file too small?\n");
		goto out_free_buf;
	}

	/* Check gzip magic number */
	if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
		pr_debug("unknown compression magic?\n");
		goto out_free_buf;
	}

	/* Check gzip method */
	if (buf[2] != 8) {
		pr_debug("unknown compression method?\n");
		goto out_free_buf;
	}
	/* Check gzip flags */
	if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
	    (buf[3] & RESERVED)) {
		pr_debug("unknown flags?\n");
		goto out_free_buf;
	}

	ret = 10;
	if (buf[3] & EXTRA_FIELD) {
		ret += 2 + buf[10] + (buf[11] << 8);
		if (unlikely(ret >= LBUFSIZE)) {
			pr_debug("buffer overflow (EXTRA)?\n");
			goto out_free_buf;
		}
	}
	if (buf[3] & ORIG_NAME) {
		while (ret < LBUFSIZE && buf[ret++] != 0)
			;
		if (unlikely(ret == LBUFSIZE)) {
			pr_debug("buffer overflow (ORIG_NAME)?\n");
			goto out_free_buf;
		}
	}
	if (buf[3] & COMMENT) {
		while (ret < LBUFSIZE && buf[ret++] != 0)
			;
		if (unlikely(ret == LBUFSIZE)) {
			pr_debug("buffer overflow (COMMENT)?\n");
			goto out_free_buf;
		}
	}

	strm.next_in += ret;
	strm.avail_in -= ret;

	strm.next_out = dst;
	strm.avail_out = len;
	strm.total_out = 0;

	if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
		pr_debug("zlib init failed?\n");
		goto out_free_buf;
	}

	while ((ret = zlib_inflate(&strm, Z_NO_FLUSH)) == Z_OK) {
		ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);
		if (ret <= 0)
			break;
		len -= ret;

		strm.next_in = buf;
		strm.avail_in = ret;
		strm.total_in = 0;
	}

	if (ret < 0) {
		pr_debug("decompression failed (%d), %s\n",
			ret, strm.msg);
		goto out_zlib;
	}

	retval = 0;
out_zlib:
	zlib_inflateEnd(&strm);
out_free_buf:
	kfree(buf);
out_free:
	kfree(strm.workspace);
	return retval;
}

#endif /* CONFIG_BINFMT_ZFLAT */

/****************************************************************************/

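/*
 * calc_reloc() maps an offset within the flat binary image to its run-time
 * address: offsets below text_len land in the text segment, anything else
 * is taken to be in the (separately placed) data/bss area.  Out-of-range
 * offsets kill the task and return RELOC_FAILED.
 */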
static unsigned long
calc_reloc(unsigned long r, struct lib_info *p)
{
	unsigned long addr;
	unsigned long start_brk;
	unsigned long start_data;
	unsigned long text_len;
	unsigned long start_code;

	start_brk = p->lib_list[0].start_brk;
	start_data = p->lib_list[0].start_data;
	start_code = p->lib_list[0].start_code;
	text_len = p->lib_list[0].text_len;

	if (r > start_brk - start_data + text_len) {
		pr_err("reloc outside program 0x%lx (0 - 0x%lx/0x%lx)",
		       r, start_brk-start_data+text_len, text_len);
		goto failed;
	}

	if (r < text_len)			/* In text segment */
		addr = r + start_code;
	else					/* In data segment */
		addr = r - text_len + start_data;

	/* Range checked already above so doing the range tests is redundant... */
	return addr;

failed:
	pr_cont(", killing %s!\n", current->comm);
	send_sig(SIGSEGV, current, 0);

	return RELOC_FAILED;
}

/****************************************************************************/

#ifdef CONFIG_BINFMT_FLAT_OLD
static void old_reloc(unsigned long rl)
{
	static const char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
	flat_v2_reloc_t	r;
	unsigned long __user *ptr;
	unsigned long val;

	r.value = rl;
#if defined(CONFIG_COLDFIRE)
	ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);
#else
	ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);
#endif
	get_user(val, ptr);

	pr_debug("Relocation of variable at DATASEG+%x "
		 "(address %p, currently %lx) into segment %s\n",
		 r.reloc.offset, ptr, val, segment[r.reloc.type]);

	switch (r.reloc.type) {
	case OLD_FLAT_RELOC_TYPE_TEXT:
		val += current->mm->start_code;
		break;
	case OLD_FLAT_RELOC_TYPE_DATA:
		val += current->mm->start_data;
		break;
	case OLD_FLAT_RELOC_TYPE_BSS:
		val += current->mm->end_data;
		break;
	default:
		pr_err("Unknown relocation type=%x\n", r.reloc.type);
		break;
	}
	put_user(val, ptr);

	pr_debug("Relocation became %lx\n", val);
}
#endif /* CONFIG_BINFMT_FLAT_OLD */

/****************************************************************************/

static inline u32 __user *skip_got_header(u32 __user *rp)
{
	if (IS_ENABLED(CONFIG_RISCV)) {
		/*
		 * RISC-V has a 16 byte GOT PLT header for elf64-riscv
		 * and 8 byte GOT PLT header for elf32-riscv.
		 * Skip the whole GOT PLT header, since it is reserved
		 * for the dynamic linker (ld.so).
		 */
		u32 rp_val0, rp_val1;

		if (get_user(rp_val0, rp))
			return rp;
		if (get_user(rp_val1, rp + 1))
			return rp;

		if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff)
			rp += 4;
		else if (rp_val0 == 0xffffffff)
			rp += 2;
	}
	return rp;
}

static int load_flat_file(struct linux_binprm *bprm,
		struct lib_info *libinfo, unsigned long *extra_stack)
{
	struct flat_hdr *hdr;
	unsigned long textpos, datapos, realdatastart;
	u32 text_len, data_len, bss_len, stack_len, full_data, flags;
	unsigned long len, memp, memp_size, extra, rlim;
	__be32 __user *reloc;
	u32 __user *rp;
	int i, rev, relocs;
	loff_t fpos;
	unsigned long start_code, end_code;
	ssize_t result;
	int ret;

	hdr = ((struct flat_hdr *) bprm->buf);		/* exec-header */

	text_len  = ntohl(hdr->data_start);
	data_len  = ntohl(hdr->data_end) - ntohl(hdr->data_start);
	bss_len   = ntohl(hdr->bss_end) - ntohl(hdr->data_end);
	stack_len = ntohl(hdr->stack_size);
	if (extra_stack) {
		stack_len += *extra_stack;
		*extra_stack = stack_len;
	}
	relocs    = ntohl(hdr->reloc_count);
	flags     = ntohl(hdr->flags);
	rev       = ntohl(hdr->rev);
	full_data = data_len + relocs * sizeof(unsigned long);
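
	/*
	 * All multi-byte fields of struct flat_hdr are stored big-endian in
	 * the file, hence the ntohl() conversion on every header field read
	 * here and below.
	 */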

	if (strncmp(hdr->magic, "bFLT", 4)) {
		/*
		 * Previously there was a printk here telling people
		 * "BINFMT_FLAT: bad header magic".  But on kernels that
		 * also use the ELF FD-PIC format that message was
		 * confusing, so it is no longer printed.
		 */
		ret = -ENOEXEC;
		goto err;
	}

	if (flags & FLAT_FLAG_KTRACE)
		pr_info("Loading file: %s\n", bprm->filename);

#ifdef CONFIG_BINFMT_FLAT_OLD
	if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) {
		pr_err("bad flat file version 0x%x (supported 0x%lx and 0x%lx)\n",
		       rev, FLAT_VERSION, OLD_FLAT_VERSION);
		ret = -ENOEXEC;
		goto err;
	}

	/*
	 * fix up the flags for the older format,  there were all kinds
	 * of endian hacks,  this only works for the simple cases
	 */
	if (rev == OLD_FLAT_VERSION &&
	   (flags || IS_ENABLED(CONFIG_BINFMT_FLAT_OLD_ALWAYS_RAM)))
		flags = FLAT_FLAG_RAM;

#else /* CONFIG_BINFMT_FLAT_OLD */
	if (rev != FLAT_VERSION) {
		pr_err("bad flat file version 0x%x (supported 0x%lx)\n",
		       rev, FLAT_VERSION);
		ret = -ENOEXEC;
		goto err;
	}
#endif /* !CONFIG_BINFMT_FLAT_OLD */

	/*
	 * Make sure the header params are sane.
	 * 28 bits (256 MB) is way more than reasonable in this case.
	 * If some top bits are set we have probable binary corruption.
	 */
	if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
		pr_err("bad header\n");
		ret = -ENOEXEC;
		goto err;
	}

#ifndef CONFIG_BINFMT_ZFLAT
	if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
		pr_err("Support for ZFLAT executables is not enabled.\n");
		ret = -ENOEXEC;
		goto err;
	}
#endif

	/*
	 * Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (data_len + bss_len > rlim) {
		ret = -ENOMEM;
		goto err;
	}

	/* Flush all traces of the currently running executable */
	ret = begin_new_exec(bprm);
	if (ret)
		goto err;

	/* OK, This is the point of no return */
	set_personality(PER_LINUX_32BIT);
	setup_new_exec(bprm);

	/*
	 * calculate the extra space we need to map in
	 */
	extra = max_t(unsigned long, bss_len + stack_len,
			relocs * sizeof(unsigned long));

	/*
	 * there are a couple of cases here,  the separate code/data
	 * case,  and then the fully copied to RAM case which lumps
	 * it all together.
	 */
	if (!IS_ENABLED(CONFIG_MMU) && !(flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP))) {
		/*
		 * this should give us a ROM ptr,  but if it doesn't we don't
		 * really care
		 */
		pr_debug("ROM mapping of file (we hope)\n");

		textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
				  MAP_PRIVATE, 0);
		if (!textpos || IS_ERR_VALUE(textpos)) {
			ret = textpos;
			if (!textpos)
				ret = -ENOMEM;
			pr_err("Unable to mmap process text, errno %d\n", ret);
			goto err;
		}

		len = data_len + extra +
			DATA_START_OFFSET_WORDS * sizeof(unsigned long);
		len = PAGE_ALIGN(len);
		realdatastart = vm_mmap(NULL, 0, len,
			PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);

		if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
			ret = realdatastart;
			if (!realdatastart)
				ret = -ENOMEM;
			pr_err("Unable to allocate RAM for process data, "
			       "errno %d\n", ret);
			vm_munmap(textpos, text_len);
			goto err;
		}
		datapos = ALIGN(realdatastart +
				DATA_START_OFFSET_WORDS * sizeof(unsigned long),
				FLAT_DATA_ALIGN);

		pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
			 data_len + bss_len + stack_len, datapos);

		fpos = ntohl(hdr->data_start);
#ifdef CONFIG_BINFMT_ZFLAT
		if (flags & FLAT_FLAG_GZDATA) {
			result = decompress_exec(bprm, fpos, (char *)datapos,
						 full_data, 0);
		} else
#endif
		{
			result = read_code(bprm->file, datapos, fpos,
					full_data);
		}
		if (IS_ERR_VALUE(result)) {
			ret = result;
			pr_err("Unable to read data+bss, errno %d\n", ret);
			vm_munmap(textpos, text_len);
			vm_munmap(realdatastart, len);
			goto err;
		}

		reloc = (__be32 __user *)
			(datapos + (ntohl(hdr->reloc_start) - text_len));
		memp = realdatastart;
		memp_size = len;
	} else {

		len = text_len + data_len + extra +
			DATA_START_OFFSET_WORDS * sizeof(u32);
		len = PAGE_ALIGN(len);
		textpos = vm_mmap(NULL, 0, len,
			PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);

		if (!textpos || IS_ERR_VALUE(textpos)) {
			ret = textpos;
			if (!textpos)
				ret = -ENOMEM;
			pr_err("Unable to allocate RAM for process text/data, "
			       "errno %d\n", ret);
			goto err;
		}

		realdatastart = textpos + ntohl(hdr->data_start);
		datapos = ALIGN(realdatastart +
				DATA_START_OFFSET_WORDS * sizeof(u32),
				FLAT_DATA_ALIGN);

		reloc = (__be32 __user *)
			(datapos + (ntohl(hdr->reloc_start) - text_len));
		memp = textpos;
		memp_size = len;
#ifdef CONFIG_BINFMT_ZFLAT
		/*
		 * load it all in and treat it like a RAM load from now on
		 */
		if (flags & FLAT_FLAG_GZIP) {
#ifndef CONFIG_MMU
			result = decompress_exec(bprm, sizeof(struct flat_hdr),
					 (((char *)textpos) + sizeof(struct flat_hdr)),
					 (text_len + full_data
						  - sizeof(struct flat_hdr)),
					 0);
			memmove((void *) datapos, (void *) realdatastart,
					full_data);
#else
			/*
			 * This is used on MMU systems mainly for testing.
			 * Let's use a kernel buffer to simplify things.
			 */
			long unz_text_len = text_len - sizeof(struct flat_hdr);
			long unz_len = unz_text_len + full_data;
			char *unz_data = vmalloc(unz_len);
			if (!unz_data) {
				result = -ENOMEM;
			} else {
				result = decompress_exec(bprm, sizeof(struct flat_hdr),
							 unz_data, unz_len, 0);
				if (result == 0 &&
				    (copy_to_user((void __user *)textpos + sizeof(struct flat_hdr),
						  unz_data, unz_text_len) ||
				     copy_to_user((void __user *)datapos,
						  unz_data + unz_text_len, full_data)))
					result = -EFAULT;
				vfree(unz_data);
			}
#endif
		} else if (flags & FLAT_FLAG_GZDATA) {
			result = read_code(bprm->file, textpos, 0, text_len);
			if (!IS_ERR_VALUE(result)) {
#ifndef CONFIG_MMU
				result = decompress_exec(bprm, text_len, (char *) datapos,
						 full_data, 0);
#else
				char *unz_data = vmalloc(full_data);
				if (!unz_data) {
					result = -ENOMEM;
				} else {
					result = decompress_exec(bprm, text_len,
						       unz_data, full_data, 0);
					if (result == 0 &&
					    copy_to_user((void __user *)datapos,
							 unz_data, full_data))
						result = -EFAULT;
					vfree(unz_data);
				}
#endif
			}
		} else
#endif /* CONFIG_BINFMT_ZFLAT */
		{
			result = read_code(bprm->file, textpos, 0, text_len);
			if (!IS_ERR_VALUE(result))
				result = read_code(bprm->file, datapos,
						   ntohl(hdr->data_start),
						   full_data);
		}
		if (IS_ERR_VALUE(result)) {
			ret = result;
			pr_err("Unable to read code+data+bss, errno %d\n", ret);
			vm_munmap(textpos, text_len + data_len + extra +
				  DATA_START_OFFSET_WORDS * sizeof(u32));
			goto err;
		}
	}

	start_code = textpos + sizeof(struct flat_hdr);
	end_code = textpos + text_len;
	text_len -= sizeof(struct flat_hdr); /* the real code len */

	/* The main program needs a little extra setup in the task structure */
	current->mm->start_code = start_code;
	current->mm->end_code = end_code;
	current->mm->start_data = datapos;
	current->mm->end_data = datapos + data_len;
	/*
	 * set up the brk stuff, uses any slack left in data/bss/stack
	 * allocation.  We put the brk after the bss (between the bss
	 * and stack) like other platforms.
	 * Userspace code relies on the stack pointer starting out at
	 * an address right at the end of a page.
	 */
	current->mm->start_brk = datapos + data_len + bss_len;
	current->mm->brk = (current->mm->start_brk + 3) & ~3;
#ifndef CONFIG_MMU
	current->mm->context.end_brk = memp + memp_size - stack_len;
#endif

	if (flags & FLAT_FLAG_KTRACE) {
		pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n",
			textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
		pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n",
			"Load", bprm->filename,
			start_code, end_code, datapos, datapos + data_len,
			datapos + data_len, (datapos + data_len + bss_len + 3) & ~3);
	}

	/* Store the current module values into the global library structure */
	libinfo->lib_list[0].start_code = start_code;
	libinfo->lib_list[0].start_data = datapos;
	libinfo->lib_list[0].start_brk = datapos + data_len + bss_len;
	libinfo->lib_list[0].text_len = text_len;
	libinfo->lib_list[0].loaded = 1;
	libinfo->lib_list[0].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
	libinfo->lib_list[0].build_date = ntohl(hdr->build_date);

	/*
	 * We just load the allocations into some temporary memory to
	 * help simplify all this mumbo jumbo
	 *
	 * We've got two different sections of relocation entries.
	 * The first is the GOT which resides at the beginning of the data segment
	 * and is terminated with a -1.  This one can be relocated in place.
	 * The second is the extra relocation entries tacked after the image's
	 * data segment. These require a little more processing as the entry is
	 * really an offset into the image which contains an offset into the
	 * image.
	 */
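	/*
	 * Each GOT slot below is a 32-bit offset into the image; every
	 * non-zero slot is rewritten in place to its run-time address,
	 * stopping at the 0xffffffff terminator.
	 */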
	if (flags & FLAT_FLAG_GOTPIC) {
		rp = skip_got_header((u32 __user *) datapos);
		for (; ; rp++) {
			u32 addr, rp_val;
			if (get_user(rp_val, rp))
				return -EFAULT;
			if (rp_val == 0xffffffff)
				break;
			if (rp_val) {
				addr = calc_reloc(rp_val, libinfo);
				if (addr == RELOC_FAILED) {
					ret = -ENOEXEC;
					goto err;
				}
				if (put_user(addr, rp))
					return -EFAULT;
			}
		}
	}

	/*
	 * Now run through the relocation entries.
	 * We've got to be careful here as C++ produces relocatable zero
	 * entries in the constructor and destructor tables which are then
	 * tested for being not zero (which will always occur unless we're
	 * based from address zero).  This causes an endless loop as __start
	 * is at zero.  The solution used is to not relocate zero addresses.
	 * This has the negative side effect of not allowing a global data
	 * reference to be statically initialised to _stext (I've moved
	 * __start to address 4 so that is okay).
	 */
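	/*
	 * Illustrative example (not from any particular binary): with a real
	 * code length of 0x800, a table entry of 0x1000 names the word at
	 * data offset 0x800; that word's own offset value is in turn mapped
	 * through calc_reloc() and written back as an absolute address.
	 */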
	if (rev > OLD_FLAT_VERSION) {
		for (i = 0; i < relocs; i++) {
			u32 addr, relval;
			__be32 tmp;

			/*
			 * Get the address of the pointer to be
			 * relocated (of course, the address has to be
			 * relocated first).
			 */
			if (get_user(tmp, reloc + i))
				return -EFAULT;
			relval = ntohl(tmp);
			addr = flat_get_relocate_addr(relval);
			rp = (u32 __user *)calc_reloc(addr, libinfo);
			if (rp == (u32 __user *)RELOC_FAILED) {
				ret = -ENOEXEC;
				goto err;
			}

			/* Get the pointer's value.  */
			ret = flat_get_addr_from_rp(rp, relval, flags, &addr);
			if (unlikely(ret))
				goto err;

			if (addr != 0) {
				/*
				 * Do the relocation.  PIC relocs in the data section are
				 * already in target order
				 */
				if ((flags & FLAT_FLAG_GOTPIC) == 0) {
					/*
					 * Meh, the same value can have a different
					 * byte order based on a flag..
					 */
					addr = ntohl((__force __be32)addr);
				}
				addr = calc_reloc(addr, libinfo);
				if (addr == RELOC_FAILED) {
					ret = -ENOEXEC;
					goto err;
				}

				/* Write back the relocated pointer.  */
				ret = flat_put_addr_at_rp(rp, addr, relval);
				if (unlikely(ret))
					goto err;
			}
		}
#ifdef CONFIG_BINFMT_FLAT_OLD
	} else {
		for (i = 0; i < relocs; i++) {
			__be32 relval;
			if (get_user(relval, reloc + i))
				return -EFAULT;
			old_reloc(ntohl(relval));
		}
#endif /* CONFIG_BINFMT_FLAT_OLD */
	}

	flush_icache_user_range(start_code, end_code);

	/* zero the BSS,  BRK and stack areas */
	if (clear_user((void __user *)(datapos + data_len), bss_len +
		       (memp + memp_size - stack_len -		/* end brk */
		       libinfo->lib_list[0].start_brk) +	/* start brk */
		       stack_len))
		return -EFAULT;

	return 0;
err:
	return ret;
}


/****************************************************************************/

/*
 * These are the functions used to load flat style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */
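/*
 * bFLT executables are typically produced by elf2flt and are most common
 * on MMU-less targets, though the loader also runs on MMU kernels (see
 * the CONFIG_MMU branches above and below).
 */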

static int load_flat_binary(struct linux_binprm *bprm)
{
	struct lib_info libinfo;
	struct pt_regs *regs = current_pt_regs();
	unsigned long stack_len = 0;
	unsigned long start_addr;
	int res;
	int i, j;

	memset(&libinfo, 0, sizeof(libinfo));

	/*
	 * We have to add the size of our arguments to our stack size
	 * otherwise it's too easy for users to create stack overflows
	 * by passing in a huge argument list.  And yes,  we have to be
	 * pedantic and include space for the argv/envp array as it may have
	 * a lot of entries.
	 */
#ifndef CONFIG_MMU
	stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */
#endif
	stack_len += (bprm->argc + 1) * sizeof(char *);   /* the argv array */
	stack_len += (bprm->envc + 1) * sizeof(char *);   /* the envp array */
	stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN);

	res = load_flat_file(bprm, &libinfo, &stack_len);
	if (res < 0)
		return res;

	/* Update data segment pointers for all libraries */
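	/*
	 * With MAX_SHARED_LIBS == 1 this stores a single word, the main
	 * program's own start_data, in the slot reserved just below its
	 * data segment (see DATA_START_OFFSET_WORDS); the nested loop is a
	 * remnant of the old shared-library handling.
	 */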
	for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) {
		if (!libinfo.lib_list[i].loaded)
			continue;
		for (j = 0; j < MAX_SHARED_LIBS; j++) {
			unsigned long val = libinfo.lib_list[j].loaded ?
				libinfo.lib_list[j].start_data : UNLOADED_LIB;
			unsigned long __user *p = (unsigned long __user *)
				libinfo.lib_list[i].start_data;
			p -= j + 1;
			if (put_user(val, p))
				return -EFAULT;
		}
	}

	set_binfmt(&flat_format);

#ifdef CONFIG_MMU
	res = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	if (!res)
		res = create_flat_tables(bprm, bprm->p);
#else
	/* Stash our initial stack pointer into the mm structure */
	current->mm->start_stack =
		((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
	pr_debug("sp=%lx\n", current->mm->start_stack);

	/* copy the arg pages onto the stack */
	res = transfer_args_to_stack(bprm, &current->mm->start_stack);
	if (!res)
		res = create_flat_tables(bprm, current->mm->start_stack);
#endif
	if (res)
		return res;

	/* Fake some return addresses to ensure the call chain will
	 * initialise library in order for us.  We are required to call
	 * lib 1 first, then 2, ... and finally the main program (id 0).
	 */
	start_addr = libinfo.lib_list[0].entry;

#ifdef FLAT_PLAT_INIT
	FLAT_PLAT_INIT(regs);
#endif

	finalize_exec(bprm);
	pr_debug("start_thread(regs=0x%p, entry=0x%lx, start_stack=0x%lx)\n",
		 regs, start_addr, current->mm->start_stack);
	start_thread(regs, start_addr, current->mm->start_stack);

	return 0;
}

/****************************************************************************/

static int __init init_flat_binfmt(void)
{
	register_binfmt(&flat_format);
	return 0;
}
core_initcall(init_flat_binfmt);

/****************************************************************************/