// SPDX-License-Identifier: GPL-2.0

#include <linux/buildid.h>
#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/secretmem.h>

#define BUILD_ID 3

#define MAX_PHDR_CNT 256

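/*
 * freader abstracts reading ELF data either from a caller-supplied memory
 * buffer or from a file's page cache, folio by folio. Both modes share the
 * same fetch interface; r->buf is NULL in the memory-backed mode and points
 * to a small bounce buffer in the file-backed mode.
 */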
struct freader {
	void *buf;
	u32 buf_sz;
	int err;
	union {
		struct {
			struct file *file;
			struct folio *folio;
			void *addr;
			loff_t folio_off;
			bool may_fault;
		};
		struct {
			const char *data;
			u64 data_sz;
		};
	};
};

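/*
 * Initialize a file-backed reader; @buf is a bounce buffer used when a
 * requested read crosses folio boundaries.
 */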
static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
				   struct file *file, bool may_fault)
{
	memset(r, 0, sizeof(*r));
	r->buf = buf;
	r->buf_sz = buf_sz;
	r->file = file;
	r->may_fault = may_fault;
}

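/* Initialize a reader backed by an in-memory buffer of @data_sz bytes */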
static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
{
	memset(r, 0, sizeof(*r));
	r->data = data;
	r->data_sz = data_sz;
}

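/* Unmap and release the currently held folio, if any */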
static void freader_put_folio(struct freader *r)
{
	if (!r->folio)
		return;
	kunmap_local(r->addr);
	folio_put(r->folio);
	r->folio = NULL;
}

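/*
 * Make sure the folio covering @file_off is mapped, reusing the currently
 * held folio if it already spans that offset. In non-faulting mode only an
 * already up-to-date page cache folio is accepted; in faulting mode the
 * folio is read in through read_cache_folio() if necessary.
 */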
static int freader_get_folio(struct freader *r, loff_t file_off)
{
	/* check if we can just reuse current folio */
	if (r->folio && file_off >= r->folio_off &&
	    file_off < r->folio_off + folio_size(r->folio))
		return 0;

	freader_put_folio(r);

	/* reject secretmem folios created with memfd_secret() */
	if (secretmem_mapping(r->file->f_mapping))
		return -EFAULT;

	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);

	/* if sleeping is allowed, wait for the page, if necessary */
	if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
		filemap_invalidate_lock_shared(r->file->f_mapping);
		r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
					    NULL, r->file);
		filemap_invalidate_unlock_shared(r->file->f_mapping);
	}

	if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
		if (!IS_ERR(r->folio))
			folio_put(r->folio);
		r->folio = NULL;
		return -EFAULT;
	}

	r->folio_off = folio_pos(r->folio);
	r->addr = kmap_local_folio(r->folio, 0);

	return 0;
}

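/*
 * Return a pointer to @sz bytes at file offset @file_off. Data contained
 * within a single folio (or within the memory buffer) is returned directly;
 * data straddling a folio boundary is assembled in r->buf first. On failure
 * NULL is returned and r->err holds the error code.
 */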
static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
{
	size_t folio_sz;

	/* provided internal temporary buffer should be sized correctly */
	if (WARN_ON(r->buf && sz > r->buf_sz)) {
		r->err = -E2BIG;
		return NULL;
	}

	if (unlikely(file_off + sz < file_off)) {
		r->err = -EOVERFLOW;
		return NULL;
	}

	/* working with memory buffer is much more straightforward */
	if (!r->buf) {
		if (file_off + sz > r->data_sz) {
			r->err = -ERANGE;
			return NULL;
		}
		return r->data + file_off;
	}

	/* fetch or reuse folio for given file offset */
	r->err = freader_get_folio(r, file_off);
	if (r->err)
		return NULL;

	/* if requested data is crossing folio boundaries, we have to copy
	 * everything into our local buffer to keep a simple linear memory
	 * access interface
	 */
	folio_sz = folio_size(r->folio);
	if (file_off + sz > r->folio_off + folio_sz) {
		int part_sz = r->folio_off + folio_sz - file_off;

		/* copy the part that resides in the current folio */
		memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz);

		/* fetch next folio */
		r->err = freader_get_folio(r, r->folio_off + folio_sz);
		if (r->err)
			return NULL;

		/* copy the rest of requested data */
		memcpy(r->buf + part_sz, r->addr, sz - part_sz);

		return r->buf;
	}

	/* if data fits in a single folio, just return direct pointer */
	return r->addr + (file_off - r->folio_off);
}

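/* Release any folio still held by a file-backed reader */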
static void freader_cleanup(struct freader *r)
{
	if (!r->buf)
		return; /* non-file-backed mode */

	freader_put_folio(r);
}

/*
 * Parse the build ID from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
			  loff_t note_off, Elf32_Word note_size)
{
	const char note_name[] = "GNU";
	const size_t note_name_sz = sizeof(note_name);
	u32 build_id_off, new_off, note_end, name_sz, desc_sz;
	const Elf32_Nhdr *nhdr;
	const char *data;

	if (check_add_overflow(note_off, note_size, &note_end))
		return -EINVAL;

	while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
		nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
		if (!nhdr)
			return r->err;

		name_sz = READ_ONCE(nhdr->n_namesz);
		desc_sz = READ_ONCE(nhdr->n_descsz);

		new_off = note_off + sizeof(Elf32_Nhdr);
		if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
		    check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
		    new_off > note_end)
			break;

		if (nhdr->n_type == BUILD_ID &&
		    name_sz == note_name_sz &&
		    memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
		    desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
			build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);

			/* freader_fetch() will invalidate nhdr pointer */
			data = freader_fetch(r, build_id_off, desc_sz);
			if (!data)
				return r->err;

			memcpy(build_id, data, desc_sz);
			memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
			if (size)
				*size = desc_sz;
			return 0;
		}

		note_off = new_off;
	}

	return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf32_Ehdr *ehdr;
	const Elf32_Phdr *phdr;
	__u32 phnum, phoff, i;

	ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set an upper bound on the number of segments (phdrs) we iterate over */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf64_Ehdr *ehdr;
	const Elf64_Phdr *phdr;
	__u32 phnum, i;
	__u64 phoff;

	ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set an upper bound on the number of segments (phdrs) we iterate over */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}

	return -EINVAL;
}

/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
#define MAX_FREADER_BUF_SZ 64

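/*
 * Common parsing path: validate the ELF header of the file backing @vma and
 * dispatch to the 32-bit or 64-bit program header walk to locate the GNU
 * build ID note.
 */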
static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
			    __u32 *size, bool may_fault)
{
	const Elf32_Ehdr *ehdr;
	struct freader r;
	char buf[MAX_FREADER_BUF_SZ];
	int ret;

	/* only works for page-backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);

	/* fetch the first 18 bytes of the ELF header for checks */
	ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
	if (!ehdr) {
		ret = r.err;
		goto out;
	}

	ret = -EINVAL;

	/* compare against the ELF magic: "\x7fELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable and shared object files */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = get_build_id_32(&r, build_id, size);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = get_build_id_64(&r, build_id, size);
out:
	freader_cleanup(&r);
	return ret;
}

/*
 * Parse the build ID of the ELF file mapped to @vma
 * @vma:      vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size:     returns actual build id size in case of success
 *
 * Assumes no page fault can be taken, so if relevant portions of the ELF file
 * are not already paged in, fetching the build ID fails.
 *
 * Return: 0 on success; negative error code otherwise
 */
int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, false /* !may_fault */);
}

/*
 * Parse the build ID of the ELF file mapped to @vma
 * @vma:      vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size:     returns actual build id size in case of success
 *
 * Assumes a faultable context and may cause page faults to bring file data
 * into the page cache.
 *
 * Return: 0 on success; negative error code otherwise
 */
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, true /* may_fault */);
}

/**
 * build_id_parse_buf - Get build ID from a buffer
 * @buf:      ELF note section(s) to parse
 * @buf_size: Size of @buf in bytes
 * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
 *
 * Return: 0 on success, -EINVAL otherwise
 */
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
	struct freader r;
	int err;

	freader_init_from_mem(&r, buf, buf_size);

	err = parse_build_id(&r, build_id, NULL, 0, buf_size);

	freader_cleanup(&r);
	return err;
}

#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;

/**
 * init_vmlinux_build_id - Compute and stash the running kernel's build ID
 */
void __init init_vmlinux_build_id(void)
{
	extern const void __start_notes;
	extern const void __stop_notes;
	unsigned int size = &__stop_notes - &__start_notes;

	build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
}
#endif