fs/proc/vmcore.c: lines matching "segment", "no", and "remap"
1 // SPDX-License-Identifier: GPL-2.0-only
74 INIT_LIST_HEAD(&cb->next); in register_vmcore_cb()
76 list_add_tail(&cb->next, &vmcore_cb_list); in register_vmcore_cb()
90 list_del_rcu(&cb->next); in unregister_vmcore_cb()
111 if (unlikely(!cb->pfn_is_ram)) in pfn_is_ram()
113 ret = cb->pfn_is_ram(cb, pfn); in pfn_is_ram()
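
The three fragments above are the whole surface of the vmcore_cb hook: a driver fills in struct vmcore_cb (from <linux/crash_dump.h>) and registers it so that pfn_is_ram() can veto reads of ranges it knows are not backed by RAM. A minimal sketch of a caller, with hypothetical names and a hypothetical pfn window (virtio-mem is the in-tree user of this hook):

#include <linux/module.h>
#include <linux/crash_dump.h>

static unsigned long example_start_pfn, example_end_pfn; /* hypothetical range */

/* Return false for pfns whose old-kernel content must not be read. */
static bool example_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	return pfn < example_start_pfn || pfn >= example_end_pfn;
}

static struct vmcore_cb example_vmcore_cb = {
	.pfn_is_ram = example_pfn_is_ram,
};

static int __init example_init(void)
{
	register_vmcore_cb(&example_vmcore_cb);   /* adds to vmcore_cb_list */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_vmcore_cb(&example_vmcore_cb); /* list_del_rcu() as above */
}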
147 if (count > (PAGE_SIZE - offset)) in read_from_oldmem()
148 nr_bytes = PAGE_SIZE - offset; in read_from_oldmem()
166 return -EFAULT; in read_from_oldmem()
170 count -= nr_bytes; in read_from_oldmem()
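
The matched arithmetic belongs to read_from_oldmem()'s per-page loop; a condensed sketch of the chunking (simplified: the real function copies through copy_oldmem_page()/iov_iter and also handles encrypted memory):

/* assume: size_t count bytes requested starting at *ppos */
u64 offset = (u64)(*ppos % PAGE_SIZE);    /* offset inside the first page */
unsigned long pfn = (unsigned long)(*ppos / PAGE_SIZE);

while (count) {
	size_t nr_bytes = count;

	/* never let one copy cross a page boundary */
	if (count > (PAGE_SIZE - offset))
		nr_bytes = PAGE_SIZE - offset;

	/* ... copy nr_bytes from pfn at offset, or zeros if !pfn_is_ram(pfn) ... */

	*ppos += nr_bytes;
	count -= nr_bytes;
	offset = 0;                       /* subsequent pages start at 0 */
	++pfn;
}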
252 if (start < offset + dump->size) { in vmcoredd_copy_dumps()
253 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_copy_dumps()
254 buf = dump->buf + start - offset; in vmcoredd_copy_dumps()
256 ret = -EFAULT; in vmcoredd_copy_dumps()
260 size -= tsz; in vmcoredd_copy_dumps()
267 offset += dump->size; in vmcoredd_copy_dumps()
287 if (start < offset + dump->size) { in vmcoredd_mmap_dumps()
288 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_mmap_dumps()
289 buf = dump->buf + start - offset; in vmcoredd_mmap_dumps()
292 ret = -EFAULT; in vmcoredd_mmap_dumps()
296 size -= tsz; in vmcoredd_mmap_dumps()
304 offset += dump->size; in vmcoredd_mmap_dumps()
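
vmcoredd_copy_dumps() and vmcoredd_mmap_dumps() share the window arithmetic seen above; annotated, the per-dump step is:

/* offset: running start of this dump inside the device-dump blob */
if (start < offset + dump->size) {
	/* bytes of this dump that fall inside [start, start + size) */
	tsz = min(offset + (u64)dump->size - start, (u64)size);
	buf = dump->buf + start - offset;   /* first byte of the overlap */

	/* copy_to_iter() / remap_vmalloc_range_partial() tsz bytes from buf */

	size  -= tsz;
	start += tsz;                       /* advance the request window */
}
offset += dump->size;                       /* next dump begins here */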
327 iov_iter_truncate(iter, vmcore_size - *fpos); in __read_vmcore()
331 tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter)); in __read_vmcore()
333 return -EFAULT; in __read_vmcore()
342 /* Read ELF note segment */ in __read_vmcore()
348 * completely and we will end up with zero-filled data in __read_vmcore()
350 * then try to decode this zero-filled data as valid notes in __read_vmcore()
352 * the other elf notes ensures that zero-filled data can be in __read_vmcore()
358 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - in __read_vmcore()
360 start = *fpos - elfcorebuf_sz; in __read_vmcore()
362 return -EFAULT; in __read_vmcore()
374 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, in __read_vmcore()
376 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz; in __read_vmcore()
378 return -EFAULT; in __read_vmcore()
391 if (*fpos < m->offset + m->size) { in __read_vmcore()
393 m->offset + m->size - *fpos, in __read_vmcore()
395 start = m->paddr + *fpos - m->offset; in __read_vmcore()
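
Reconstructed from the offsets used above, __read_vmcore() walks the file in this order (device-dump notes deliberately precede the other ELF notes, per the comment):

/*
 * [0, elfcorebuf_sz)                  ELF header + program headers
 * [elfcorebuf_sz,
 *  elfcorebuf_sz + vmcoredd_orig_sz)  device dump notes (vmcoredd_list)
 * [elfcorebuf_sz + vmcoredd_orig_sz,
 *  elfcorebuf_sz + elfnotes_sz)       merged ELF note segment (elfnotes_buf)
 * [m->offset, m->offset + m->size)    old memory, one range per
 *                                     vmcore_list entry, in list order
 */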
414 return __read_vmcore(iter, &iocb->ki_pos); in read_vmcore()
427 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in mmap_vmcore_fault()
428 pgoff_t index = vmf->pgoff; in mmap_vmcore_fault()
453 vmf->page = page; in mmap_vmcore_fault()
461 * vmcore_alloc_buf - allocate buffer in vmalloc memory
465 * the buffer to user-space by means of remap_vmalloc_range().
468 * disabled and there's no need to allow users to mmap the buffer.
482 * non-contiguous objects (ELF header, ELF note segment and memory
484 * virtually contiguous user-space in ELF layout.
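
Only the comment lines of vmcore_alloc_buf() matched; the body is essentially a config-dependent one-liner (this mirrors the mainline implementation):

static void *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	/* VM_USERMAP allocation, so remap_vmalloc_range() will accept it */
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}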
493 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
502 * Returns zero on success, -EAGAIN on failure.
519 * We hit a page which is not RAM. Remap the contiguous in remap_oldmem_pfn_checked()
520 * region between pos_start and pos-1 and replace in remap_oldmem_pfn_checked()
521 * the non-RAM page at pos with the zero page. in remap_oldmem_pfn_checked()
524 /* Remap contiguous region */ in remap_oldmem_pfn_checked()
525 map_size = (pos - pos_start) << PAGE_SHIFT; in remap_oldmem_pfn_checked()
532 /* Remap the zero page */ in remap_oldmem_pfn_checked()
542 /* Remap the rest */ in remap_oldmem_pfn_checked()
543 map_size = (pos - pos_start) << PAGE_SHIFT; in remap_oldmem_pfn_checked()
550 do_munmap(vma->vm_mm, from, len, NULL); in remap_oldmem_pfn_checked()
551 return -EAGAIN; in remap_oldmem_pfn_checked()
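
Only scattered lines of remap_oldmem_pfn_checked() matched above; a reconstruction of its control flow, as a sketch (details such as variable names may differ from mainline):

static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (pfn_is_ram(pos))
			continue;
		/*
		 * We hit a page which is not RAM: remap the contiguous
		 * RAM run [pos_start, pos) and put the zero page at pos.
		 */
		if (pos > pos_start) {
			map_size = (pos - pos_start) << PAGE_SHIFT;
			if (remap_oldmem_pfn_range(vma, from + len, pos_start,
						   map_size, prot))
				goto fail;
			len += map_size;
		}
		if (remap_oldmem_pfn_range(vma, from + len, zeropage_pfn,
					   PAGE_SIZE, prot))
			goto fail;
		len += PAGE_SIZE;
		pos_start = pos + 1;
	}

	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	/* undo everything mapped so far and let the caller fail the mmap */
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}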
575 size_t size = vma->vm_end - vma->vm_start; in mmap_vmcore()
579 start = (u64)vma->vm_pgoff << PAGE_SHIFT; in mmap_vmcore()
583 return -EINVAL; in mmap_vmcore()
585 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) in mmap_vmcore()
586 return -EPERM; in mmap_vmcore()
589 vma->vm_ops = &vmcore_mmap_ops; in mmap_vmcore()
596 tsz = min(elfcorebuf_sz - (size_t)start, size); in mmap_vmcore()
598 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, in mmap_vmcore()
599 vma->vm_page_prot)) in mmap_vmcore()
600 return -EAGAIN; in mmap_vmcore()
601 size -= tsz; in mmap_vmcore()
614 * completely and we will end up with zero-filled data in mmap_vmcore()
616 * then try to decode this zero-filled data as valid notes in mmap_vmcore()
618 * the other elf notes ensures that zero-filled data can be in mmap_vmcore()
628 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - in mmap_vmcore()
630 start_off = start - elfcorebuf_sz; in mmap_vmcore()
631 if (vmcoredd_mmap_dumps(vma, vma->vm_start + len, in mmap_vmcore()
635 size -= tsz; in mmap_vmcore()
646 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); in mmap_vmcore()
647 kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; in mmap_vmcore()
648 if (remap_vmalloc_range_partial(vma, vma->vm_start + len, in mmap_vmcore()
652 size -= tsz; in mmap_vmcore()
661 if (start < m->offset + m->size) { in mmap_vmcore()
665 m->offset + m->size - start, size); in mmap_vmcore()
666 paddr = m->paddr + start - m->offset; in mmap_vmcore()
667 if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len, in mmap_vmcore()
669 vma->vm_page_prot)) in mmap_vmcore()
671 size -= tsz; in mmap_vmcore()
682 do_munmap(vma->vm_mm, vma->vm_start, len, NULL); in mmap_vmcore()
683 return -EAGAIN; in mmap_vmcore()
688 return -ENOSYS; in mmap_vmcore()
712 size += m->size; in get_vmcore_size()
718 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
724 * note segment.
733 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in update_note_header_size_elf64()
736 if (phdr_ptr->p_type != PT_NOTE) in update_note_header_size_elf64()
738 max_sz = phdr_ptr->p_memsz; in update_note_header_size_elf64()
739 offset = phdr_ptr->p_offset; in update_note_header_size_elf64()
742 return -ENOMEM; in update_note_header_size_elf64()
749 while (nhdr_ptr->n_namesz != 0) { in update_note_header_size_elf64()
751 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + in update_note_header_size_elf64()
752 (((u64)nhdr_ptr->n_descsz + 3) & ~3); in update_note_header_size_elf64()
755 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); in update_note_header_size_elf64()
762 phdr_ptr->p_memsz = real_sz; in update_note_header_size_elf64()
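
The loop above sizes each note by padding both name and descriptor to 4-byte boundaries; pulled out as a standalone helper, the per-entry arithmetic is (sketch):

/* Size of one ELF note entry: header + 4-byte-padded name and desc. */
static u64 elf64_note_entry_size(const Elf64_Nhdr *nhdr)
{
	return sizeof(Elf64_Nhdr) +
	       (((u64)nhdr->n_namesz + 3) & ~3) +
	       (((u64)nhdr->n_descsz + 3) & ~3);
}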
772 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
773 * headers and the sum of the real sizes of their ELF note segment headers and
782 * @sz_ptnote in its phdr->p_memsz.
786 * and each of PT_NOTE program headers has actual ELF note segment
798 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in get_note_number_and_size_elf64()
799 if (phdr_ptr->p_type != PT_NOTE) in get_note_number_and_size_elf64()
802 *sz_ptnote += phdr_ptr->p_memsz; in get_note_number_and_size_elf64()
809 * copy_notes_elf64 - copy ELF note segments into a given buffer
814 * This function is used to copy the ELF note segment from the 1st kernel
817 * real ELF note segment headers and data.
821 * and each of PT_NOTE program headers has actual ELF note segment
831 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in copy_notes_elf64()
833 if (phdr_ptr->p_type != PT_NOTE) in copy_notes_elf64()
835 offset = phdr_ptr->p_offset; in copy_notes_elf64()
836 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, in copy_notes_elf64()
840 notes_buf += phdr_ptr->p_memsz; in copy_notes_elf64()
869 return -ENOMEM; in merge_note_headers_elf64()
879 (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr); in merge_note_headers_elf64()
891 i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); in merge_note_headers_elf64()
892 *elfsz = *elfsz - i; in merge_note_headers_elf64()
893 memmove(tmp, tmp + i, *elfsz - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)); in merge_note_headers_elf64()
898 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; in merge_note_headers_elf64()
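
Worked example of the shrink above, with hypothetical numbers (e_phnum = 10, nr_ptnote = 3):

/*
 * Before: Elf64_Ehdr + 10 x Elf64_Phdr, three of them PT_NOTE.
 * The three PT_NOTE entries merge into one, leaving
 *   i       = (3 - 1) * sizeof(Elf64_Phdr) bytes of dead space,
 *   *elfsz -= i, and
 *   e_phnum = 10 - 3 + 1 = 8.
 * The memmove() pulls the trailing program headers forward over the
 * dead slots; tmp points just past the ELF header and the single
 * merged PT_NOTE program header.
 */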
909 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
915 * note segment.
924 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in update_note_header_size_elf32()
927 if (phdr_ptr->p_type != PT_NOTE) in update_note_header_size_elf32()
929 max_sz = phdr_ptr->p_memsz; in update_note_header_size_elf32()
930 offset = phdr_ptr->p_offset; in update_note_header_size_elf32()
933 return -ENOMEM; in update_note_header_size_elf32()
940 while (nhdr_ptr->n_namesz != 0) { in update_note_header_size_elf32()
942 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + in update_note_header_size_elf32()
943 (((u64)nhdr_ptr->n_descsz + 3) & ~3); in update_note_header_size_elf32()
946 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); in update_note_header_size_elf32()
953 phdr_ptr->p_memsz = real_sz; in update_note_header_size_elf32()
963 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
964 * headers and the sum of the real sizes of their ELF note segment headers and
973 * @sz_ptnote in its phdr->p_memsz.
977 * and each of PT_NOTE program headers has actual ELF note segment
989 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in get_note_number_and_size_elf32()
990 if (phdr_ptr->p_type != PT_NOTE) in get_note_number_and_size_elf32()
993 *sz_ptnote += phdr_ptr->p_memsz; in get_note_number_and_size_elf32()
1000 * copy_notes_elf32 - copy ELF note segments into a given buffer
1005 * This function is used to copy the ELF note segment from the 1st kernel
1008 * real ELF note segment headers and data.
1012 * and each of PT_NOTE program headers has actual ELF note segment
1022 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in copy_notes_elf32()
1024 if (phdr_ptr->p_type != PT_NOTE) in copy_notes_elf32()
1026 offset = phdr_ptr->p_offset; in copy_notes_elf32()
1027 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, in copy_notes_elf32()
1031 notes_buf += phdr_ptr->p_memsz; in copy_notes_elf32()
1060 return -ENOMEM; in merge_note_headers_elf32()
1070 (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr); in merge_note_headers_elf32()
1082 i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); in merge_note_headers_elf32()
1083 *elfsz = *elfsz - i; in merge_note_headers_elf32()
1084 memmove(tmp, tmp + i, *elfsz - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)); in merge_note_headers_elf32()
1089 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; in merge_note_headers_elf32()
1115 /* Skip ELF header, program headers and ELF note segment. */ in process_ptload_program_headers_elf64()
1118 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in process_ptload_program_headers_elf64()
1121 if (phdr_ptr->p_type != PT_LOAD) in process_ptload_program_headers_elf64()
1124 paddr = phdr_ptr->p_offset; in process_ptload_program_headers_elf64()
1126 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); in process_ptload_program_headers_elf64()
1127 size = end - start; in process_ptload_program_headers_elf64()
1132 return -ENOMEM; in process_ptload_program_headers_elf64()
1133 new->paddr = start; in process_ptload_program_headers_elf64()
1134 new->size = size; in process_ptload_program_headers_elf64()
1135 list_add_tail(&new->list, vc_list); in process_ptload_program_headers_elf64()
1138 phdr_ptr->p_offset = vmcore_off + (paddr - start); in process_ptload_program_headers_elf64()
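
Each PT_LOAD range is widened to page boundaries before being appended to vmcore_list; annotated, the per-header arithmetic is (paddr is read from p_offset because the crash ELF header prepared at kexec load time carries the physical address there):

u64 paddr = phdr_ptr->p_offset;                 /* physical address */
u64 start = rounddown(paddr, PAGE_SIZE);
u64 end   = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
u64 size  = end - start;                        /* whole pages only */

/* the new vmcore_list entry covers [start, start + size) of old memory */

/* the file offset keeps paddr's sub-page offset inside the padded range */
phdr_ptr->p_offset = vmcore_off + (paddr - start);
vmcore_off += size;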
1158 /* Skip ELF header, program headers and ELF note segment. */ in process_ptload_program_headers_elf32()
1161 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in process_ptload_program_headers_elf32()
1164 if (phdr_ptr->p_type != PT_LOAD) in process_ptload_program_headers_elf32()
1167 paddr = phdr_ptr->p_offset; in process_ptload_program_headers_elf32()
1169 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); in process_ptload_program_headers_elf32()
1170 size = end - start; in process_ptload_program_headers_elf32()
1175 return -ENOMEM; in process_ptload_program_headers_elf32()
1176 new->paddr = start; in process_ptload_program_headers_elf32()
1177 new->size = size; in process_ptload_program_headers_elf32()
1178 list_add_tail(&new->list, vc_list); in process_ptload_program_headers_elf32()
1181 phdr_ptr->p_offset = vmcore_off + (paddr - start); in process_ptload_program_headers_elf32()
1194 /* Skip ELF header, program headers and ELF note segment. */ in set_vmcore_list_offsets()
1198 m->offset = vmcore_off; in set_vmcore_list_offsets()
1199 vmcore_off += m->size; in set_vmcore_list_offsets()
1235 return -EINVAL; in parse_crash_elf64_headers()
1245 return -ENOMEM; in parse_crash_elf64_headers()
1291 return -EINVAL; in parse_crash_elf32_headers()
1300 return -ENOMEM; in parse_crash_elf32_headers()
1334 return -EINVAL; in parse_crash_elf_headers()
1347 return -EINVAL; in parse_crash_elf_headers()
1359 * vmcoredd_write_header - Write vmcore device dump header at the
1372 vdd_hdr->n_namesz = sizeof(vdd_hdr->name); in vmcoredd_write_header()
1373 vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name); in vmcoredd_write_header()
1374 vdd_hdr->n_type = NT_VMCOREDD; in vmcoredd_write_header()
1376 strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME); in vmcoredd_write_header()
1377 strscpy_pad(vdd_hdr->dump_name, data->dump_name); in vmcoredd_write_header()
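
The header filled in above is struct vmcoredd_header from include/uapi/linux/vmcore.h; for reference (abridged):

#define VMCOREDD_NOTE_NAME       "LINUX"
#define VMCOREDD_MAX_NAME_BYTES  44

struct vmcoredd_header {
	__u32 n_namesz;  /* Name size, sizeof(name) */
	__u32 n_descsz;  /* Content size: dump_name + payload */
	__u32 n_type;    /* NT_VMCOREDD */
	__u8  name[8];   /* "LINUX", NUL-padded by strscpy_pad() */
	__u8  dump_name[VMCOREDD_MAX_NAME_BYTES]; /* device dump's name */
};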
1381 * vmcoredd_update_program_headers - Update all ELF program headers
1404 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { in vmcoredd_update_program_headers()
1405 if (phdr->p_type == PT_NOTE) { in vmcoredd_update_program_headers()
1407 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; in vmcoredd_update_program_headers()
1408 phdr->p_filesz = phdr->p_memsz; in vmcoredd_update_program_headers()
1412 start = rounddown(phdr->p_offset, PAGE_SIZE); in vmcoredd_update_program_headers()
1413 end = roundup(phdr->p_offset + phdr->p_memsz, in vmcoredd_update_program_headers()
1415 size = end - start; in vmcoredd_update_program_headers()
1416 phdr->p_offset = vmcore_off + (phdr->p_offset - start); in vmcoredd_update_program_headers()
1424 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { in vmcoredd_update_program_headers()
1425 if (phdr->p_type == PT_NOTE) { in vmcoredd_update_program_headers()
1427 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; in vmcoredd_update_program_headers()
1428 phdr->p_filesz = phdr->p_memsz; in vmcoredd_update_program_headers()
1432 start = rounddown(phdr->p_offset, PAGE_SIZE); in vmcoredd_update_program_headers()
1433 end = roundup(phdr->p_offset + phdr->p_memsz, in vmcoredd_update_program_headers()
1435 size = end - start; in vmcoredd_update_program_headers()
1436 phdr->p_offset = vmcore_off + (phdr->p_offset - start); in vmcoredd_update_program_headers()
1443 * vmcoredd_update_size - Update the total size of the device dumps and update
1463 proc_vmcore->size = vmcore_size; in vmcoredd_update_size()
1467 * vmcore_add_device_dump - Add a buffer containing a device dump to vmcore
1483 return -EINVAL; in vmcore_add_device_dump()
1486 if (!data || !strlen(data->dump_name) || in vmcore_add_device_dump()
1487 !data->vmcoredd_callback || !data->size) in vmcore_add_device_dump()
1488 return -EINVAL; in vmcore_add_device_dump()
1492 ret = -ENOMEM; in vmcore_add_device_dump()
1497 data_size = roundup(sizeof(struct vmcoredd_header) + data->size, in vmcore_add_device_dump()
1503 ret = -ENOMEM; in vmcore_add_device_dump()
1507 vmcoredd_write_header(buf, data, data_size - in vmcore_add_device_dump()
1511 ret = data->vmcoredd_callback(data, buf + in vmcore_add_device_dump()
1516 dump->buf = buf; in vmcore_add_device_dump()
1517 dump->size = data_size; in vmcore_add_device_dump()
1521 list_add_tail(&dump->list, &vmcoredd_list); in vmcore_add_device_dump()
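
From the driver side, the entry point takes a struct vmcoredd_data (declared in <linux/crash_dump.h>); a minimal sketch of a caller, with hypothetical names and a placeholder payload:

#include <linux/crash_dump.h>
#include <linux/sizes.h>
#include <linux/string.h>

/* Called back by vmcore_add_device_dump() to fill the payload area. */
static int example_collect(struct vmcoredd_data *data, void *buf)
{
	memset(buf, 0, data->size);	/* placeholder device state */
	return 0;
}

static struct vmcoredd_data example_dump = {
	.dump_name         = "example_dev",   /* must be non-empty */
	.size              = SZ_64K,          /* must be non-zero */
	.vmcoredd_callback = example_collect, /* must be non-NULL */
};

/* Typically invoked from the driver's crash/error-recovery path;
 * fails with -EINVAL unless vmcore device dumps are enabled. */
static int example_crash_notify(void)
{
	return vmcore_add_device_dump(&example_dump);
}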
1546 list_del(&dump->list); in vmcore_free_device_dumps()
1547 vfree(dump->buf); in vmcore_free_device_dumps()
1580 proc_vmcore->size = vmcore_size; in vmcore_init()
1598 list_del(&m->list); in vmcore_cleanup()