Lines Matching +full:mixed +full:- +full:signals
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
53 return -1; in kmmio_fault()
60 * 32-bit mode:
65 * 64-bit mode:
92 * In 64-bit mode 0x40..0x4F are valid REX prefixes in check_prefetch_opcode()
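The two comments above concern scanning the faulting instruction for a prefetch opcode: the byte values 0x40..0x4F act as REX prefixes only when decoding in 64-bit mode, while in 32-bit mode the same encodings are inc/dec instructions and must not be skipped. A minimal standalone sketch of that classification, using a hypothetical is_rex_prefix() helper rather than the kernel's check_prefetch_opcode():

#include <stdbool.h>
#include <stdio.h>

/* Illustrative helper, not a kernel API: a byte in 0x40..0x4F is a REX
 * prefix only when the code being decoded runs in 64-bit mode. */
static bool is_rex_prefix(unsigned char opcode, bool long_mode)
{
	return long_mode && (opcode & 0xf0) == 0x40;
}

int main(void)
{
	/* 0x48 is a common REX.W prefix in 64-bit code, but DEC in 32-bit code. */
	printf("%d %d\n", is_rex_prefix(0x48, true), is_rex_prefix(0x48, false));
	return 0;
}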
120 c->x86_vendor == X86_VENDOR_AMD && in is_amd_k8_pre_npt()
121 c->x86 == 0xf && c->x86_model < 0x40); in is_amd_k8_pre_npt()
131 /* Erratum #91 affects AMD K8, pre-NPT CPUs */ in is_prefetch()
147 * not-present page (e.g. due to a race). No one has ever in is_prefetch()
193 * and redundant with the set_pmd() on non-PAE. As would in vmalloc_sync_one()
225 * where it synchronizes this update with the other page-tables in the
231 * which are not mapped in every page-table in the system, causing an
232 * unhandled page-fault when they are accessed.
242 return -1; in vmalloc_fault()
245 * Synchronize this task's top level page-table in vmalloc_fault()
254 return -1; in vmalloc_fault()
261 return -1; in vmalloc_fault()
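The fragments above come from the lazy vmalloc synchronization path: a mapping created in the reference (init_mm) page tables may still be missing from the current task's top-level table, so vmalloc_fault() copies the relevant top-level entry over and returns -1 only when there is nothing valid to copy. A user-space sketch of that idea under invented names (ref_top_level, task_top_level and sync_top_level_entry are not kernel APIs):

#include <stdio.h>

#define TOP_LEVEL_ENTRIES 4

static void *ref_top_level[TOP_LEVEL_ENTRIES];   /* the shared reference table */
static void *task_top_level[TOP_LEVEL_ENTRIES];  /* this task's possibly stale copy */

/* Copy one top-level entry from the reference table; return 0 on success,
 * -1 when even the reference table has no mapping (a genuine fault). */
static int sync_top_level_entry(unsigned int index)
{
	if (index >= TOP_LEVEL_ENTRIES || !ref_top_level[index])
		return -1;
	task_top_level[index] = ref_top_level[index];
	return 0;
}

int main(void)
{
	static int lower_table;                     /* stand-in for a lower-level table */

	ref_top_level[2] = &lower_table;            /* mapping exists globally */
	printf("%d\n", sync_top_level_entry(2));    /* 0: copied into this task */
	printf("%d\n", sync_top_level_entry(3));    /* -1: nothing to sync, real fault */
	return 0;
}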
281 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; in arch_sync_kernel_mappings()
322 * And let's rather not kmap-atomic the pte, just in case in dump_pagetable()
419 * Does nothing on 32-bit.
431 if (address != regs->ip) in is_errata93()
441 regs->ip = address; in is_errata93()
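is_errata93() handles an old AMD K8 erratum in which the CPU can drop the upper 32 bits of the return address: when the faulting address equals regs->ip and its upper half is zero, the handler rebuilds the full kernel address and writes it back to regs->ip so execution can resume. A sketch of just the address fixup (errata93_fixup is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a kernel text address whose upper 32 bits were lost. */
static uint64_t errata93_fixup(uint64_t address)
{
	if (address >> 32)          /* upper half intact: nothing to do */
		return address;
	return address | (0xffffffffULL << 32);
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)errata93_fixup(0x81234567ULL));
	return 0;
}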
459 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) in is_errata100()
490 if (offset + sizeof(struct ldttss_desc) >= gdt->size) { in show_ldttss()
491 pr_alert("%s: 0x%hx -- out of bounds\n", name, index); in show_ldttss()
495 if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset), in show_ldttss()
497 pr_alert("%s: 0x%hx -- GDT entry is not readable\n", in show_ldttss()
506 pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n", in show_ldttss()
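show_ldttss() copies a descriptor out of the GDT and prints its base and limit; the interesting part is stitching the base address back together from the split base fields of the x86 descriptor layout. A standalone sketch of that reassembly (struct demo_desc is illustrative, not the kernel's ldttss_desc, and omits the extra 64-bit base field):

#include <stdint.h>
#include <stdio.h>

/* Illustrative descriptor with the classic split base field. */
struct demo_desc {
	uint16_t base0;   /* base bits  0..15 */
	uint8_t  base1;   /* base bits 16..23 */
	uint8_t  base2;   /* base bits 24..31 */
};

static unsigned long demo_desc_base(const struct demo_desc *d)
{
	return d->base0 | ((unsigned long)d->base1 << 16) |
	       ((unsigned long)d->base2 << 24);
}

int main(void)
{
	struct demo_desc d = { .base0 = 0x5678, .base1 = 0x34, .base2 = 0x12 };

	printf("base=0x%lx\n", demo_desc_base(&d));   /* base=0x12345678 */
	return 0;
}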
528 pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", in show_fault_oops()
550 pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code, in show_fault_oops()
551 !(error_code & X86_PF_PROT) ? "not-present page" : in show_fault_oops()
573 /* Usable even on Xen PV -- it's just slow. */ in show_fault_oops()
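The oops header decodes the #PF hardware error code: bit 0 says whether the page was present (a permissions problem) or not present, bit 1 marks a write, bit 4 an instruction fetch. A small standalone sketch of that decoding, with local PF_* masks mirroring the X86_PF_* bit values:

#include <stdio.h>

#define PF_PROT  0x01   /* 0: not-present page, 1: protection violation */
#define PF_WRITE 0x02   /* 0: read access,      1: write access */
#define PF_INSTR 0x10   /* 1: fault was an instruction fetch */

static void describe_fault(unsigned long error_code)
{
	printf("#PF: error_code(0x%04lx) - %s\n", error_code,
	       !(error_code & PF_PROT) ? "not-present page"
				       : "permissions violation");
	printf("#PF: %s access\n",
	       (error_code & PF_INSTR) ? "instruction fetch" :
	       (error_code & PF_WRITE) ? "write" : "read");
}

int main(void)
{
	describe_fault(0x02);   /* write to a not-present page */
	describe_fault(0x11);   /* instruction fetch hitting a protection violation */
	return 0;
}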
605 tsk->comm, address); in pgtable_bad()
619 * table layout, pretend that user-mode accesses to in sanitize_error_code()
635 tsk->thread.trap_nr = X86_TRAP_PF; in set_signal_archinfo()
636 tsk->thread.error_code = error_code | X86_PF_USER; in set_signal_archinfo()
637 tsk->thread.cr2 = address; in set_signal_archinfo()
661 * stack in the direct map, but that's not an overflow -- check in page_fault_oops()
669 * double-fault even before we get this far, in which case in page_fault_oops()
670 * we're fine: the double-fault handler will deal with it. in page_fault_oops()
673 * and then double-fault, though, because we're likely to in page_fault_oops()
676 call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*), in page_fault_oops()
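The block above is page_fault_oops() deciding whether the fault is really a kernel stack overflow: a fault address in the guard page just below (or just above) the task's stack means the stack was blown, and the report then has to run on the double-fault IST stack because the current stack is no longer usable. A sketch of the guard-page test, with DEMO_* constants standing in for PAGE_SIZE and THREAD_SIZE:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE   4096UL
#define DEMO_THREAD_SIZE (4 * DEMO_PAGE_SIZE)

/* Unsigned arithmetic makes out-of-range addresses wrap to huge values,
 * so a single "< PAGE_SIZE" comparison per side is enough. */
static bool hits_stack_guard(unsigned long stack_base, unsigned long address)
{
	return (stack_base - 1 - address < DEMO_PAGE_SIZE) ||
	       (address - (stack_base + DEMO_THREAD_SIZE) < DEMO_PAGE_SIZE);
}

int main(void)
{
	unsigned long stack_base = 0x100000;

	printf("%d\n", hits_stack_guard(stack_base, stack_base - 8));                  /* 1: just below */
	printf("%d\n", hits_stack_guard(stack_base, stack_base + 2 * DEMO_PAGE_SIZE)); /* 0: inside the stack */
	return 0;
}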
693 /* Only not-present faults should be handled by KFENCE. */ in page_fault_oops()
760 loglvl, tsk->comm, task_pid_nr(tsk), address, in show_signal_msg()
761 (void *)regs->ip, (void *)regs->sp, error_code); in show_signal_msg()
763 print_vma_addr(KERN_CONT " in ", regs->ip); in show_signal_msg()
791 /* Implicit user access to kernel memory -- just oops */ in __bad_area_nosemaphore()
921 /* User-space => ok to do another page fault: */ in do_sigbus()
939 tsk->comm, tsk->pid, address); in do_sigbus()
966 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
968 * cross-processor TLB flush, even if no stale TLB entries exist
972 * fewer permissions than the page table entry. Non-present (P = 0)
978 * Returns non-zero if a spurious fault was handled, zero otherwise.
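In the terms of this comment, a fault is spurious when the page-table entry already grants everything the faulting access needed and only a stale, stricter TLB entry caused the trap; a non-present entry can never explain a spurious fault. A standalone sketch of that test (the P_* flags and fault_is_spurious() are illustrative, not the kernel's pte helpers):

#include <stdbool.h>
#include <stdio.h>

#define P_PRESENT 0x1
#define P_WRITE   0x2

static bool fault_is_spurious(unsigned int pte_flags, unsigned int needed)
{
	/* A non-present PTE can never make a fault spurious. */
	if (!(pte_flags & P_PRESENT))
		return false;
	/* Spurious iff the PTE already grants every required permission. */
	return (pte_flags & needed) == needed;
}

int main(void)
{
	/* PTE was upgraded RO -> RW, but a stale TLB entry still says read-only. */
	printf("%d\n", fault_is_spurious(P_PRESENT | P_WRITE, P_WRITE));  /* 1 */
	/* PTE really is read-only: a genuine protection fault. */
	printf("%d\n", fault_is_spurious(P_PRESENT, P_WRITE));            /* 0 */
	return 0;
}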
1061 * a follow-up action to resolve the fault, like a COW. in access_error()
1092 if (unlikely(!(vma->vm_flags & VM_SHADOW_STACK))) in access_error()
1094 if (unlikely(!(vma->vm_flags & VM_WRITE))) in access_error()
1101 if (unlikely(vma->vm_flags & VM_SHADOW_STACK)) in access_error()
1103 if (unlikely(!(vma->vm_flags & VM_WRITE))) in access_error()
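The access_error() fragments above encode the shadow-stack rules for write-class faults: a fault flagged as a shadow-stack access must land in a VMA that is both VM_SHADOW_STACK and VM_WRITE, while an ordinary write must not target a shadow-stack VMA at all. A standalone sketch of that check (the flag values and write_fault_error() are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE        0x2
#define VM_SHADOW_STACK 0x4

/* Returns true when a write-class fault (regular write or shadow-stack
 * access) is not permitted by the VMA's flags. */
static bool write_fault_error(bool shstk_access, unsigned long vm_flags)
{
	if (shstk_access) {
		/* Shadow-stack accesses need a writable shadow-stack VMA. */
		if (!(vm_flags & VM_SHADOW_STACK))
			return true;
		if (!(vm_flags & VM_WRITE))
			return true;
		return false;
	}
	/* An ordinary write must not land in a shadow-stack VMA. */
	if (vm_flags & VM_SHADOW_STACK)
		return true;
	if (!(vm_flags & VM_WRITE))
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", write_fault_error(true, VM_SHADOW_STACK | VM_WRITE)); /* 0: allowed */
	printf("%d\n", write_fault_error(false, VM_SHADOW_STACK));           /* 1: rejected */
	return 0;
}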
1122 * On 64-bit systems, the vsyscall page is at an address above in fault_in_kernel_space()
1150 * We can fault-in kernel-space virtual memory on-demand. The in do_kern_addr_fault()
1158 * Before doing this on-demand faulting, ensure that the in do_kern_addr_fault()
1161 * 2. A fault caused by a user-mode access. (Do not demand- in do_kern_addr_fault()
1162 * fault kernel memory due to user-mode accesses). in do_kern_addr_fault()
1163 * 3. A fault caused by a page-level protection violation. in do_kern_addr_fault()
1164 * (A demand fault would be on a non-present page which in do_kern_addr_fault()
1167 * This is only needed to close a race condition on x86-32 in in do_kern_addr_fault()
1169 * vmalloc_fault() for details. On x86-64 the race does not in do_kern_addr_fault()
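The three conditions spelled out above collapse into one mask test on the hardware error code: only attempt the lazy vmalloc sync when the reserved-bit, user-mode and protection-violation bits are all clear, i.e. a kernel access simply hit a not-present page. A sketch with local PF_* masks mirroring X86_PF_PROT/X86_PF_USER/X86_PF_RSVD:

#include <stdbool.h>
#include <stdio.h>

#define PF_PROT 0x1   /* protection violation (page was present) */
#define PF_USER 0x4   /* fault came from user mode */
#define PF_RSVD 0x8   /* reserved bit set in a page-table entry */

static bool may_demand_fault_kernel(unsigned long error_code)
{
	return !(error_code & (PF_RSVD | PF_USER | PF_PROT));
}

int main(void)
{
	printf("%d\n", may_demand_fault_kernel(0x0)); /* kernel read of a not-present page: 1 */
	printf("%d\n", may_demand_fault_kernel(0x5)); /* user-mode protection fault: 0 */
	return 0;
}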
1222 mm = tsk->mm; in do_user_addr_fault()
1259 !(regs->flags & X86_EFLAGS_AC))) { in do_user_addr_fault()
1277 /* Legacy check - remove this after verifying that it doesn't trigger */ in do_user_addr_fault()
1278 if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) { in do_user_addr_fault()
1288 * Read-only permissions can not be expressed in shadow stack PTEs. in do_user_addr_fault()
1350 /* Quick path to respond to signals */ in do_user_addr_fault()
1393 * Quick path to respond to signals. The core mm code in do_user_addr_fault()
1440 * oom-killed): in do_user_addr_fault()
1477 /* Was the fault on kernel-controlled part of the address space? */ in handle_page_fault()
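handle_page_fault() splits on exactly this question: addresses in the kernel half of the address space go to do_kern_addr_fault(), everything else to do_user_addr_fault(). A sketch of that routing; DEMO_USER_LIMIT stands in for the real boundary (TASK_SIZE_MAX), and the real check also special-cases the 64-bit vsyscall page:

#include <stdio.h>

#define DEMO_USER_LIMIT 0x00007fffffffffffULL  /* illustrative user-space ceiling */

static const char *route_fault(unsigned long long address)
{
	return (address >= DEMO_USER_LIMIT) ? "do_kern_addr_fault"
					    : "do_user_addr_fault";
}

int main(void)
{
	printf("%s\n", route_fault(0xffffffff81000000ULL));  /* kernel text address */
	printf("%s\n", route_fault(0x0000000000401000ULL));  /* typical user address */
	return 0;
}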
1500 prefetchw(&current->mm->mmap_lock); in DEFINE_IDTENTRY_RAW_ERRORCODE()
1516 * getting values from real and async page faults mixed up. in DEFINE_IDTENTRY_RAW_ERRORCODE()