Searched full:fault (Results 1 – 25 of 2006) sorted by relevance

/linux-6.12.1/drivers/gpu/drm/nouveau/nvkm/subdev/fault/
base.c
29 struct nvkm_fault *fault = container_of(event, typeof(*fault), event); in nvkm_fault_ntfy_fini() local
30 fault->func->buffer.intr(fault->buffer[index], false); in nvkm_fault_ntfy_fini()
36 struct nvkm_fault *fault = container_of(event, typeof(*fault), event); in nvkm_fault_ntfy_init() local
37 fault->func->buffer.intr(fault->buffer[index], true); in nvkm_fault_ntfy_init()
49 struct nvkm_fault *fault = nvkm_fault(subdev); in nvkm_fault_intr() local
50 return fault->func->intr(fault); in nvkm_fault_intr()
56 struct nvkm_fault *fault = nvkm_fault(subdev); in nvkm_fault_fini() local
57 if (fault->func->fini) in nvkm_fault_fini()
58 fault->func->fini(fault); in nvkm_fault_fini()
65 struct nvkm_fault *fault = nvkm_fault(subdev); in nvkm_fault_init() local
[all …]
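
The base.c matches above all follow the same nvkm event pattern: the notify callback receives only the embedded struct nvkm_event and recovers the enclosing fault object with container_of() before toggling the per-buffer interrupt. Below is a minimal stand-alone sketch of that recovery pattern; the struct names are simplified stand-ins, not the real nvkm types.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the nvkm types; the real ones live under nvkm/subdev/fault. */
struct event {
	const char *name;
};

struct fault_dev {
	struct event event;        /* embedded, as in struct nvkm_fault */
	bool intr_enabled[2];
};

/* container_of(): recover the enclosing structure from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors nvkm_fault_ntfy_init()/nvkm_fault_ntfy_fini(): the callback is
 * handed the event alone, yet needs the surrounding fault device. */
static void fault_ntfy_set(struct event *ev, int index, bool enable)
{
	struct fault_dev *fault = container_of(ev, struct fault_dev, event);

	fault->intr_enabled[index] = enable;
	printf("%s: buffer %d intr %s\n", ev->name, index, enable ? "on" : "off");
}

int main(void)
{
	struct fault_dev dev = { .event = { .name = "fault" } };

	fault_ntfy_set(&dev.event, 0, true);   /* the ntfy_init path */
	fault_ntfy_set(&dev.event, 0, false);  /* the ntfy_fini path */
	return 0;
}
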
gv100.c
33 struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work); in gv100_fault_buffer_process() local
34 struct nvkm_fault_buffer *buffer = fault->buffer[0]; in gv100_fault_buffer_process()
35 struct nvkm_device *device = fault->subdev.device; in gv100_fault_buffer_process()
44 const u32 base = get * buffer->fault->func->buffer.entry_size; in gv100_fault_buffer_process()
78 struct nvkm_device *device = buffer->fault->subdev.device; in gv100_fault_buffer_intr()
89 struct nvkm_device *device = buffer->fault->subdev.device; in gv100_fault_buffer_fini()
97 struct nvkm_device *device = buffer->fault->subdev.device; in gv100_fault_buffer_init()
109 struct nvkm_device *device = buffer->fault->subdev.device; in gv100_fault_buffer_info()
122 struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb); in gv100_fault_ntfy_nrpfb() local
124 schedule_work(&fault->nrpfb_work); in gv100_fault_ntfy_nrpfb()
[all …]
tu102.c
38 nvkm_event_ntfy(&buffer->fault->event, buffer->id, NVKM_FAULT_BUFFER_EVENT_PENDING); in tu102_fault_buffer_notify()
54 struct nvkm_device *device = buffer->fault->subdev.device; in tu102_fault_buffer_fini()
63 struct nvkm_device *device = buffer->fault->subdev.device; in tu102_fault_buffer_init()
75 struct nvkm_device *device = buffer->fault->subdev.device; in tu102_fault_buffer_info()
88 struct nvkm_fault *fault = container_of(inth, typeof(*fault), info_fault); in tu102_fault_info_fault() local
89 struct nvkm_subdev *subdev = &fault->subdev; in tu102_fault_info_fault()
116 tu102_fault_fini(struct nvkm_fault *fault) in tu102_fault_fini() argument
118 nvkm_event_ntfy_block(&fault->nrpfb); in tu102_fault_fini()
119 flush_work(&fault->nrpfb_work); in tu102_fault_fini()
121 if (fault->buffer[0]) in tu102_fault_fini()
[all …]
/linux-6.12.1/drivers/infiniband/hw/hfi1/
fault.c
13 #include "fault.h"
69 if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i]) in _fault_stats_seq_show()
74 (unsigned long long)ibd->fault->n_rxfaults[i], in _fault_stats_seq_show()
75 (unsigned long long)ibd->fault->n_txfaults[i]); in _fault_stats_seq_show()
96 struct fault *fault = file->private_data; in fault_opcodes_write() local
138 bitmap_zero(fault->opcodes, sizeof(fault->opcodes) * in fault_opcodes_write()
148 clear_bit(i, fault->opcodes); in fault_opcodes_write()
150 set_bit(i, fault->opcodes); in fault_opcodes_write()
170 struct fault *fault = file->private_data; in fault_opcodes_read() local
171 size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE; in fault_opcodes_read()
[all …]
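
The hfi1 matches show a debugfs-controlled bitmap with one bit per IB opcode, sized in bits via sizeof() * BITS_PER_BYTE and toggled with set_bit()/clear_bit(). Here is a userspace sketch of the same bookkeeping; the bit helpers are trivial stand-ins for the kernel ones, and the 256-entry opcode space is an assumption.

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define NUM_OPCODES 256                 /* one bit per opcode; assumed size */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long opcodes[NUM_OPCODES / BITS_PER_LONG];

/* Userspace stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
static void set_bit(unsigned int nr, unsigned long *map)
{ map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); }
static void clear_bit(unsigned int nr, unsigned long *map)
{ map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); }
static int test_bit(unsigned int nr, const unsigned long *map)
{ return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1; }

int main(void)
{
	/* fault_opcodes_write(): reset, then honour set/clear requests. */
	memset(opcodes, 0, sizeof(opcodes));   /* bitmap_zero() equivalent */
	set_bit(0x64, opcodes);                /* arm this opcode for faulting */
	set_bit(0x65, opcodes);
	clear_bit(0x65, opcodes);              /* disarm it again */

	/* fault_opcodes_read(): report which opcodes are armed. */
	for (unsigned int i = 0; i < NUM_OPCODES; i++)
		if (test_bit(i, opcodes))
			printf("opcode 0x%x armed for fault injection\n", i);
	return 0;
}
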
/linux-6.12.1/drivers/iommu/iommufd/
fault.c
82 if (!hwpt->fault) in iommufd_fault_domain_attach_dev()
99 struct iommufd_fault *fault = hwpt->fault; in iommufd_auto_response_faults() local
103 if (!fault) in iommufd_auto_response_faults()
106 mutex_lock(&fault->mutex); in iommufd_auto_response_faults()
107 list_for_each_entry_safe(group, next, &fault->deliver, node) { in iommufd_auto_response_faults()
115 xa_for_each(&fault->response, index, group) { in iommufd_auto_response_faults()
118 xa_erase(&fault->response, index); in iommufd_auto_response_faults()
122 mutex_unlock(&fault->mutex); in iommufd_auto_response_faults()
156 if (old->fault) in __fault_domain_replace_dev()
159 if (hwpt->fault) { in __fault_domain_replace_dev()
[all …]
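
iommufd_auto_response_faults() drains two containers under fault->mutex: the deliver list of fault groups userspace never read, and the response xarray of groups still awaiting an answer. A simplified sketch of that drain, with a plain linked list and a fixed array standing in for the kernel's list_head and xarray:

#include <stdio.h>
#include <stdlib.h>

struct fault_group {
	unsigned long index;
	struct fault_group *next;
};

struct fault_queue {
	struct fault_group *deliver;       /* not yet read by userspace */
	struct fault_group *response[8];   /* delivered, awaiting a response */
};

/* Mirrors iommufd_auto_response_faults(): complete everything outstanding
 * so the device can detach even if userspace never responds. */
static void auto_response_faults(struct fault_queue *q)
{
	/* the real code holds fault->mutex across both loops */
	while (q->deliver) {                     /* list_for_each_entry_safe */
		struct fault_group *g = q->deliver;

		q->deliver = g->next;
		printf("auto-responding undelivered group %lu\n", g->index);
		free(g);
	}
	for (unsigned long i = 0; i < 8; i++) {  /* xa_for_each + xa_erase */
		if (q->response[i]) {
			printf("auto-responding delivered group %lu\n", i);
			free(q->response[i]);
			q->response[i] = NULL;
		}
	}
}

int main(void)
{
	struct fault_queue q = { 0 };
	struct fault_group *g = malloc(sizeof(*g));

	g->index = 1;
	g->next = NULL;
	q.deliver = g;
	q.response[2] = calloc(1, sizeof(*g));
	auto_response_faults(&q);
	return 0;
}
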
/linux-6.12.1/arch/powerpc/mm/
fault.c
6 * Derived from "arch/i386/mm/fault.c"
109 * 5. T1 : enters fault handler, takes mmap_lock, etc... in bad_access_pkey()
140 vm_fault_t fault) in do_sigbus() argument
147 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { in do_sigbus()
150 pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", in do_sigbus()
153 if (fault & VM_FAULT_HWPOISON_LARGE) in do_sigbus()
154 lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); in do_sigbus()
155 if (fault & VM_FAULT_HWPOISON) in do_sigbus()
168 vm_fault_t fault) in mm_fault_error() argument
171 * Kernel page fault interrupted by SIGKILL. We have no reason to in mm_fault_error()
[all …]
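
The do_sigbus() lines show how a hardware-poison fault picks the granule size reported to userspace in siginfo.si_addr_lsb (alongside BUS_MCEERR_AR): the huge-page shift for VM_FAULT_HWPOISON_LARGE, otherwise PAGE_SHIFT. A sketch of that selection; the flag values and the 2MB huge shift are illustrative stand-ins.

#include <stdio.h>

/* Stand-in flag bits and shifts; the real ones come from linux/mm.h. */
#define VM_FAULT_HWPOISON       0x1
#define VM_FAULT_HWPOISON_LARGE 0x2
#define PAGE_SHIFT              12
#define HUGE_SHIFT              21   /* e.g. a 2MB huge page; assumption */

/* Mirrors the do_sigbus() logic above: pick the poison granule size. */
static unsigned int poison_lsb(unsigned int fault)
{
	if (fault & VM_FAULT_HWPOISON_LARGE)
		return HUGE_SHIFT;   /* hstate_index_to_shift() in the kernel */
	if (fault & VM_FAULT_HWPOISON)
		return PAGE_SHIFT;
	return 0;
}

int main(void)
{
	printf("lsb for 4K poison:   %u\n", poison_lsb(VM_FAULT_HWPOISON));
	printf("lsb for huge poison: %u\n", poison_lsb(VM_FAULT_HWPOISON_LARGE));
	return 0;
}
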
/linux-6.12.1/drivers/iommu/
io-pgfault.c
17 * Return the fault parameter of a device if it exists. Otherwise, return NULL.
35 /* Caller must hold a reference of the fault parameter. */
47 if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) in __iopf_free_group()
64 struct iommu_fault *fault) in report_partial_fault() argument
72 iopf->fault = *fault; in report_partial_fault()
98 group->last_fault.fault = evt->fault; in iopf_group_alloc()
106 if (iopf->fault.prm.grpid == evt->fault.prm.grpid) in iopf_group_alloc()
107 /* Insert *before* the last fault */ in iopf_group_alloc()
121 struct iommu_fault *fault = &evt->fault; in find_fault_handler() local
124 if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { in find_fault_handler()
[all …]
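
The io-pgfault matches describe the grouping protocol: page requests without the LAST_PAGE flag are stashed as partial faults, and the request carrying the flag sweeps every stashed entry with the same grpid into one group for the handler. A toy model of that accumulation; the fixed-size buffer and simplified request struct are assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified page request: the real struct iommu_fault carries prm.grpid
 * and prm.flags with IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE. */
struct page_req {
	unsigned int grpid;
	bool last;            /* LAST_PAGE flag: closes the group */
};

#define MAX_PARTIAL 16
static struct page_req partial[MAX_PARTIAL];
static int npartial;

/* Mirrors report_partial_fault()/iopf_group_alloc(): buffer requests until
 * the LAST_PAGE one arrives, then hand the whole group to the handler. */
static void handle_page_request(struct page_req req)
{
	if (!req.last) {
		if (npartial < MAX_PARTIAL)
			partial[npartial++] = req;   /* stash a partial fault */
		return;
	}

	/* Collect every buffered request with the same group ID. */
	int kept = 0;
	for (int i = 0; i < npartial; i++) {
		if (partial[i].grpid == req.grpid)
			printf("group %u: add member fault\n", req.grpid);
		else
			partial[kept++] = partial[i];
	}
	npartial = kept;
	printf("group %u: last fault seen, dispatch group\n", req.grpid);
}

int main(void)
{
	handle_page_request((struct page_req){ .grpid = 7, .last = false });
	handle_page_request((struct page_req){ .grpid = 7, .last = true  });
	return 0;
}
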
/linux-6.12.1/arch/arm64/mm/
fault.c
3 * Based on arch/arm/mm/fault.c
291 * If we now have a valid translation, treat the translation fault as in is_spurious_el1_translation_fault()
298 * If we got a different type of fault from the AT instruction, in is_spurious_el1_translation_fault()
299 * treat the translation fault as spurious. in is_spurious_el1_translation_fault()
349 * tag fault. in do_tag_recovery()
375 * Are we prepared to handle this kernel fault? in __do_kernel_fault()
382 "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) in __do_kernel_fault()
423 * an alignment fault not caused by the memory type would take in set_thread_esr()
424 * precedence over translation fault for a real access to empty in set_thread_esr()
425 * space. Unfortunately we can't easily distinguish "alignment fault in set_thread_esr()
[all …]
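
The arm64 comments describe the spurious-fault test: redo the translation (the kernel uses the AT instruction) and, if it now succeeds or fails with a different fault type, treat the original translation fault as spurious. A schematic sketch of that decision; the retry here is faked rather than issuing a real AT.

#include <stdbool.h>
#include <stdio.h>

enum fault_kind { FAULT_NONE, FAULT_TRANSLATION, FAULT_OTHER };

/* Stand-in for the AT-based re-walk of the translation. */
static enum fault_kind retry_translation(unsigned long addr)
{
	(void)addr;
	return FAULT_NONE;   /* pretend the translation is now valid */
}

/* Mirrors is_spurious_el1_translation_fault(): valid now, or a different
 * fault type, means the original fault came from a stale TLB entry. */
static bool is_spurious_translation_fault(unsigned long addr)
{
	enum fault_kind now = retry_translation(addr);

	return now == FAULT_NONE || now != FAULT_TRANSLATION;
}

int main(void)
{
	printf("spurious: %d\n",
	       is_spurious_translation_fault(0xffff000012345678UL));
	return 0;
}
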
/linux-6.12.1/arch/powerpc/platforms/powernv/
vas-fault.c
3 * VAS Fault handling.
21 * The maximum FIFO size for fault window can be 8MB
23 * instance will be having fault window.
35 pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size, in dump_fifo()
39 pr_err("Fault FIFO Dump:\n"); in dump_fifo()
47 * Process valid CRBs in fault FIFO.
50 * request buffers, raises interrupt on the CPU to handle the fault.
51 * It takes credit on fault window, updates nx_fault_stamp in CRB with
52 * the following information and pastes CRB in fault FIFO.
55 * fault_storage_addr - fault address
[all …]
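
The vas-fault.c comments explain the flow: NX stamps fault information (nx_fault_stamp, including fault_storage_addr) into the CRB and pastes it into a per-instance fault FIFO, and the CPU interrupt handler walks the FIFO consuming valid CRBs. A toy model of that consumer loop; the CRB layout and the validity convention here are assumptions, not the real paste format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIFO_CRBS 8

/* Toy CRB carrying only what the sketch needs. */
struct crb {
	uint8_t  valid;                  /* stand-in for the CCW validity check */
	uint64_t fault_storage_addr;     /* faulting address stamped by NX */
};

static struct crb fifo[FIFO_CRBS];
static unsigned int fifo_tail;       /* next entry the handler reads */

/* Mirrors the fault-handling loop: consume valid CRBs, stop at an empty slot. */
static void process_fault_fifo(void)
{
	while (fifo[fifo_tail].valid) {
		struct crb *crb = &fifo[fifo_tail];

		printf("fault CRB in slot %u, addr 0x%llx\n", fifo_tail,
		       (unsigned long long)crb->fault_storage_addr);
		memset(crb, 0, sizeof(*crb));             /* hand the slot back */
		fifo_tail = (fifo_tail + 1) % FIFO_CRBS;
	}
}

int main(void)
{
	fifo[0] = (struct crb){ .valid = 1, .fault_storage_addr = 0xdead000 };
	process_fault_fifo();
	return 0;
}
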
/linux-6.12.1/drivers/gpu/drm/nouveau/
nouveau_svm.c
66 u8 fault; member
68 } **fault; member
160 * page fault) and maybe some other commands. in nouveau_svmm_bind()
379 /* Issue fault replay for GPU to retry accesses that faulted previously. */
390 /* Cancel a replayable fault that could not be handled.
392 * Cancelling the fault will trigger recovery to reset the engine
412 struct nouveau_svm_fault *fault) in nouveau_svm_fault_cancel_fault() argument
414 nouveau_svm_fault_cancel(svm, fault->inst, in nouveau_svm_fault_cancel_fault()
415 fault->hub, in nouveau_svm_fault_cancel_fault()
416 fault->gpc, in nouveau_svm_fault_cancel_fault()
[all …]
/linux-6.12.1/arch/x86/kvm/mmu/
paging_tmpl.h
92 struct x86_exception fault; member
249 ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault); in FNAME()
352 * Queue a page fault for injection if this assertion fails, as callers in FNAME()
353 * assume that walker.fault contains sane info on a walk failure. I.e. in FNAME()
380 nested_access, &walker->fault); in FNAME()
384 * instruction) triggers a nested page fault. The exit in FNAME()
386 * "guest page access" as the nested page fault's cause, in FNAME()
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
458 * On a write fault, fold the dirty bit into accessed_dirty. in FNAME()
481 walker->fault.vector = PF_VECTOR; in FNAME()
[all …]
mmu_internal.h
215 * Maximum page size that can be created for this fault; input to
252 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
258 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
259 * RET_PF_RETRY: let CPU fault again on the address.
260 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
263 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
286 struct kvm_page_fault *fault) in kvm_mmu_prepare_memory_fault_exit() argument
288 kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT, in kvm_mmu_prepare_memory_fault_exit()
289 PAGE_SIZE, fault->write, fault->exec, in kvm_mmu_prepare_memory_fault_exit()
290 fault->is_private); in kvm_mmu_prepare_memory_fault_exit()
[all …]
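
mmu_internal.h documents the RET_PF_* contract that kvm_tdp_page_fault() and friends return. Below is the same contract as a stand-alone enum plus the dispatch a caller might do; the numeric values and the RET_PF_FIXED wording are illustrative, the authoritative definitions live in the header itself.

#include <stdio.h>

enum ret_pf {
	RET_PF_CONTINUE,   /* keep handling the page fault */
	RET_PF_RETRY,      /* let the CPU fault again on the address */
	RET_PF_EMULATE,    /* MMIO fault: emulate the instruction */
	RET_PF_INVALID,    /* stale SPTE: take the real page fault path */
	RET_PF_FIXED,      /* assumption: fault resolved, resume the guest */
};

static const char *ret_pf_action(enum ret_pf r)
{
	switch (r) {
	case RET_PF_CONTINUE: return "continue fault handling";
	case RET_PF_RETRY:    return "re-enter guest and refault";
	case RET_PF_EMULATE:  return "emulate the instruction";
	case RET_PF_INVALID:  return "fall back to the full fault path";
	case RET_PF_FIXED:    return "resume the guest";
	}
	return "unknown";
}

int main(void)
{
	for (enum ret_pf r = RET_PF_CONTINUE; r <= RET_PF_FIXED; r++)
		printf("%d -> %s\n", r, ret_pf_action(r));
	return 0;
}
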
/linux-6.12.1/arch/microblaze/mm/
fault.c
2 * arch/microblaze/mm/fault.c
6 * Derived from "arch/ppc/mm/fault.c"
9 * Derived from "arch/i386/mm/fault.c"
71 /* Are we prepared to handle this fault? */ in bad_page_fault()
83 * The error_code parameter is ESR for a data fault,
84 * 0 for an instruction fault.
93 vm_fault_t fault; in do_page_fault() local
115 pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n", in do_page_fault()
119 die("Weird page fault", regs, SIGSEGV); in do_page_fault()
130 * erroneous fault occurring in a code path which already holds mmap_lock in do_page_fault()
[all …]
/linux-6.12.1/arch/m68k/mm/
fault.c
3 * linux/arch/m68k/mm/fault.c
20 #include "fault.h"
64 * bit 0 == 0 means no page found, 1 means protection fault
75 vm_fault_t fault; in do_page_fault() local
78 pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", in do_page_fault()
83 * context, we must not take the fault.. in do_page_fault()
136 * If for any reason at all we couldn't handle the fault, in do_page_fault()
138 * the fault. in do_page_fault()
141 fault = handle_mm_fault(vma, address, flags, regs); in do_page_fault()
142 pr_debug("handle_mm_fault returns %x\n", fault); in do_page_fault()
[all …]
/linux-6.12.1/arch/arm/mm/
fsr-3level.c
7 { do_bad, SIGBUS, 0, "reserved translation fault" },
8 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
9 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
10 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
11 { do_bad, SIGBUS, 0, "reserved access flag fault" },
12 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
13 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
14 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
15 { do_bad, SIGBUS, 0, "reserved permission fault" },
16 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
[all …]
fsr-2level.c
12 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
14 { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
16 { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
18 { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
20 { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
22 { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
50 { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
52 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
53 { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
54 { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
[all …]
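
Both fsr tables above have the same shape: one entry per fault-status encoding, pairing a handler with the signal and si_code to raise when the handler cannot fix things up. A compilable miniature of that dispatch; the three-entry table and the fsr masking are simplified stand-ins for the full 2-level/3-level encodings.

#include <signal.h>
#include <stdio.h>

struct fsr_info {
	int  (*fn)(unsigned long addr);
	int  sig;
	int  code;
	const char *name;
};

static int do_page_fault(unsigned long addr)        { (void)addr; return 0; }
static int do_translation_fault(unsigned long addr) { (void)addr; return 0; }
static int do_bad(unsigned long addr)               { (void)addr; return 1; }

static const struct fsr_info fsr_info[] = {
	{ do_bad,               SIGBUS,  0,           "reserved translation fault" },
	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault"  },
	{ do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 3 access flag fault"  },
};

/* Dispatch on the status read from the fault status register. */
static void handle_fault(unsigned int fsr, unsigned long addr)
{
	const struct fsr_info *inf = &fsr_info[fsr % 3];  /* masked FSR in reality */

	if (inf->fn(addr))
		printf("unhandled %s: raise signal %d\n", inf->name, inf->sig);
}

int main(void)
{
	handle_fault(0, 0x1000);
	handle_fault(1, 0x2000);
	return 0;
}
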
/linux-6.12.1/arch/parisc/mm/
fault.c
46 * the instruction has generated some sort of a memory access fault).
106 * Data TLB miss fault/data page fault in parisc_acctyp()
204 [6] = "Instruction TLB miss fault",
213 [15] = "Data TLB miss fault",
214 [16] = "Non-access ITLB miss fault",
215 [17] = "Non-access DTLB miss fault",
274 vm_fault_t fault = 0; in do_page_fault() local
281 msg = "Page fault: no context"; in do_page_fault()
313 * If for any reason at all we couldn't handle the fault, make in do_page_fault()
315 * fault. in do_page_fault()
[all …]
/linux-6.12.1/arch/hexagon/mm/
vm_fault.c
3 * Memory fault handling for Hexagon
9 * Page fault handling for the Hexagon Virtual Machine.
35 * Canonical page fault handler
43 vm_fault_t fault; in do_page_fault() local
49 * then must not take the fault. in do_page_fault()
84 fault = handle_mm_fault(vma, address, flags, regs); in do_page_fault()
86 if (fault_signal_pending(fault, regs)) { in do_page_fault()
92 /* The fault is fully completed (including releasing mmap lock) */ in do_page_fault()
93 if (fault & VM_FAULT_COMPLETED) in do_page_fault()
97 if (likely(!(fault & VM_FAULT_ERROR))) { in do_page_fault()
[all …]
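
The hexagon handler, like the nios2, alpha and riscv ones elsewhere in these results, ends with the same tail: call handle_mm_fault(), then branch on a pending signal, VM_FAULT_COMPLETED, VM_FAULT_RETRY and VM_FAULT_ERROR. A sketch of that tail with stand-in flag values (the real VM_FAULT_* bits live in linux/mm.h):

#include <stdbool.h>
#include <stdio.h>

#define VM_FAULT_COMPLETED 0x1   /* fault done, mmap lock already dropped */
#define VM_FAULT_RETRY     0x2   /* lock dropped, caller should retry */
#define VM_FAULT_ERROR     0x4   /* OOM / SIGBUS / SIGSEGV family */

static void page_fault_tail(unsigned int fault, bool signal_pending)
{
	if (signal_pending) {                 /* fault_signal_pending() */
		puts("interrupted by a signal; return and handle it");
		return;
	}
	if (fault & VM_FAULT_COMPLETED) {
		puts("fully handled, mmap lock already released");
		return;
	}
	if (fault & VM_FAULT_RETRY) {
		puts("retry: retake the mmap lock and walk the VMA again");
		return;
	}
	if (fault & VM_FAULT_ERROR)
		puts("error: deliver SIGBUS/SIGSEGV or invoke the OOM killer");
	else
		puts("success: release the mmap lock and resume the task");
}

int main(void)
{
	page_fault_tail(VM_FAULT_COMPLETED, false);
	page_fault_tail(VM_FAULT_RETRY, false);
	return 0;
}
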
/linux-6.12.1/arch/alpha/mm/
fault.c
3 * linux/arch/alpha/mm/fault.c
65 * 2 = fault-on-read
66 * 3 = fault-on-execute
67 * 4 = fault-on-write
92 vm_fault_t fault; in do_page_fault() local
110 we must not take the fault. */ in do_page_fault()
142 /* If for any reason at all we couldn't handle the fault, in do_page_fault()
144 the fault. */ in do_page_fault()
145 fault = handle_mm_fault(vma, address, flags, regs); in do_page_fault()
147 if (fault_signal_pending(fault, regs)) { in do_page_fault()
[all …]
/linux-6.12.1/arch/riscv/mm/
fault.c
42 /* Are we prepared to handle this kernel fault? */ in no_context()
62 static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) in mm_fault_error() argument
69 if (fault & VM_FAULT_OOM) { in mm_fault_error()
72 * (which will retry the fault, or kill us if we got oom-killed). in mm_fault_error()
76 } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) { in mm_fault_error()
80 } else if (fault & VM_FAULT_SIGSEGV) { in mm_fault_error()
231 vm_fault_t fault; in handle_page_fault() local
243 * Fault-in kernel-space virtual memory on-demand. in handle_page_fault()
263 * in an atomic region, then we must not take the fault. in handle_page_fault()
302 fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs); in handle_page_fault()
[all …]
/linux-6.12.1/arch/powerpc/lib/
checksum_32.S
109 EX_TABLE(8 ## n ## 0b, fault); \
110 EX_TABLE(8 ## n ## 1b, fault); \
111 EX_TABLE(8 ## n ## 2b, fault); \
112 EX_TABLE(8 ## n ## 3b, fault); \
113 EX_TABLE(8 ## n ## 4b, fault); \
114 EX_TABLE(8 ## n ## 5b, fault); \
115 EX_TABLE(8 ## n ## 6b, fault); \
116 EX_TABLE(8 ## n ## 7b, fault);
240 fault: label
244 EX_TABLE(70b, fault);
[all …]
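
Each EX_TABLE() line registers a (faulting instruction, fixup) pair so that a fault inside the user-access checksum loop branches to the fault: label instead of oopsing the kernel. A toy model of the table the trap handler searches; the addresses are fake tokens, and the real lookup is search_exception_tables().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct exception_table_entry {
	uintptr_t insn;    /* address of the instruction that may fault */
	uintptr_t fixup;   /* where to resume if it does */
};

static const struct exception_table_entry ex_table[] = {
	{ 0x800, 0xf00 },  /* analogue of EX_TABLE(800b, fault) */
	{ 0x810, 0xf00 },
};

/* The trap handler's search: a linear scan stands in for the kernel's
 * sorted-table binary search. */
static uintptr_t search_ex_table(uintptr_t faulting_pc)
{
	for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == faulting_pc)
			return ex_table[i].fixup;
	return 0;   /* no fixup registered: a genuine kernel bug */
}

int main(void)
{
	printf("fixup for 0x800: 0x%lx\n", (unsigned long)search_ex_table(0x800));
	printf("fixup for 0x900: 0x%lx\n", (unsigned long)search_ex_table(0x900));
	return 0;
}
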
/linux-6.12.1/arch/nios2/mm/
fault.c
5 * based on arch/mips/mm/fault.c which is:
50 vm_fault_t fault; in do_page_fault() local
59 * We fault-in kernel-space virtual memory on-demand. The in do_page_fault()
79 * context, we must not take the fault.. in do_page_fault()
120 * If for any reason at all we couldn't handle the fault, in do_page_fault()
122 * the fault. in do_page_fault()
124 fault = handle_mm_fault(vma, address, flags, regs); in do_page_fault()
126 if (fault_signal_pending(fault, regs)) { in do_page_fault()
132 /* The fault is fully completed (including releasing mmap lock) */ in do_page_fault()
133 if (fault & VM_FAULT_COMPLETED) in do_page_fault()
[all …]
/linux-6.12.1/Documentation/fault-injection/
fault-injection.rst
2 Fault injection capabilities infrastructure
8 Available fault injection capabilities
25 injects futex deadlock and uaddr fault errors.
48 - NVMe fault injection
55 - Null test block driver fault injection
64 Configure fault-injection capabilities behavior
70 fault-inject-debugfs kernel module provides some debugfs entries for runtime
71 configuration of fault-injection capabilities.
109 to debug the problems revealed by fault injection.
240 that the fault setup with a previous write to this file was injected.
[all …]
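
The fault-injection document configures each capability through fault_attr knobs such as probability, interval and times, exposed via debugfs. A userspace model of how should_fail() might combine them; the real decision logic in lib/fault-inject.c has more conditions (task filtering, stack-depth and space checks) than this sketch.

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the documented fault_attr knobs. */
struct fault_attr {
	int probability;     /* percent of considered calls that fail */
	int interval;        /* only every Nth call is considered */
	int times;           /* stop after this many faults; -1 = unlimited */
	unsigned long count; /* calls seen so far */
};

/* Rough equivalent of should_fail(): decide whether to inject a fault. */
static int should_fail(struct fault_attr *attr)
{
	attr->count++;
	if (attr->interval > 1 && attr->count % attr->interval)
		return 0;
	if (attr->times == 0)
		return 0;
	if ((rand() % 100) >= attr->probability)
		return 0;
	if (attr->times > 0)
		attr->times--;
	return 1;
}

int main(void)
{
	struct fault_attr attr = { .probability = 50, .interval = 2, .times = 3 };

	for (int i = 0; i < 20; i++)
		if (should_fail(&attr))
			printf("call %d: injected fault\n", i);
	return 0;
}
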
/linux-6.12.1/arch/sh/mm/
fault.c
2 * Page fault handler for SH with an MMU.
7 * Based on linux/arch/i386/mm/fault.c:
157 * be another reason for the fault. Return NULL here to in vmalloc_sync_one()
158 * signal that we have not taken care of the fault. in vmalloc_sync_one()
174 * Handle a fault on the vmalloc or module mapping area
225 /* Are we prepared to handle this kernel fault? */ in no_context()
314 unsigned long address, vm_fault_t fault) in mm_fault_error() argument
320 if (fault_signal_pending(fault, regs)) { in mm_fault_error()
327 if (!(fault & VM_FAULT_RETRY)) in mm_fault_error()
330 if (!(fault & VM_FAULT_ERROR)) in mm_fault_error()
[all …]
/linux-6.12.1/arch/mips/kernel/
unaligned.c
175 goto fault; in emulate_load_store_insn()
184 goto fault; in emulate_load_store_insn()
193 goto fault; in emulate_load_store_insn()
213 goto fault; in emulate_load_store_insn()
222 goto fault; in emulate_load_store_insn()
243 goto fault; in emulate_load_store_insn()
252 goto fault; in emulate_load_store_insn()
261 goto fault; in emulate_load_store_insn()
272 goto fault; in emulate_load_store_insn()
281 goto fault; in emulate_load_store_insn()
[all …]
