/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fault/
    base.c    - hits at lines 29-30, 36-37, 49-50, 56, 65, 72, 105, 129, 158 in
                nvkm_fault_ntfy_fini(), nvkm_fault_ntfy_init(), nvkm_fault_intr(),
                nvkm_fault_fini(), nvkm_fault_init(), nvkm_fault_oneinit_buffer(),
                nvkm_fault_oneinit(), nvkm_fault_dtor() and nvkm_fault_new_()
                (hit list truncated)
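The base.c hits at lines 29-30 and 36-37 show the event notify callbacks recovering the
owning nvkm_fault from its embedded event member with container_of() before toggling the
fault buffer interrupt. A minimal stand-alone C sketch of that idiom follows; the struct
layouts are simplified stand-ins, not the real driver types.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins; the real structures live in the nouveau driver headers. */
struct nvkm_event { int dummy; };

struct nvkm_fault {
	int intr_enabled;
	struct nvkm_event event;	/* embedded member handed to the callbacks */
};

/* Same idiom as the kernel's container_of(): recover the outer object
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void fault_ntfy_toggle(struct nvkm_event *event, int enable)
{
	struct nvkm_fault *fault = container_of(event, struct nvkm_fault, event);

	fault->intr_enabled = enable;	/* the driver programs the buffer IRQ here */
}

int main(void)
{
	struct nvkm_fault fault = { 0 };

	fault_ntfy_toggle(&fault.event, 1);
	printf("intr_enabled=%d\n", fault.intr_enabled);
	return 0;
}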
    gv100.c   - hits at lines 33-35, 44, 78, 89, 97, 109, 122, 129, 155, 187, 199, 207 in
                gv100_fault_buffer_process(), gv100_fault_buffer_intr(),
                gv100_fault_buffer_fini(), gv100_fault_buffer_init(),
                gv100_fault_buffer_info(), gv100_fault_ntfy_nrpfb(),
                gv100_fault_intr_fault(), gv100_fault_intr(), gv100_fault_fini(),
                gv100_fault_init() and gv100_fault_oneinit() (hit list truncated)
    tu102.c   - hits at lines 38, 54, 63, 75, 88-89, 116-119, 128, 137 in
                tu102_fault_buffer_notify(), tu102_fault_buffer_fini(),
                tu102_fault_buffer_init(), tu102_fault_buffer_info(),
                tu102_fault_info_fault(), tu102_fault_fini(), tu102_fault_init()
                and tu102_fault_oneinit() (hit list truncated)
    Kbuild    - lines 2-7: nvkm-y += nvkm/subdev/fault/base.o, user.o, gp100.o,
                gp10b.o, gv100.o, tu102.o
    user.c    - hits at lines 42, 51, 62, 70, 96-97 in nvkm_ufault_uevent(),
                nvkm_ufault_map(), nvkm_ufault_fini(), nvkm_ufault_init()
                and nvkm_ufault_new()
/linux/drivers/infiniband/hw/hfi1/
    fault.c   - hits at lines 13, 69, 74-75, 96, 135, 145, 166: the fault.h include,
                _fault_stats_seq_show() (per-opcode rx/tx fault counters),
                fault_opcodes_write() and fault_opcodes_read() (debugfs bitmap of
                opcodes selected for fault injection) (hit list truncated)
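The fault_opcodes_write() hits (bitmap_zero() and clear_bit() on fault->opcodes) suggest
the debugfs file maintains a bitmap of opcodes to inject faults on. A rough user-space
sketch of such an opcode bitmap follows; the helper names are invented, only the
bit-twiddling mirrors the kernel bitmap helpers.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_OPCODES   256
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS  ((NUM_OPCODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Illustrative stand-ins for bitmap_zero()/set_bit()/test_bit(). */
static void opcode_bitmap_zero(unsigned long *map)
{
	memset(map, 0, BITMAP_WORDS * sizeof(unsigned long));
}

static void opcode_bitmap_set(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static bool opcode_bitmap_test(const unsigned long *map, unsigned int bit)
{
	return map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
}

int main(void)
{
	unsigned long opcodes[BITMAP_WORDS];

	opcode_bitmap_zero(opcodes);
	opcode_bitmap_set(opcodes, 0x64);	/* fault-inject this opcode only */

	printf("inject 0x64: %d, inject 0x00: %d\n",
	       opcode_bitmap_test(opcodes, 0x64),
	       opcode_bitmap_test(opcodes, 0x00));
	return 0;
}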
/linux/drivers/iommu/iommufd/
    eventq.c  - hits at lines 22-53, 60, 84, 101, 116, 129, 179, 423, 462 in
                iommufd_auto_response_faults(), iommufd_fault_destroy(),
                iommufd_compose_fault_message(), iommufd_fault_deliver_fetch(),
                iommufd_fault_deliver_restore(), iommufd_fault_fops_read(),
                iommufd_fault_fops_write(), iommufd_fault_alloc() and
                iommufd_fault_iopf_handler() (hit list truncated)
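Read together, these hits outline a two-stage flow: fault groups sit on a deliver list
until userspace reads them from the event queue, then wait in a response table keyed by
cookie until userspace writes a reply (or they are auto-responded when the device
detaches). A much-simplified, lock-free user-space model of that bookkeeping; all types
and names below are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 16

/* Simplified model of a pending page-fault group. */
struct fault_group {
	unsigned int cookie;
	bool valid;
};

static struct fault_group deliver[MAX_PENDING];		/* not yet read by userspace */
static struct fault_group response[MAX_PENDING];	/* read, awaiting a reply */

/* "read" side: move the first delivered group into the response table. */
static int fetch_fault(unsigned int *cookie)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (deliver[i].valid) {
			*cookie = deliver[i].cookie;
			response[i] = deliver[i];
			deliver[i].valid = false;
			return 0;
		}
	}
	return -1;
}

/* "write" side: complete the group identified by cookie. */
static int respond_fault(unsigned int cookie)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (response[i].valid && response[i].cookie == cookie) {
			response[i].valid = false;	/* kernel would answer the iopf group here */
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int cookie;

	deliver[0] = (struct fault_group){ .cookie = 42, .valid = true };
	if (!fetch_fault(&cookie))
		printf("delivered cookie %u\n", cookie);
	printf("respond: %d\n", respond_fault(cookie));
	return 0;
}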
/linux/drivers/gpu/drm/ci/xfails/
    msm-sm8350-hdk-skips.txt - hits at lines 24-34: commented dmesg excerpts recorded in
                the skip list, of the form "*** gpu fault: ttbr0=00000001160d6000
                iova=0001000000001000 dir=WRITE type=PERMISSION|UNKNOWN source=CP
                (0,0,0,1)" (hit list truncated)
/linux/arch/x86/kvm/mmu/
    paging_tmpl.h - hits at lines 92, 249, 352-353, 380, 384-386, 448, 458, 481: the
                walker's x86_exception fault member, the __try_cmpxchg_user() guest-PTE
                update (with fault as its error label), and the comments/calls that fill
                walker->fault on a failed guest page-table walk (nested page fault
                reporting, dirty-bit folding on write faults) (hit list truncated)
    mmu_internal.h - hits at lines 261, 303, 309-314, 343-346, 354: the kvm_page_fault
                max-level comment, kvm_tdp_page_fault(), the RET_PF_* return-code
                documentation (CONTINUE / RETRY / EMULATE / INVALID),
                kvm_mmu_prepare_memory_fault_exit() and the kvm_page_fault local in
                kvm_mmu_do_page_fault() (hit list truncated)
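The RET_PF_* documentation captured here describes the contract between the page fault
handlers and their callers. A tiny illustrative mirror of that contract; the enum values
and the helper are made up, only the documented meanings come from the source.

#include <stdio.h>

/* Illustrative mirror of the return codes documented in mmu_internal.h;
 * the numeric values here are invented. */
enum ret_pf {
	RET_PF_CONTINUE = 0,	/* so far so good, keep handling the fault */
	RET_PF_RETRY,		/* let the CPU fault again on the address */
	RET_PF_EMULATE,		/* MMIO fault: emulate the instruction */
	RET_PF_INVALID,		/* stale SPTE: take the real fault path */
};

static const char *ret_pf_action(enum ret_pf r)
{
	switch (r) {
	case RET_PF_CONTINUE: return "continue handling";
	case RET_PF_RETRY:    return "re-enter guest and retry";
	case RET_PF_EMULATE:  return "emulate the instruction";
	case RET_PF_INVALID:  return "take the slow fault path";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", ret_pf_action(RET_PF_EMULATE));
	return 0;
}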
/linux/arch/mips/kernel/
    unaligned.c - hits at lines 175, 184, 193, 213, 222, 243, 252, 261, 272, 281:
                repeated "goto fault;" statements in emulate_load_store_insn(), jumping
                to the common fault label when a user access fails during unaligned
                load/store emulation (hit list truncated)
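These hits are all the same idiom: every byte-wise user access is checked and any failure
jumps to a shared fault: label that delivers the signal or applies the exception fixup.
A compact stand-alone C sketch of that error-label style; fake_get_user() is a stand-in
for the kernel's __get_user().

#include <stdio.h>

/* Stand-in for __get_user(): returns 0 on success, non-zero on a faulting access. */
static int fake_get_user(unsigned char *dst, const unsigned char *src)
{
	if (!src)
		return -1;
	*dst = *src;
	return 0;
}

/* Emulate an unaligned 16-bit load one byte at a time, using the same
 * "goto fault" error-label style as emulate_load_store_insn(). */
static int emulate_unaligned_lhu(const unsigned char *addr, unsigned int *out)
{
	unsigned char lo, hi;

	if (fake_get_user(&lo, addr))
		goto fault;
	if (fake_get_user(&hi, addr + 1))
		goto fault;

	*out = lo | (hi << 8);
	return 0;

fault:
	/* the kernel would deliver SIGSEGV / run the exception fixup here */
	return -1;
}

int main(void)
{
	unsigned char buf[] = { 0x34, 0x12 };
	unsigned int v;

	if (!emulate_unaligned_lhu(buf, &v))
		printf("0x%04x\n", v);
	return 0;
}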
/linux/arch/nios2/kernel/
    misaligned.c - hits at lines 72, 85, 98-99, 111-112, 116-117, 133-134 in
                handle_unaligned_c(): a local "fault" accumulator OR-ed with the return
                values of the byte-wise __get_user()/__put_user() calls
                (hit list truncated)
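nios2 uses the other common shape: instead of jumping to a label, it ORs every accessor's
return value into one fault flag and tests it once. A short stand-alone illustration,
again with a stand-in for __get_user().

#include <stdio.h>

/* Stand-in for __get_user(): 0 on success, non-zero on a faulting access. */
static int fake_get_user(unsigned char *dst, const unsigned char *src)
{
	if (!src)
		return -1;
	*dst = *src;
	return 0;
}

/* Read a misaligned 16-bit value byte by byte, OR-ing the error codes. */
static int read_unaligned_u16(const unsigned char *addr, unsigned int *out)
{
	unsigned char d0, d1;
	unsigned int fault = 0;

	fault |= fake_get_user(&d0, addr + 0);
	fault |= fake_get_user(&d1, addr + 1);
	if (fault)
		return -1;	/* any failed access poisons the whole emulation */

	*out = d0 | (d1 << 8);
	return 0;
}

int main(void)
{
	unsigned char buf[] = { 0xcd, 0xab };
	unsigned int v;

	if (!read_unaligned_u16(buf, &v))
		printf("0x%04x\n", v);
	return 0;
}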
/linux/drivers/gpu/drm/nouveau/
    nouveau_svm.c - hits at lines 66, 68, 160, 379, 390-392, 412-416, 421, 469, 869: the
                fault members of the SVM structures, a comment in nouveau_svmm_bind(),
                the fault-replay and replayable-fault-cancel comments,
                nouveau_svm_fault_cancel_fault(), nouveau_svm_fault_priority(),
                nouveau_svm_fault_cache() and nouveau_svm_fault() (hit list truncated)
/linux/arch/mips/loongson64/
    cop2-ex.c - hits at lines 76, 80, 92, 96, 118, 123, 135, 141, 165, 176: "goto fault;"
                statements in loongson_cu2_call(), the same error-label pattern as the
                MIPS unaligned handler above (hit list truncated)
/linux/arch/arc/kernel/
    unaligned.c - hits at lines 51, 66, 93, 126, 160, 180, 192, 225: "goto fault;" inside
                the access macros and in fixup_store(), the "fault: state->fault = 1"
                labels in fixup_load() and fixup_store(), and the state.fault check in
                misaligned_fixup() (hit list truncated)
/linux/arch/powerpc/mm/
    fault.c   - hits at lines 6, 110, 141-156, 169-172, 423: heritage comments, the
                bad_access_pkey() race note, do_sigbus() (including the
                VM_FAULT_HWPOISON / VM_FAULT_HWPOISON_LARGE lsb handling),
                mm_fault_error() and the fault locals in ___do_page_fault()
                (hit list truncated)
/linux/arch/riscv/mm/
    fault.c   - hits at lines 97, 117-135, 286, 323, 362-363: the no_context() comment,
                mm_fault_error() (VM_FAULT_OOM / SIGBUS / HWPOISON / SIGSEGV dispatch)
                and handle_page_fault() (hit list truncated)
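mm_fault_error() here is the usual mapping from handle_mm_fault() result bits to the
action taken for a user fault. A tiny stand-alone sketch of that dispatch; the flag
values below are illustrative, not the kernel's real VM_FAULT_* encoding.

#include <stdio.h>

/* Illustrative flag bits; the real values live in <linux/mm_types.h>. */
#define VM_FAULT_OOM      0x0001u
#define VM_FAULT_SIGBUS   0x0002u
#define VM_FAULT_SIGSEGV  0x0004u
#define VM_FAULT_HWPOISON 0x0010u

/* Mirrors the shape of mm_fault_error(): map a fault result to an action. */
static const char *fault_error_action(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return "invoke the OOM killer (pagefault_out_of_memory)";
	if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON))
		return "deliver SIGBUS";
	if (fault & VM_FAULT_SIGSEGV)
		return "deliver SIGSEGV";
	return "unhandled";
}

int main(void)
{
	printf("%s\n", fault_error_action(VM_FAULT_SIGBUS));
	return 0;
}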
/linux/arch/hexagon/mm/
    vm_fault.c - hits at lines 3, 9, 35, 43, 49, 84-97: the file header ("Memory fault
                handling for Hexagon", "Page fault handling for the Hexagon Virtual
                Machine", "Canonical page fault handler") and do_page_fault() around
                the handle_mm_fault() call, fault_signal_pending(), VM_FAULT_COMPLETED
                and the error checks (hit list truncated)
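The do_page_fault() hits trace the canonical arch fault loop shared by most of the
architectures in this listing: call handle_mm_fault(), bail out if a signal interrupted
the fault, treat VM_FAULT_COMPLETED as done, and retry with FAULT_FLAG_TRIED when asked.
A compressed stand-alone sketch of that control flow with stubbed helpers; the flag
values and stub behaviour are made up.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flags; real values come from <linux/mm_types.h>. */
#define VM_FAULT_RETRY     0x01u
#define VM_FAULT_COMPLETED 0x02u
#define VM_FAULT_ERROR     0x04u
#define FAULT_FLAG_TRIED   0x10u

/* Stubbed stand-in for handle_mm_fault(): asks for one retry, then succeeds. */
static unsigned int stub_handle_mm_fault(unsigned int flags)
{
	return (flags & FAULT_FLAG_TRIED) ? 0 : VM_FAULT_RETRY;
}

static bool stub_signal_pending(void)
{
	return false;
}

/* Shape of the canonical do_page_fault() loop. */
static int page_fault_loop(void)
{
	unsigned int flags = 0, fault;

retry:
	fault = stub_handle_mm_fault(flags);

	if (stub_signal_pending())
		return -1;		/* fault interrupted by a signal */
	if (fault & VM_FAULT_COMPLETED)
		return 0;		/* mmap lock already dropped, all done */
	if (fault & VM_FAULT_ERROR)
		return -1;		/* OOM / SIGBUS / SIGSEGV handling */
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;		/* take the fault again, now blocking */
	}
	return 0;
}

int main(void)
{
	printf("page_fault_loop() -> %d\n", page_fault_loop());
	return 0;
}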
/linux/arch/alpha/mm/
    fault.c   - hits at lines 3, 65-67, 92, 110, 142-147: the file header, the comment
                decoding the cause register (2 = fault-on-read, 3 = fault-on-execute,
                4 = fault-on-write) and do_page_fault() (hit list truncated)
/linux/arch/microblaze/mm/
    fault.c   - hits at lines 2, 6, 9, 71, 83-84, 93, 115, 119, 130: heritage comments,
                bad_page_fault(), the note that error_code is the ESR for a data fault
                and 0 for an instruction fault, and do_page_fault() (hit list truncated)
/linux/arch/nios2/mm/
    fault.c   - hits at lines 5, 50, 59, 79, 120-133: the header crediting
                arch/mips/mm/fault.c and do_page_fault() around the handle_mm_fault()
                call and its result handling (hit list truncated)
/linux/arch/parisc/mm/
    fault.c   - hits at lines 46, 106, 204, 213-217, 274, 281, 313: parisc_acctyp()
                comments, the trap-name table ("Instruction TLB miss fault", "Data TLB
                miss fault", "Non-access ITLB/DTLB miss fault") and do_page_fault()
                (hit list truncated)
/linux/arch/csky/mm/
    fault.c   - hits at lines 58, 73-88, 195, 225, 259-263: the no_context() comment,
                mm_fault_error() (VM_FAULT_OOM / VM_FAULT_SIGBUS handling) and
                do_page_fault() (hit list truncated)
/linux/arch/x86/hyperv/
    nested.c  - hits at lines 29, 37, 50, 97, 105, 114, 126: "goto fault;" statements and
                the fault: labels in hyperv_flush_guest_mapping() and
                hyperv_flush_guest_mapping_range()
/linux/arch/openrisc/mm/
    fault.c   - hits at lines 3, 53, 59, 73-74, 101, 161-168: the "OpenRISC fault.c"
                header and do_page_fault(), including the comments on demand-faulting
                kernel-space virtual memory and on not taking faults in atomic context
                (hit list truncated)