// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>

/*
 * This ought to be kept in sync with the powerpc-specific do_page_fault
 * function. Fortunately, there are a few corner cases that we haven't
 * had to handle so far.
 */
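/*
 * Resolve a coprocessor-generated page fault against the owning task's
 * address space.  Coprocessor drivers (e.g. spufs, cxl) pass in the
 * faulting effective address and the DSISR value reported by the device.
 * An illustrative sketch of a caller ("ctx" is a placeholder for the
 * driver's per-context structure holding the mm reference):
 *
 *	vm_fault_t flt = 0;
 *	int ret = copro_handle_mm_fault(ctx->mm, ea, dsisr, &flt);
 *
 *	if (ret)
 *		return ret;	// -EFAULT or -ENOMEM
 *	// otherwise restart the faulting coprocessor access
 *
 * Returns 0 with the handle_mm_fault() result stored in *flt, -EFAULT
 * if the mm or the access is invalid, or -ENOMEM if memory could not
 * be allocated to service the fault.
 */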
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

	vma = lock_mm_and_find_vma(mm, ea, NULL);
	if (!vma)
		return -EFAULT;

	ret = -EFAULT;
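	/* DSISR_ISSTORE is set when the faulting access was a store */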
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above, and hash
		 * should get a NOHPTE fault instead of a PROTFAULT in
		 * case fixup is needed for things like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

	ret = 0;
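	/*
	 * Let the core MM code resolve the fault; its result is passed
	 * back to the caller through *flt.
	 */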
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);

	/* The fault is fully completed (including releasing mmap lock) */
	if (*flt & VM_FAULT_COMPLETED)
		return 0;

	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

out_unlock:
	mmap_read_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);

#ifdef CONFIG_PPC_64S_HASH_MMU
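/*
 * Compute the SLB entry (ESID/VSID pair) a coprocessor needs in order
 * to access @ea on a hash-MMU system.  Returns 0 and fills in @slb on
 * success, or 1 if the address does not map to a valid segment.
 */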
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

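	/*
	 * Shift the VSID into its field and OR in the user/kernel key
	 * bits, the page-size encoding (SLLP) and, for 1TB segments,
	 * the segment-size bit.
	 */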
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

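	/* The ESID is the EA's segment base (256MB or 1TB) plus the valid bit. */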
	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
#endif