// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>
#include <asm/asm.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

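/*
 * Store a block of @len bytes from the user buffer @src to PCI I/O memory
 * at @ioaddr with a single PCISTB (PCI store block, .insn rsy,0xeb00000000d4).
 * The access runs in secondary address space mode (sacf 256 ... sacf 768)
 * so the instruction picks up the user mappings; faults that cannot be
 * resolved hit the exception table and the function returns -ENXIO. The
 * PCI status code is extracted from the length operand and returned via
 * @status.
 */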
static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc, exception;
	bool sacf_flag;

	exception = 1;
	sacf_flag = enable_sacf_uaccess();
	asm_inline volatile (
		"	sacf	256\n"
		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:	lhi	%[exc],0\n"
		"2:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: CC_CLOBBER_LIST("memory"));
	disable_sacf_uaccess(sacf_flag);
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

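/*
 * Store 0 < @ulen <= 8 bytes from the user buffer @src to PCI I/O memory
 * at @ioaddr with a single PCISTG (PCI store, .insn rre,0xb9d40000); see
 * the comment in the function body for how the bytes are gathered.
 */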
static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc, exception;
	bool sacf_flag;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @ulen <= 8 bytes from @src into the rightmost bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	exception = 1;
	sacf_flag = enable_sacf_uaccess();
	asm_inline volatile (
		"	sacf	256\n"
		"0:	llgc	%[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"	aghi	%[src],1\n"
		"	ogr	%[val],%[tmp]\n"
		"	brctg	%[cnt],0b\n"
		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:	lhi	%[exc],0\n"
		"3:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		: [src] "+a" (src), [cnt] "+d" (cnt),
		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:
		: CC_CLOBBER_LIST("memory"));
	disable_sacf_uaccess(sacf_flag);
	*status = ioaddr_len.odd >> 24 & 0xff;

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}

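/*
 * Copy @n bytes from the user buffer @src to PCI I/O memory at @dst in
 * chunks whose size and alignment are limited by zpci_get_max_io_size().
 * Chunks larger than 8 bytes use PCISTB, smaller ones PCISTG. On failure
 * the return code, PCI status and current destination address are
 * recorded via zpci_err_mmio().
 */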
static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) dst,
					    (u64 __force) src, n,
					    ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

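/*
 * Write @length bytes from @user_buffer to the PCI I/O memory that the
 * calling process has mapped at @mmio_addr. On MIO-capable systems the
 * data is copied directly from user space with PCISTB/PCISTG; otherwise
 * the user mapping is resolved to its pfn and the data is staged through
 * a kernel buffer and written with zpci_memcpy_toio(). A single call must
 * not cross a page boundary.
 *
 * Illustrative use from user space (a sketch, not normative ABI
 * documentation): mmap a PCI BAR writable, then
 *	syscall(__NR_s390_pci_mmio_write, map + off, buf, len);
 */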
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.address = mmio_addr;
	args.vma = vma;
	ret = follow_pfnmap_start(&args);
	if (ret) {
		fixup_user_fault(current->mm, mmio_addr, FAULT_FLAG_WRITE, NULL);
		ret = follow_pfnmap_start(&args);
		if (ret)
			goto out_unlock_mmap;
	}

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	ret = -EFAULT;
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

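/*
 * Read 0 < @ulen <= 8 bytes from PCI I/O memory at @ioaddr with a single
 * PCILG (PCI load, .insn rre,0xb9d60000) and store them byte-wise to the
 * user buffer @dst; counterpart of __pcistg_mio_inuser().
 */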
static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	bool sacf_flag;
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc, exception;
	u64 val, tmp;

	/*
	 * read 0 < @ulen <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	exception = 1;
	sacf_flag = enable_sacf_uaccess();
	asm_inline volatile (
		"	sacf	256\n"
		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:	lhi	%[exc],0\n"
		"	jne	4f\n"
		"2:	ahi	%[shift],-8\n"
		"	srlg	%[tmp],%[val],0(%[shift])\n"
		"3:	stc	%[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"	brctg	%[cnt],2b\n"
		/*
		 * Use xr to clear exc and set condition code to zero
		 * to ensure flag output is correct for this branch.
		 */
		"	xr	%[exc],%[exc]\n"
		"4:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [val] "=d" (val),
		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		  [shift] "+a" (shift)
		:
		: CC_CLOBBER_LIST("memory"));
	disable_sacf_uaccess(sacf_flag);
	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

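/*
 * Counterpart of __memcpy_toio_inuser(): copy @n bytes from PCI I/O
 * memory at @src to the user buffer @dst in chunks limited by
 * zpci_get_max_io_size(), using PCILG for each chunk.
 */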
static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) src,
					    (u64 __force) dst, n,
					    ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

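/*
 * Read counterpart of s390_pci_mmio_write(): copy @length bytes from the
 * PCI I/O memory mapped at @mmio_addr to @user_buffer, either directly
 * with PCILG on MIO-capable systems or, on older systems, via a pfn
 * lookup, zpci_memcpy_fromio() and copy_to_user().
 */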
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_READ))
		goto out_unlock_mmap;

	args.vma = vma;
	args.address = mmio_addr;
	ret = follow_pfnmap_start(&args);
	if (ret) {
		fixup_user_fault(current->mm, mmio_addr, 0, NULL);
		ret = follow_pfnmap_start(&args);
		if (ret)
			goto out_unlock_mmap;
	}

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}