// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * I/O string operations
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *    Copyright (C) 2006 IBM Corporation
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * Rewritten in C by Stephen Rothwell.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/export.h>

#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>

/* See definition in io.h */
bool isa_io_special;

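/*
 * Read @count bytes from the (non-incrementing) device register at @port
 * into the buffer @buf.  The leading mb() orders against prior accesses,
 * eieio() keeps the individual device reads in order, and data_barrier()
 * ensures the final load has completed before the caller proceeds.
 */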
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
	u8 *tbuf = buf;
	u8 tmp;

	if (unlikely(count <= 0))
		return;

	mb();
	do {
		tmp = *(const volatile u8 __force *)port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	data_barrier(tmp);
}
EXPORT_SYMBOL(_insb);

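/*
 * Write @count bytes from @buf to the device register at @port.  The
 * trailing mb() ensures the stores have been performed before any
 * subsequent accesses.
 */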
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
	const u8 *tbuf = buf;

	if (unlikely(count <= 0))
		return;

	mb();
	do {
		*(volatile u8 __force *)port = *tbuf++;
	} while (--count != 0);
	mb();
}
EXPORT_SYMBOL(_outsb);

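/*
 * As _insb(), but reads @count 16-bit values from @port into @buf.
 * The values are stored as read, with no byte swapping.
 */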
void _insw(const volatile u16 __iomem *port, void *buf, long count)
{
	u16 *tbuf = buf;
	u16 tmp;

	if (unlikely(count <= 0))
		return;

	mb();
	do {
		tmp = *(const volatile u16 __force *)port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	data_barrier(tmp);
}
EXPORT_SYMBOL(_insw);

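/*
 * As _outsb(), but writes @count 16-bit values from @buf to @port.
 */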
void _outsw(volatile u16 __iomem *port, const void *buf, long count)
{
	const u16 *tbuf = buf;

	if (unlikely(count <= 0))
		return;

	mb();
	do {
		*(volatile u16 __force *)port = *tbuf++;
	} while (--count != 0);
	mb();
}
EXPORT_SYMBOL(_outsw);

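/*
 * As _insb(), but reads @count 32-bit values from @port into @buf.
 */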
void _insl(const volatile u32 __iomem *port, void *buf, long count)
{
	u32 *tbuf = buf;
	u32 tmp;

	if (unlikely(count <= 0))
		return;

	mb();
	do {
		tmp = *(const volatile u32 __force *)port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	data_barrier(tmp);
}
EXPORT_SYMBOL(_insl);

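/*
 * As _outsb(), but writes @count 32-bit values from @buf to @port.
 */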
void _outsl(volatile u32 __iomem *port, const void *buf, long count)
{
	const u32 *tbuf = buf;

	if (unlikely(count <= 0))
		return;

	mb();
	do {
		*(volatile u32 __force *)port = *tbuf++;
	} while (--count != 0);
	mb();
}
EXPORT_SYMBOL(_outsl);

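/* True when the address @v is aligned to @a bytes (@a a power of two). */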
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)

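/*
 * Fill @n bytes of the memory-mapped region at @addr with the byte @c,
 * using aligned 32-bit stores for the bulk of the range and byte stores
 * for the unaligned head and tail.
 */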
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
	void *p = (void __force *)addr;
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	mb();
	while(n && !IO_CHECK_ALIGN(p, 4)) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)p) = lc;
		p += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	mb();
}
EXPORT_SYMBOL(_memset_io);

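/*
 * Copy @n bytes from the memory-mapped region at @src into normal memory
 * at @dest, switching to 32-bit loads once both pointers are word aligned.
 */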
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
		    unsigned long n)
{
	void *vsrc = (void __force *) src;

	mb();
	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	while(n >= 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		eieio();
		vsrc += 4;
		dest += 4;
		n -= 4;
	}
	while(n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	mb();
}
EXPORT_SYMBOL(_memcpy_fromio);

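/*
 * Copy @n bytes from normal memory at @src to the memory-mapped region
 * at @dest, switching to 32-bit stores once both pointers are word aligned.
 */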
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
	void *vdest = (void __force *) dest;

	mb();
	while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src += 4;
		vdest += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	mb();
}
EXPORT_SYMBOL(_memcpy_toio);