Lines Matching full:src

22 #define src a1
28 * memcpy copies len bytes from src to dst and sets v0 to dst.
30 * - src and dst don't overlap
31 * - src is readable
35 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
37 * __copy_user assumes that src and dst don't overlap, and that the call is
40 * - src is readable (no exceptions when reading src)
59 * 2- src_entry <= src < AT, and
60 * 3- (dst - src) == (dst_entry - src_entry),
64 * (2) is met by incrementing src by the number of bytes copied
65 * (3) is met by not doing loads between a pair of increments of dst and src
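
Invariant 1 is not matched above because its line doesn't contain "src"; in the file it pins AT to the byte just past the end of the source. With that, the whole load-exception story reduces to pointer arithmetic. A minimal C sketch of the recovery math, where fault_addr stands in for the faulting load address (THREAD_BUADDR in the real code) and at_end for the value kept in AT; the function name is mine:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * With the three invariants:
     *   at_end     = src_entry + len          (invariant 1)
     *   src_entry <= src < at_end             (invariant 2)
     *   dst - src == dst_entry - src_entry    (invariant 3)
     * the handler can always reconstruct progress; the count still
     * owed to the caller is simply:
     */
    static size_t uncopied_after_fault(uintptr_t fault_addr, uintptr_t at_end)
    {
        return at_end - fault_addr;    /* becomes a2 on return */
    }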
149 LEAF(memcpy) /* a0=dst a1=src a2=len */
156 * Note: dst & src may be unaligned, len may be 0
163 pref 0, 0(src)
166 and t0, src, ADDRMASK # Check if src unaligned
178 pref 0, 128(src) # We must not prefetch invalid addresses
181 2: pref 0, 256(src) # We must not prefetch invalid addresses
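
For orientation: the "and t0, src, ADDRMASK" test at 166, together with the matching test on dst, dispatches to one of three copy loops, and the prefetches stay a bounded distance (128/256 bytes) ahead of src because, as the comments say, invalid addresses must not be prefetched. A minimal C sketch of the dispatch, assuming ADDRMASK masks the low NBYTES-1 address bits and that dst is tested first; the names here are mine, not the kernel's:

    #include <stdint.h>

    #define NBYTES   sizeof(long)      /* one register-sized copy unit */
    #define ADDRMASK (NBYTES - 1)      /* low bits that spoil alignment */

    enum copy_path { BOTH_ALIGNED, SRC_UNALIGNED_DST_ALIGNED, DST_UNALIGNED };

    static enum copy_path pick_path(const void *src, const void *dst)
    {
        if ((uintptr_t)dst & ADDRMASK)
            return DST_UNALIGNED;
        if ((uintptr_t)src & ADDRMASK)
            return SRC_UNALIGNED_DST_ALIGNED;
        return BOTH_ALIGNED;
    }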
185 EXC( LOAD t0, UNIT(0)(src), l_exc)
186 EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
187 EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
188 EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
194 EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
195 EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
196 EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
197 EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
201 ADD src, src, 16*NBYTES
204 EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
205 EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
206 EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
207 EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
212 EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
213 EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
214 EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
215 EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
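
The negative UNIT offsets work because src was already advanced by 16*NBYTES in mid-iteration (line 201 above); units 8..15 are then addressed as -8..-1. If one of these later loads faults, src sits past the faulting address, which would break the byte-fixup code's assumption that src < THREAD_BUADDR, so the l_exc_copy_rewind16 handler first backs src and dst up by one full iteration (the SUB at 381 below). A one-line C sketch of that rewind:

    #define NBYTES sizeof(long)

    /* l_exc_copy_rewind16: undo the mid-iteration advance so the
     * generic byte-salvage code sees src <= faulting address again. */
    static const char *rewind16(const char *src_now)
    {
        return src_now - 16 * NBYTES;   /* SUB src, src, 16*NBYTES */
    }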
233 EXC( LOAD t0, UNIT(0)(src), l_exc)
234 EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
235 EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
236 EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
242 EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
243 EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
244 EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
245 EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
250 ADD src, src, 8*NBYTES
260 EXC( LOAD t0, UNIT(0)(src), l_exc)
261 EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
262 EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
263 EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
269 ADD src, src, 4*NBYTES
283 EXC( LOAD t0, 0(src), l_exc)
287 ADD src, src, NBYTES
293 EXC( LOAD t0, 0(src), l_exc)
297 ADD src, src, NBYTES
303 EXC( LOAD t0, 0(src), l_exc)
305 ADD src, src, NBYTES
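
After the unrolled 8- and 4-unit blocks, the matches at 283-305 move one register-sized unit at a time. Expressed as a loop in C, the equivalent behavior is roughly the sketch below; the real code keeps the remaining count in a2 and every load is EXC-protected:

    #include <stddef.h>

    #define NBYTES sizeof(unsigned long)   /* one register-sized unit */

    /* Copy whole units while at least one remains. */
    static void copy_units(unsigned long *dst, const unsigned long *src,
                           size_t *len)
    {
        while (*len >= NBYTES) {
            *dst++ = *src++;    /* LOAD t0, 0(src) ... then store */
            *len -= NBYTES;     /* and advance src/dst by NBYTES */
        }
    }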
320 * are to the same unit (unless src is aligned, but it's not).
322 EXC( LDFIRST t0, FIRST(0)(src), l_exc)
323 EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
325 EXC( LDREST t0, REST(0)(src), l_exc_copy)
326 EXC( LDREST t1, REST(1)(src), l_exc_copy)
327 EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
328 EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
329 EXC( LDREST t2, REST(2)(src), l_exc_copy)
330 EXC( LDREST t3, REST(3)(src), l_exc_copy)
331 ADD src, src, 4*NBYTES
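
LDFIRST/LDREST expand to the MIPS unaligned-access pair (lwl/lwr, or ldl/ldr on 64-bit builds), each of which fills only part of the destination register; the FIRST(1)-before-REST(0) interleaving visible above avoids issuing two partial loads to the same register back to back, per the comment fragment at 320. A portable little-endian C model of what one LDFIRST+LDREST pair accomplishes; the function name is mine, and big-endian kernels combine with the opposite shifts:

    #include <stddef.h>
    #include <stdint.h>

    /* Read one word from an unaligned address using only aligned
     * loads, combining little-endian style. */
    static unsigned long load_unaligned(const unsigned char *p)
    {
        size_t n = sizeof(unsigned long);
        uintptr_t base = (uintptr_t)p & ~(uintptr_t)(n - 1);
        size_t off = (uintptr_t)p - base;
        unsigned long lo = *(const unsigned long *)base;            /* LDFIRST */

        if (off == 0)
            return lo;               /* already aligned: one load suffices */

        unsigned long hi = *(const unsigned long *)(base + n);      /* LDREST */
        return (lo >> (8 * off)) | (hi << (8 * (n - off)));
    }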
345 EXC( LDFIRST t0, FIRST(0)(src), l_exc)
346 EXC( LDREST t0, REST(0)(src), l_exc_copy)
349 ADD src, src, NBYTES
359 EXC( lb t0, N(src), l_exc); \
370 EXC( lb t0, NBYTES-2(src), l_exc)
380 /* Rewind src and dst by 16*NBYTES for l_exc_copy */
381 SUB src, src, 16*NBYTES
385 * Copy bytes from src until faulting load address (or until a
392 * Assumes src < THREAD_BUADDR($28)
397 EXC( lb t1, 0(src), l_exc)
398 ADD src, src, 1
400 bne src, t0, 1b
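
The matches at 385-400 are the byte-salvage loop: everything below the faulting address is copied one byte at a time, after which l_exc reports len = AT - THREAD_BUADDR. A C sketch with hypothetical names standing in for those registers:

    #include <stddef.h>

    /* Model of l_exc_copy + l_exc: salvage bytes up to the faulting
     * address, then report how many were left uncopied. fault_addr
     * stands in for THREAD_BUADDR($28); src_end for AT. */
    static size_t salvage_bytes(unsigned char *dst, const unsigned char *src,
                                const unsigned char *fault_addr,
                                const unsigned char *src_end)
    {
        while (src != fault_addr)      /* bne src, t0, 1b */
            *dst++ = *src++;           /* lb t1, 0(src); store; advance */
        return src_end - src;          /* len = AT - THREAD_BUADDR */
    }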
444 sltu t0, a1, t0 # dst + len <= src -> memcpy
445 sltu t1, a0, t1 # dst >= src + len -> memcpy
453 LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
455 beqz t0, r_end_bytes_up # src >= dst
458 ADD a1, a2 # src = src + len
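
The last matches tie memmove and __rmemcpy together: memmove punts to plain memcpy whenever the ranges cannot overlap (the two sltu tests at 444-445), and __rmemcpy copies descending when src < dst so the overlap is handled safely. A compact C model under those two rules; the helper name is mine:

    #include <stddef.h>
    #include <string.h>

    static void *memmove_model(void *dst, const void *src, size_t len)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        /* dst + len <= src or dst >= src + len: no overlap -> memcpy */
        if (d + len <= s || d >= s + len)
            return memcpy(dst, src, len);

        if (s >= d) {                  /* beqz t0, r_end_bytes_up */
            while (len--)              /* copy ascending */
                *d++ = *s++;
        } else {
            d += len;                  /* dst = dst + len */
            s += len;                  /* src = src + len (ADD a1, a2) */
            while (len--)              /* copy descending */
                *--d = *--s;
        }
        return dst;
    }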