Lines Matching +full:len -or- +full:define

23  * dma-coherent systems.
37 #include <asm/asm-offsets.h>
40 #define dst a0
41 #define src a1
42 #define len a2
47 * memcpy copies len bytes from src to dst and sets v0 to dst.
49 * - src and dst don't overlap
50 * - src is readable
51 * - dst is writable
54 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
55 * the number of uncopied bytes due to an exception caused by a read or write.
59 * - src is readable (no exceptions when reading src)
61 * - dst is writable (no exceptions when writing dst)
62 * __copy_user uses a non-standard calling convention; see
63 * include/asm-mips/uaccess.h
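As a rough C model of the contract just described (the names and the fault-simulation parameter are hypothetical; the real routine is hand-written assembly that recovers through the exception table):

    #include <stddef.h>

    /*
     * Hypothetical model of __copy_user: copy up to len bytes and
     * report how many bytes were NOT copied (0 on complete success).
     * fault_offset simulates the first byte whose read or write faults.
     */
    static size_t copy_user_model(void *dst, const void *src, size_t len,
                                  size_t fault_offset)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;
            size_t n = len < fault_offset ? len : fault_offset;
            size_t i;

            for (i = 0; i < n; i++)
                    d[i] = s[i];
            return len - n;         /* what the assembly leaves in a2 (len) */
    }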
76 * 1- AT contain the address of the byte just past the end of the source
78 * 2- src_entry <= src < AT, and
79 * 3- (dst - src) == (dst_entry - src_entry),
86 * The exception handlers for stores adjust len (if necessary) and return.
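Given invariants 1-3, the load-fault fixup reduces to a single subtraction; a sketch under assumed names (AT as the end-of-source address, matching the "SUB len, AT, t0" visible in the exception path below):

    /*
     * AT holds src_entry + len_entry (one past the end of the source)
     * and the faulting address lies in [src, AT), so everything from
     * the fault onward is uncopied.
     */
    static unsigned long uncopied_after_load_fault(unsigned long src_end /* AT */,
                                                   unsigned long fault_addr)
    {
            return src_end - fault_addr;
    }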
94 #define LD_INSN 1
95 #define ST_INSN 2
97 #define SRC_PREFETCH 1
98 #define DST_PREFETCH 2
99 #define LEGACY_MODE 1
100 #define EVA_MODE 2
101 #define USEROP 1
102 #define KERNELOP 2
115 #define EXC(insn, type, reg, addr, handler) \
123 /* If loading from user or storing to user */ \
133 * exception handler or EVA insn \
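Conceptually (a sketch only, not the real macro expansion), EXC pairs each faultable load/store with a fixup handler in an exception table that the fault path consults; the layout and field names below are illustrative assumptions:

    /* One entry per EXC()-wrapped instruction (illustrative layout). */
    struct exc_entry_model {
            unsigned long insn;     /* address of the guarded load/store */
            unsigned long fixup;    /* handler that adjusts len and returns */
    };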
140 * Only on the 64-bit kernel can we make use of 64-bit registers.
143 #define USE_DOUBLE
148 #define LOADK ld /* No exception */
149 #define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
150 #define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
151 #define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
152 #define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
153 #define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
154 #define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
155 #define ADD daddu
156 #define SUB dsubu
157 #define SRL dsrl
158 #define SRA dsra
159 #define SLL dsll
160 #define SLLV dsllv
161 #define SRLV dsrlv
162 #define NBYTES 8
163 #define LOG_NBYTES 3
174 #define t0 $8
175 #define t1 $9
176 #define t2 $10
177 #define t3 $11
178 #define t4 $12
179 #define t5 $13
180 #define t6 $14
181 #define t7 $15
185 #define LOADK lw /* No exception */
186 #define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
187 #define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
188 #define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
189 #define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
190 #define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
191 #define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
192 #define ADD addu
193 #define SUB subu
194 #define SRL srl
195 #define SLL sll
196 #define SRA sra
197 #define SLLV sllv
198 #define SRLV srlv
199 #define NBYTES 4
200 #define LOG_NBYTES 2
204 #define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
205 #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
208 # define _PREF(hint, addr, type) \
229 # define _PREF(hint, addr, type)
232 #define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
233 #define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
236 #define LDFIRST LOADR
237 #define LDREST LOADL
238 #define STFIRST STORER
239 #define STREST STOREL
240 #define SHIFT_DISCARD SLLV
242 #define LDFIRST LOADL
243 #define LDREST LOADR
244 #define STFIRST STOREL
245 #define STREST STORER
246 #define SHIFT_DISCARD SRLV
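LDFIRST/LDREST assemble one register from a word straddling an alignment boundary (ldl/ldr, or lwl/lwr on 32-bit), with the pairing swapped by endianness. A portable C stand-in, assuming a 64-bit build:

    #include <stdint.h>
    #include <string.h>

    /* Unaligned register-sized load; on MIPS a compiler typically
     * lowers this memcpy to an ldl/ldr pair, i.e. LDFIRST + LDREST. */
    static uint64_t load_word_unaligned(const void *p)
    {
            uint64_t v;

            memcpy(&v, p, sizeof v);
            return v;
    }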
249 #define FIRST(unit) ((unit)*NBYTES)
250 #define REST(unit) (FIRST(unit)+NBYTES-1)
251 #define UNIT(unit) FIRST(unit)
253 #define ADDRMASK (NBYTES-1)
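A worked example of these offset helpers, assuming the 64-bit case (NBYTES == 8):

    #include <stdio.h>

    #define NBYTES      8
    #define FIRST(unit) ((unit) * NBYTES)
    #define REST(unit)  (FIRST(unit) + NBYTES - 1)
    #define ADDRMASK    (NBYTES - 1)

    int main(void)
    {
            /* Copy unit 2 spans byte offsets 16..23. */
            printf("FIRST(2)=%d REST(2)=%d\n", FIRST(2), REST(2));
            /* ADDRMASK isolates an address's misalignment: 0x1003 -> 3. */
            printf("0x1003 & ADDRMASK = %lx\n", 0x1003UL & ADDRMASK);
            return 0;
    }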
268 * mode : LEGACY_MODE or EVA_MODE
269 * from : Source operand. USEROP or KERNELOP
270 * to : Destination operand. USEROP or KERNELOP
281 * Note: dst & src may be unaligned, len may be 0
284 #define rem t8
292 * If len < NBYTES use byte operations.
296 sltu t2, len, NBYTES
309 or t0, t0, t1
313 * use delay slot for fall-through
317 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
318 beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
319 and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
329 SUB len, len, 8*NBYTES
338 STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
339 STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
340 STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
341 STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
342 STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
343 STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
346 bne len, rem, 1b
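The shape of this unrolled loop, sketched in C (a plain memcpy stands in for the eight LOAD/STORE unit pairs; NBYTES == 8 assumed):

    #include <stddef.h>
    #include <string.h>

    #define NBYTES 8

    /* rem = len % (8*NBYTES); loop until len == rem, mirroring
     * "bne len, rem, 1b" above, then return rem for the cleanup paths. */
    static size_t copy_both_aligned(unsigned char *dst, const unsigned char *src,
                                    size_t len)
    {
            size_t rem = len & (8 * NBYTES - 1);

            while (len != rem) {
                    memcpy(dst, src, 8 * NBYTES);   /* eight unit copies */
                    dst += 8 * NBYTES;
                    src += 8 * NBYTES;
                    len -= 8 * NBYTES;
            }
            return rem;
    }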
350 * len == rem == the number of bytes left to copy < 8*NBYTES
353 beqz len, .Ldone\@
354 sltu t0, len, 4*NBYTES
356 and rem, len, (NBYTES-1) # rem = len % NBYTES
358 * len >= 4*NBYTES
364 SUB len, len, 4*NBYTES
373 beqz len, .Ldone\@
377 * rem = len % NBYTES
379 beq rem, len, .Lcopy_bytes\@
385 SUB len, len, NBYTES
389 bne rem, len, 1b
396 * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
397 * because can't assume read-access to dst. Instead, use
401 * wide-issue mips processors because the code has fewer branches and
402 * more instruction-level parallelism.
404 #define bits t2
405 beqz len, .Ldone\@
406 ADD t1, dst, len # t1 is just past last byte of dst
408 SLL rem, len, 3 # rem = number of bits to keep
412 STREST(t0, -1(t1), .Ls_exc\@)
414 move len, zero
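Restated in C under stated assumptions (little-endian, NBYTES == 8, src word-aligned so the full-word read stays inside one readable word): load one word, shift away the bits past len, and store exactly len bytes ending at dst + len - 1. Portable C cannot express sdl/sdr, so a short memcpy stands in for STREST; note the sketch never reads dst, matching the comment above:

    #include <stdint.h>
    #include <string.h>

    /* Keep the 8*len low-order bits (little-endian view), discard the
     * rest, then write only len bytes to dst. */
    static void store_tail(void *dst, const void *src, unsigned len /* 1..7 */)
    {
            uint64_t w;

            memcpy(&w, src, sizeof w);      /* LOAD: full aligned word */
            w <<= 8 * (8 - len);            /* SHIFT_DISCARD */
            w >>= 8 * (8 - len);
            memcpy(dst, &w, len);           /* STREST-like partial store */
    }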
420 * len >= NBYTES
425 #define match rem
433 beq len, t2, .Ldone\@
434 SUB len, len, t2
440 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
443 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
455 SUB len, len, 4*NBYTES
474 bne len, rem, 1b
478 beqz len, .Ldone\@
479 and rem, len, NBYTES-1 # rem = len % NBYTES
480 beq rem, len, .Lcopy_bytes\@
487 SUB len, len, NBYTES
491 bne len, rem, 1b
496 beqz len, .Ldone\@
499 /* 0 < len < NBYTES */
501 #define COPY_BYTE(N) \
503 SUB len, len, 1; \
504 beqz len, .Ldone\@; \
515 LOADB(t0, NBYTES-2(src), .Ll_exc\@)
516 SUB len, len, 1
518 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
546 * Copy bytes from src until faulting load address (or until a
561 sb t1, 0(dst) # can't fault -- we're copy_from_user
571 SUB len, AT, t0 # len = number of uncopied bytes
575 #define SEXC(n) \
578 ADD len, len, n*NBYTES; \
593 ADD len, len, 1
607 sltu t0, a1, t0 # dst + len <= src -> memcpy
608 sltu t1, a0, t1 # dst >= src + len -> memcpy
616 LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
620 ADD a0, a2 # dst = dst + len
621 ADD a1, a2 # src = src + len
625 lb t0, -1(a1)
627 sb t0, -1(a0)
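Taken together, the two sltu tests and __rmemcpy amount to a conventional memmove; a hedged C model (byte-granularity, like the lb/sb pair above):

    #include <stddef.h>
    #include <string.h>

    /* Disjoint regions go to memcpy; overlapping ones are copied a
     * byte at a time, backwards when dst sits above src (after both
     * ADDs advance dst and src by len, as in __rmemcpy). */
    static void *memmove_model(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d + len <= s || d >= s + len)       /* the two sltu tests */
                    return memcpy(dst, src, len);

            if (d < s) {
                    while (len--)
                            *d++ = *s++;            /* forward byte copy */
            } else {
                    d += len;                       /* ADD a0, a2 */
                    s += len;                       /* ADD a1, a2 */
                    while (len--)
                            *--d = *--s;            /* lb t0,-1(a1); sb t0,-1(a0) */
            }
            return dst;
    }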
655 * __copy_user sets len to 0 for success; else to an upper bound of
660 LEAF(memcpy) /* a0=dst a1=src a2=len */
670 /* Legacy Mode, user <-> user */
680 * virtual <-> physical translation when a virtual address is actually in user