Lines Matching +full:ldo +full:-
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 * Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
6 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
8 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
40 addib,<> -1,%r25,$lclu_loop
48 ldo 1(%r25),%r25
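On PA-RISC, ldo disp(base),reg ("load offset") only computes base + disp into reg without touching memory, so it doubles as the add-immediate in counter updates like the two fragments above. Below is a minimal C sketch of the clear-loop shape they belong to; the function name is hypothetical, and the real $lclu_loop stores through a space register and has a fault fixup that plain C cannot express.

    #include <stddef.h>

    /* Hypothetical C shape of a countdown clear loop: 'len--' is what
     * 'addib,<> -1,%r25,$lclu_loop' fuses into one decrement-and-branch,
     * and the fault path's 'ldo 1(%r25),%r25' adds one back for the byte
     * whose store did not complete. */
    static size_t clear_bytes(unsigned char *dst, size_t len)
    {
        while (len != 0) {
            *dst++ = 0;   /* the store in the assembly loop */
            len--;
        }
        return len;       /* bytes left unwritten; 0 here, non-zero on a fault */
    }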
58 * Input: sr1 already contains space of source region
59 * Input: sr2 already contains space of destination region
62 * Returns: number of bytes that could not be copied (see the sketch below).
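Since the excerpt drops the surrounding prototype, here is a hedged C sketch of that calling contract (function name hypothetical): the routine reports how many bytes it could NOT copy, which is what lets copy_from_user()-style callers compute a partial count. The sr1/sr2 space registers have no portable C equivalent and are omitted.

    #include <stddef.h>

    /* Sketch of the contract: copy len bytes, return how many were not
     * copied. In the real routine a fault exits early, leaving len
     * non-zero; plain C cannot model the fault, so this always returns 0. */
    static size_t copy_with_remainder(void *dst, const void *src, size_t len)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (len != 0) {
            *d++ = *s++;
            len--;
        }
        return len;   /* 0: everything was copied */
    }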
65 * This code is based on a C implementation of a copy routine written by
69 * conditions. In the optimal case, we copy by loops that copy 32 or 16 bytes
71 * aligning the destination and then using a shift-and-write method, or in a few
72 * cases by falling back to a byte-at-a-time copy.
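To make the three strategies concrete, here is a rough sketch of the dispatch the comment describes; the enum, the length cutoff, and the alignment test are hypothetical, not lifted from the kernel.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical classification mirroring the comment above: mutually
     * alignable operands get the unrolled word loops, long misaligned
     * copies get the shift-and-write method, the rest go byte by byte. */
    enum copy_strategy { COPY_WORD_LOOPS, COPY_SHIFTED, COPY_BYTEWISE };

    static enum copy_strategy pick_strategy(const void *dst, const void *src,
                                            size_t len)
    {
        uintptr_t d = (uintptr_t)dst, s = (uintptr_t)src;

        if (((d ^ s) & 3) == 0)   /* same residue: aligning dst aligns src too */
            return COPY_WORD_LOOPS;
        if (len >= 16)            /* hypothetical cutoff for the shifted path  */
            return COPY_SHIFTED;
        return COPY_BYTEWISE;
    }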
75 * often >10x faster than a simple byte-at-a-time copy, even for strangely
78 * it by 30-40% for aligned copies because of the loop unrolling, but in some
83 * - add cache prefetching
84 * - try not to use the post-increment address modifiers; they may create
123 /* only do 64-bit copies if we can get aligned. */
127 /* loop until we are 64-bit aligned */
134 ldo -1(len),len
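Lines 123-134 belong to the "align first" preamble: the ldo -1(len),len is the length decrement of a byte loop that runs only until the destination reaches an 8-byte boundary. A hedged C sketch of that shape, assuming the operands are mutually alignable:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the 64-bit alignment preamble: copy single bytes until
     * dst reaches an 8-byte boundary, then the caller can switch to the
     * wide loops. */
    static void align_to_8(unsigned char **dp, const unsigned char **sp,
                           size_t *lenp)
    {
        while (*lenp != 0 && ((uintptr_t)*dp & 7) != 0) {
            *(*dp)++ = *(*sp)++;
            (*lenp)--;          /* ldo -1(len),len */
        }
    }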
146 ldo 16(src),src
151 ldo 16(src),src
165 ldo -32(len),len
172 ldo -4(len),len
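The ldo 16(src),src / ldo -32(len),len pairing suggests an unrolled inner loop that advances the source pointer in 16-byte steps while retiring 32 bytes of length per iteration. A hedged C sketch of one plausible shape; the real assembly uses 64-bit load/store pairs with post-increment addressing, while this portable version goes through memcpy for each 8-byte move.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Sketch of a 32-bytes-per-iteration copy loop; returns how many
     * bytes it handled so the caller can advance its pointers and len. */
    static size_t copy_32_per_iter(unsigned char *d, const unsigned char *s,
                                   size_t len)
    {
        size_t done = 0;

        while (len - done >= 32) {
            uint64_t w0, w1, w2, w3;
            memcpy(&w0, s,      8); memcpy(&w1, s +  8, 8);
            memcpy(&w2, s + 16, 8); memcpy(&w3, s + 24, 8);
            memcpy(d,      &w0, 8); memcpy(d +  8, &w1, 8);
            memcpy(d + 16, &w2, 8); memcpy(d + 24, &w3, 8);
            s += 32; d += 32;
            done += 32;         /* ldo -32(len),len */
        }
        return done;
    }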
179 /* loop until we are 32-bit aligned */
186 ldo -1(len),len
201 ldo 16(src),src
215 ldo -16(len),len
221 ldo 1(src),src
224 ldo -1(len),len
237 /* align until dst is 32-bit word-aligned */
241 ldo 1(src),src
244 ldo -1(len),len
260 * Copy from an unaligned src to an aligned dst using shifts.
290 ldo -1(len),len
316 ldo -4(len),len
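Lines 260-316 are that shift-and-write path. Below is a minimal sketch of the technique, assuming big-endian words (as on PA-RISC), a word-aligned dst, and a misaligned src; the pointer cast mirrors the assembly's aligned word loads, and the real routine is careful that the final aligned load never strays into an unmapped page, a subtlety this sketch glosses over.

    #include <stddef.h>
    #include <stdint.h>

    /* Shift-and-write: perform only aligned 32-bit loads from a
     * misaligned source, merging adjacent words with shifts before each
     * aligned store. Preconditions: ((uintptr_t)src & 3) != 0 (so the
     * shift counts stay in 8..24) and dst is 4-byte aligned.
     * Big-endian merge; a little-endian build would swap the shifts. */
    static void copy_shifted(uint32_t *dst, const unsigned char *src,
                             size_t nwords)
    {
        size_t off = (uintptr_t)src & 3;
        const uint32_t *ws = (const uint32_t *)(src - off); /* aligned base */
        unsigned lsh = 8 * off, rsh = 32 - lsh;
        uint32_t cur = *ws++;   /* word holding the first source bytes */

        while (nwords-- != 0) {
            uint32_t next = *ws++;                 /* one aligned load/word */
            *dst++ = (cur << lsh) | (next >> rsh); /* merge and store       */
            cur = next;
        }
    }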
326 /* calculate new src, dst and len and jump to byte-copy loop */
338 ldo 1(len),len
345 ldo 2(len),len
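The trailing ldo 1(len),len / ldo 2(len),len fragments are fault fixups: after a word access faults partway through, the handler adds back the bytes that were never transferred before rejoining the byte-copy loop. A hedged sketch of the recovery arithmetic, with all names hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical recovery arithmetic: given the destination address
     * that faulted, work out how many bytes are still pending so the
     * byte-copy loop can retry them and fault on the exact byte. */
    static size_t bytes_still_pending(uintptr_t fault_addr,
                                      uintptr_t dst_start, size_t total_len)
    {
        size_t done = fault_addr - dst_start; /* bytes stored before fault */
        return total_len - done;              /* redo these byte by byte   */
    }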