/*
 * Copyright (c) 2006, 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */

/*****************************************************************************
 * ARMv5 and ARMv6 implementation, also used in dyld on later archs          *
 *****************************************************************************/
 
#include <arm/arch.h>
#if !defined _ARM_ARCH_7 || defined VARIANT_DYLD

.text
.align 2
	
	.globl _memcpy
	.globl _bcopy
	.globl _memmove

_bcopy:		/* void bcopy(const void *src, void *dest, size_t len); */
	/* bcopy takes (src, dest) but memcpy takes (dest, src): swap
	 * r0 and r1 via r3, then fall straight through into _memcpy. */
	mov		r3, r0
	mov		r0, r1
	mov		r1, r3

_memcpy:		/* void *memcpy(void *dest, const void *src, size_t len); */
_memmove: 	/* void *memmove(void *dest, const void *src, size_t len); */
	/* trivial cases: return immediately for zero len or src == dest */
	cmp		r2, #0
	cmpne	r0, r1
	bxeq	lr

	/* save r0 (return value), r4 (scratch), and r5 (scratch) */
	stmfd	sp!, { r0, r4, r5, r7, lr }
	add	r7, sp, #12			/* r7 -> saved {r7, lr} pair (frame-pointer convention) */
	
	/* check for overlap. r3 <- distance between src & dest.
	 * flags here are still from "cmpne r0, r1", so hs/lo pick |dest - src| */
	subhs	r3, r0, r1
	sublo	r3, r1, r0
	cmp		r3, r2			/* if distance(src, dest) < len, we have overlap */
	blo		Loverlap

Lnormalforwardcopy:
	/* are src and dest dissimilarly word aligned? (compare their low 2 bits) */
	mov		r12, r0, lsl #30
	cmp		r12, r1, lsl #30
	bne		Lnonwordaligned_forward

	/* if len < 64, do a quick forward copy */
	cmp		r2, #64
	blt		Lsmallforwardcopy

	/* check for 16 byte src/dest unalignment (dest low nibble nonzero) */
	tst		r0, #0xf
	bne		Lsimilarlyunaligned

	/* 16-byte aligned already; check for 32 byte dest unalignment */
	tst		r0, #(1<<4)
	bne		Lunaligned_32

Lmorethan64_aligned:
	/* dest is 32-byte aligned, len >= 64: main forward block loop */
	/* save some more registers to use in the copy */
	stmfd	sp!, { r6, r8, r10, r11 }

	/* pre-subtract 64 from the len counter to avoid an extra compare in the loop */
	sub		r2, r2, #64

L64loop:
	/* copy 64 bytes at a time: two 8-register ldm/stm pairs */
	ldmia	r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }
#ifdef _ARM_ARCH_6
	pld		[r1, #32]
#endif
	stmia	r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }
	ldmia	r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }
	subs	r2, r2, #64
#ifdef _ARM_ARCH_6
	pld		[r1, #32]
#endif
	stmia	r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }
	bge		L64loop				/* loop while (len - 64) >= 0 */

	/* restore the scratch registers we just saved */
	ldmfd	sp!, { r6, r8, r10, r11 }

	/* fix up the len counter (previously subtracted an extra 64 from it) and test for completion */
	adds	r2, r2, #64
	beq		Lexit				/* otherwise fall through with 0 < len < 64 */

Llessthan64_aligned:
	/* copy 16 bytes at a time until we have < 16 bytes */
	cmp		r2, #16
	ldmgeia	r1!, { r3, r4, r5, r12 }
	stmgeia	r0!, { r3, r4, r5, r12 }
	subges	r2, r2, #16
	bgt		Llessthan64_aligned
	beq		Lexit
	
Llessthan16_aligned:
	/* 0 < len < 16: shift len's low 4 bits into the NZCV flags
	 * (lsl #28 puts bit 3 in N, bit 2 in Z, bit 1 in C, bit 0 in V),
	 * then use conditional loads/stores to copy 8+4+2+1 bytes as needed */
	mov		r2, r2, lsl #28
	msr		cpsr_f, r2

	ldmmiia	r1!, { r2, r3 }		/* mi (len bit 3): 8 bytes */
	ldreq	r4, [r1], #4		/* eq (len bit 2): 4 bytes */
	ldrcsh	r5, [r1], #2		/* cs (len bit 1): 2 bytes */
	ldrvsb	r12, [r1], #1		/* vs (len bit 0): 1 byte */

	stmmiia	r0!, { r2, r3 }
	streq	r4, [r0], #4
	strcsh	r5, [r0], #2
	strvsb	r12, [r0], #1
	b		Lexit

Lsimilarlyunaligned:
	/* both src and dest are unaligned in similar ways, align to dest on 32 byte boundary */
	/* r12 = -(dest << 28): its top nibble is (16 - (dest & 0xf)), the byte
	 * count needed to reach 16-byte alignment; move it into NZCV so the
	 * conditional ops below copy 1 (vs), 2 (cs), 4 (eq) and 8 (mi) bytes */
	mov		r12, r0, lsl #28
	rsb		r12, r12, #0
	msr		cpsr_f, r12

	ldrvsb	r3, [r1], #1
	ldrcsh	r4, [r1], #2
	ldreq	r5, [r1], #4

	strvsb	r3, [r0], #1
	strcsh	r4, [r0], #2
	streq	r5, [r0], #4

	ldmmiia	r1!, { r3, r4 }
	stmmiia	r0!, { r3, r4 }

	/* drop the alignment bytes from len, then fall into the 32-byte-align step */
	subs	r2, r2, r12, lsr #28
	beq		Lexit

Lunaligned_32:
	/* bring up to dest 32 byte alignment: copy one 16-byte chunk if needed */
	tst		r0, #(1 << 4)
	ldmneia	r1!, { r3, r4, r5, r12 }
	stmneia	r0!, { r3, r4, r5, r12 }
	subne	r2, r2, #16

	/* we should now be aligned, see what copy method we should use */
	cmp		r2, #64
	bge		Lmorethan64_aligned
	b		Llessthan64_aligned
	
Lbytewise2:
	/* forward copy, 2 bytes per iteration (2nd byte only when available) */
	subs	r2, r2, #2

	ldrb	r3, [r1], #1
	ldrplb	r4, [r1], #1		/* pl: at least 2 bytes remained */

	strb	r3, [r0], #1
	strplb	r4, [r0], #1

	bhi		Lbytewise2			/* hi: more than 2 bytes remained */
	b		Lexit

Lbytewise:
	/* simple bytewise forward copy; len > 0 on entry */
	ldrb	r3, [r1], #1
	subs	r2, r2, #1
	strb	r3, [r0], #1
	bne		Lbytewise
	b		Lexit

Lsmallforwardcopy:
	/* src and dest are word aligned similarly, less than 64 bytes to copy */
	cmp		r2, #4
	blt		Lbytewise2

	/* bytewise copy until src (and therefore dest) is word aligned */
	tst		r1, #3
Lwordalignloop:
	ldrneb	r3, [r1], #1
	strneb	r3, [r0], #1
	subne	r2, r2, #1
	tstne	r1, #3
	bne		Lwordalignloop

	/* word aligned now; pick the 16-byte or sub-16-byte tail copier */
	cmp		r2, #16
	bge		Llessthan64_aligned
	blt		Llessthan16_aligned

Loverlap:
	/* src and dest overlap in some way, len > 0; r3 = |dest - src| */
	cmp		r0, r1				/* if dest > src */
	bhi		Loverlap_srclower

Loverlap_destlower:
	/* dest < src: forward copy is safe as long as each copy stride
	 * stays <= the overlap distance in r3 */
	cmp		r3, #64
	bge		Lnormalforwardcopy 	/* overlap is greater than one stride of the copy, use normal copy */

	cmp		r3, #2
	bge		Lbytewise2			/* distance >= 2: 2-byte stride is safe */
	b		Lbytewise			/* distance == 1: must go byte by byte */

	/* the following routines deal with having to copy in the reverse direction */
Loverlap_srclower:
	/* src < dest, with overlap: must copy from high addresses down */

	/* src += len; dest += len; pointers now sit one past the last byte */
	add		r0, r0, r2
	add		r1, r1, r2

	/* we have to copy in reverse no matter what, test if we can we use a large block reverse copy */
	cmp		r2, #64				/* less than 64 bytes to copy? */
	cmpgt	r3, #64				/* less than 64 bytes of nonoverlap? */
	blt		Lbytewise_reverse

	/* test if src and dest are nonword aligned differently (low 2 bits) */
	mov		r3, r0, lsl #30
	cmp		r3, r1, lsl #30
	bne		Lbytewise_reverse

	/* test if src and dest are non word aligned or dest is non 16 byte aligned */
	tst		r0, #0xf
	bne		Lunaligned_reverse_similarly

	/* test for dest 32 byte alignment */
	tst		r0, #(1<<4)
	bne		Lunaligned_32_reverse_similarly

	/* 64 byte reverse block copy, src and dest aligned */
Lmorethan64_aligned_reverse:
	/* save some more registers to use in the copy */
	stmfd	sp!, { r6, r8, r10, r11 }

	/* pre-subtract 64 from the len counter to avoid an extra compare in the loop */
	sub		r2, r2, #64

L64loop_reverse:
	/* copy 64 bytes at a time, descending (ldmdb/stmdb decrement before) */
	ldmdb	r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }
#ifdef _ARM_ARCH_6
	pld		[r1, #-32]
#endif
	stmdb	r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }	
	ldmdb	r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }	
	subs	r2, r2, #64
#ifdef _ARM_ARCH_6
	pld		[r1, #-32]
#endif
	stmdb	r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }	
	bge		L64loop_reverse		/* loop while (len - 64) >= 0 */

	/* restore the scratch registers we just saved */
	ldmfd	sp!, { r6, r8, r10, r11 }

	/* fix up the len counter (previously subtracted an extra 64 from it) and test for completion */
	adds	r2, r2, #64
	beq		Lexit

Lbytewise_reverse:
	/* copy one byte at a time, descending; len > 0 on entry */
	ldrb	r3, [r1, #-1]!
	strb	r3, [r0, #-1]!
	subs	r2, r2, #1
	bne		Lbytewise_reverse
	b		Lexit

Lunaligned_reverse_similarly:
	/* both src and dest are unaligned in similar ways, align to dest on 32 byte boundary */
	/* copying backwards, so (dest & 0xf) is the byte count to peel off the
	 * high end; shift it into NZCV so the conditional ops below copy
	 * 1 (vs), 2 (cs), 4 (eq) and 8 (mi) bytes, pre-decrementing */
	mov		r12, r0, lsl #28
	msr		cpsr_f, r12

	ldrvsb	r3, [r1, #-1]!
	ldrcsh	r4, [r1, #-2]!
	ldreq	r5, [r1, #-4]!

	strvsb	r3, [r0, #-1]!
	strcsh	r4, [r0, #-2]!
	streq	r5, [r0, #-4]!

	ldmmidb	r1!, { r3, r4 }
	stmmidb	r0!, { r3, r4 }

	/* drop the alignment bytes from len, then fall into the 32-byte-align step */
	subs	r2, r2, r12, lsr #28
	beq		Lexit

Lunaligned_32_reverse_similarly:
	/* bring up to dest 32 byte alignment: copy one 16-byte chunk if needed */
	tst		r0, #(1 << 4)
	ldmnedb	r1!, { r3, r4, r5, r12 }
	stmnedb	r0!, { r3, r4, r5, r12 }
	subne	r2, r2, #16

	/* we should now be aligned, see what copy method we should use */
	cmp		r2, #64
	bge		Lmorethan64_aligned_reverse
	b		Lbytewise_reverse

	/* the following routines deal with non word aligned copies */
Lnonwordaligned_forward:
	cmp		r2, #8
	blt		Lbytewise2			/* not worth the effort below this threshold
								 * (NOTE(review): old comment said "24 bytes" but the compare is #8) */

	/* bytewise copy until src word aligned */
	tst		r1, #3
Lwordalignloop2:
	ldrneb	r3, [r1], #1
	strneb	r3, [r0], #1
	subne	r2, r2, #1
	tstne	r1, #3
	bne		Lwordalignloop2

	/* src is now word aligned; dest's low 2 bits give the phase shift */
	and		r3, r0, #3
	cmp		r3, #2
	blt		Lalign1_forward
	beq		Lalign2_forward
	bgt		Lalign3_forward

Lalign1_forward:
	/* src is word aligned, dest is 1 byte past a word boundary */
	mov		r12, r2, lsr #2		/* number of words we should copy */
	sub		r0, r0, #1			/* back dest up to a word boundary */

	/* prime the copy: keep the existing byte below dest so the
	 * full-word stores rewrite it unchanged */
	ldrb	r4, [r0]			/* load D[7:0] */

Lalign1_forward_loop:
	ldr		r3, [r1], #4		/* load S */
	orr		r4, r4, r3, lsl #8	/* D[31:8] = S[23:0] */
	str		r4, [r0], #4		/* save D */
	mov		r4, r3, lsr #24		/* D[7:0] = S[31:24], carried into next word */
	subs	r12, r12, #1
	bne		Lalign1_forward_loop

	/* finish the copy off: flush the carried byte */
	strb	r4, [r0], #1		/* save D[7:0] */

	ands	r2, r2, #3			/* 0-3 trailing bytes left */
	beq		Lexit
	b		Lbytewise2

Lalign2_forward:
	/* src is word aligned, dest is 2 bytes past a word boundary */
	mov		r12, r2, lsr #2		/* number of words we should copy */
	sub		r0, r0, #2			/* back dest up to a word boundary */

	/* prime the copy: keep the existing halfword below dest so the
	 * full-word stores rewrite it unchanged */
	ldrh	r4, [r0]			/* load D[15:0] */

Lalign2_forward_loop:
	ldr		r3, [r1], #4		/* load S */
	orr		r4, r4, r3, lsl #16	/* D[31:16] = S[15:0] */
	str		r4, [r0], #4		/* save D */
	mov		r4, r3, lsr #16		/* D[15:0] = S[31:16], carried into next word */
	subs	r12, r12, #1
	bne		Lalign2_forward_loop

	/* finish the copy off: flush the carried halfword */
	strh	r4, [r0], #2		/* save D[15:0] */

	ands	r2, r2, #3			/* 0-3 trailing bytes left */
	beq		Lexit
	b		Lbytewise2

Lalign3_forward:
	/* src is word aligned, dest is 3 bytes past a word boundary */
	mov		r12, r2, lsr #2		/* number of words we should copy */
	sub		r0, r0, #3			/* back dest up to a word boundary */

	/* prime the copy: keep the existing 3 bytes below dest so the
	 * full-word stores rewrite them unchanged */
	ldr		r4, [r0]
	and		r4, r4, #0x00ffffff	/* load D[23:0] */

Lalign3_forward_loop:
	ldr		r3, [r1], #4		/* load S */
	orr		r4, r4, r3, lsl #24	/* D[31:24] = S[7:0] */
	str		r4, [r0], #4		/* save D */
	mov		r4, r3, lsr #8		/* D[23:0] = S[31:8], carried into next word */
	subs	r12, r12, #1
	bne		Lalign3_forward_loop

	/* finish the copy off: flush the carried 3 bytes */
	strh	r4, [r0], #2		/* save D[15:0] */
	mov		r4, r4, lsr #16
	strb	r4, [r0], #1		/* save D[23:16] */

	ands	r2, r2, #3			/* 0-3 trailing bytes left */
	beq		Lexit
	b		Lbytewise2

Lexit:
	/* restore saved registers: reloading r0 yields the original dest
	 * (memcpy/memmove return value); loading pc returns to the caller */
	ldmfd	sp!, {r0, r4, r5, r7, pc}

#endif // !defined _ARM_ARCH_7 || defined VARIANT_DYLD