Use .L prefix for all local labels.

This commit is contained in:
briggs 2002-08-15 18:30:36 +00:00
parent 8d5eb3e93d
commit b98931f62e
3 changed files with 221 additions and 221 deletions

View File

@@ -1,4 +1,4 @@
/* $NetBSD: divsi3.S,v 1.2 2001/11/13 20:06:40 chris Exp $ */
/* $NetBSD: divsi3.S,v 1.3 2002/08/15 18:30:36 briggs Exp $ */
/*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
@@ -24,7 +24,7 @@
ENTRY(__umodsi3)
stmfd sp!, {lr}
sub sp, sp, #4 /* align stack */
bl L_udivide
bl .L_udivide
add sp, sp, #4 /* unalign stack */
mov r0, r1
#ifdef __APCS_26__
@@ -36,7 +36,7 @@ ENTRY(__umodsi3)
ENTRY(__modsi3)
stmfd sp!, {lr}
sub sp, sp, #4 /* align stack */
bl L_divide
bl .L_divide
add sp, sp, #4 /* unalign stack */
mov r0, r1
#ifdef __APCS_26__
@@ -45,7 +45,7 @@ ENTRY(__modsi3)
ldmfd sp!, {pc}
#endif
L_overflow:
.L_overflow:
#if !defined(_KERNEL) && !defined(_STANDALONE)
mov r0, #8 /* SIGFPE */
bl PIC_SYM(_C_LABEL(raise), PLT) /* raise it */
@@ -61,23 +61,23 @@ L_overflow:
#endif
ENTRY(__udivsi3)
L_udivide: /* r0 = r0 / r1; r1 = r0 % r1 */
.L_udivide: /* r0 = r0 / r1; r1 = r0 % r1 */
eor r0, r1, r0
eor r1, r0, r1
eor r0, r1, r0
/* r0 = r1 / r0; r1 = r1 % r0 */
cmp r0, #1
bcc L_overflow
beq L_divide_l0
bcc .L_overflow
beq .L_divide_l0
mov ip, #0
movs r1, r1
bpl L_divide_l1
bpl .L_divide_l1
orr ip, ip, #0x20000000 /* ip bit 0x20000000 = -ve r1 */
movs r1, r1, lsr #1
orrcs ip, ip, #0x10000000 /* ip bit 0x10000000 = bit 0 of r1 */
b L_divide_l1
b .L_divide_l1
L_divide_l0: /* r0 == 1 */
.L_divide_l0: /* r0 == 1 */
mov r0, r1
mov r1, #0
#ifdef __APCS_26__
@@ -87,14 +87,14 @@ L_divide_l0: /* r0 == 1 */
#endif
ENTRY(__divsi3)
L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
.L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
eor r0, r1, r0
eor r1, r0, r1
eor r0, r1, r0
/* r0 = r1 / r0; r1 = r1 % r0 */
cmp r0, #1
bcc L_overflow
beq L_divide_l0
bcc .L_overflow
beq .L_divide_l0
ands ip, r0, #0x80000000
rsbmi r0, r0, #0
ands r2, r1, #0x80000000
@@ -103,7 +103,7 @@ L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
orr ip, r2, ip, lsr #1 /* ip bit 0x40000000 = -ve division */
/* ip bit 0x80000000 = -ve remainder */
L_divide_l1:
.L_divide_l1:
mov r2, #1
mov r3, #0
@@ -112,276 +112,276 @@ L_divide_l1:
* careful when shifting the divisor. Test this.
*/
movs r1,r1
bpl L_old_code
bpl .L_old_code
/*
* At this point, the highest bit of r1 is known to be set.
* We abuse this below in the tst instructions.
*/
tst r1, r0 /*, lsl #0 */
bmi L_divide_b1
bmi .L_divide_b1
tst r1, r0, lsl #1
bmi L_divide_b2
bmi .L_divide_b2
tst r1, r0, lsl #2
bmi L_divide_b3
bmi .L_divide_b3
tst r1, r0, lsl #3
bmi L_divide_b4
bmi .L_divide_b4
tst r1, r0, lsl #4
bmi L_divide_b5
bmi .L_divide_b5
tst r1, r0, lsl #5
bmi L_divide_b6
bmi .L_divide_b6
tst r1, r0, lsl #6
bmi L_divide_b7
bmi .L_divide_b7
tst r1, r0, lsl #7
bmi L_divide_b8
bmi .L_divide_b8
tst r1, r0, lsl #8
bmi L_divide_b9
bmi .L_divide_b9
tst r1, r0, lsl #9
bmi L_divide_b10
bmi .L_divide_b10
tst r1, r0, lsl #10
bmi L_divide_b11
bmi .L_divide_b11
tst r1, r0, lsl #11
bmi L_divide_b12
bmi .L_divide_b12
tst r1, r0, lsl #12
bmi L_divide_b13
bmi .L_divide_b13
tst r1, r0, lsl #13
bmi L_divide_b14
bmi .L_divide_b14
tst r1, r0, lsl #14
bmi L_divide_b15
bmi .L_divide_b15
tst r1, r0, lsl #15
bmi L_divide_b16
bmi .L_divide_b16
tst r1, r0, lsl #16
bmi L_divide_b17
bmi .L_divide_b17
tst r1, r0, lsl #17
bmi L_divide_b18
bmi .L_divide_b18
tst r1, r0, lsl #18
bmi L_divide_b19
bmi .L_divide_b19
tst r1, r0, lsl #19
bmi L_divide_b20
bmi .L_divide_b20
tst r1, r0, lsl #20
bmi L_divide_b21
bmi .L_divide_b21
tst r1, r0, lsl #21
bmi L_divide_b22
bmi .L_divide_b22
tst r1, r0, lsl #22
bmi L_divide_b23
bmi .L_divide_b23
tst r1, r0, lsl #23
bmi L_divide_b24
bmi .L_divide_b24
tst r1, r0, lsl #24
bmi L_divide_b25
bmi .L_divide_b25
tst r1, r0, lsl #25
bmi L_divide_b26
bmi .L_divide_b26
tst r1, r0, lsl #26
bmi L_divide_b27
bmi .L_divide_b27
tst r1, r0, lsl #27
bmi L_divide_b28
bmi .L_divide_b28
tst r1, r0, lsl #28
bmi L_divide_b29
bmi .L_divide_b29
tst r1, r0, lsl #29
bmi L_divide_b30
bmi .L_divide_b30
tst r1, r0, lsl #30
bmi L_divide_b31
bmi .L_divide_b31
/*
* instead of:
* tst r1, r0, lsl #31
* bmi L_divide_b32
* bmi .L_divide_b32
*/
b L_divide_b32
b .L_divide_b32
L_old_code:
.L_old_code:
cmp r1, r0
bcc L_divide_b0
bcc .L_divide_b0
cmp r1, r0, lsl #1
bcc L_divide_b1
bcc .L_divide_b1
cmp r1, r0, lsl #2
bcc L_divide_b2
bcc .L_divide_b2
cmp r1, r0, lsl #3
bcc L_divide_b3
bcc .L_divide_b3
cmp r1, r0, lsl #4
bcc L_divide_b4
bcc .L_divide_b4
cmp r1, r0, lsl #5
bcc L_divide_b5
bcc .L_divide_b5
cmp r1, r0, lsl #6
bcc L_divide_b6
bcc .L_divide_b6
cmp r1, r0, lsl #7
bcc L_divide_b7
bcc .L_divide_b7
cmp r1, r0, lsl #8
bcc L_divide_b8
bcc .L_divide_b8
cmp r1, r0, lsl #9
bcc L_divide_b9
bcc .L_divide_b9
cmp r1, r0, lsl #10
bcc L_divide_b10
bcc .L_divide_b10
cmp r1, r0, lsl #11
bcc L_divide_b11
bcc .L_divide_b11
cmp r1, r0, lsl #12
bcc L_divide_b12
bcc .L_divide_b12
cmp r1, r0, lsl #13
bcc L_divide_b13
bcc .L_divide_b13
cmp r1, r0, lsl #14
bcc L_divide_b14
bcc .L_divide_b14
cmp r1, r0, lsl #15
bcc L_divide_b15
bcc .L_divide_b15
cmp r1, r0, lsl #16
bcc L_divide_b16
bcc .L_divide_b16
cmp r1, r0, lsl #17
bcc L_divide_b17
bcc .L_divide_b17
cmp r1, r0, lsl #18
bcc L_divide_b18
bcc .L_divide_b18
cmp r1, r0, lsl #19
bcc L_divide_b19
bcc .L_divide_b19
cmp r1, r0, lsl #20
bcc L_divide_b20
bcc .L_divide_b20
cmp r1, r0, lsl #21
bcc L_divide_b21
bcc .L_divide_b21
cmp r1, r0, lsl #22
bcc L_divide_b22
bcc .L_divide_b22
cmp r1, r0, lsl #23
bcc L_divide_b23
bcc .L_divide_b23
cmp r1, r0, lsl #24
bcc L_divide_b24
bcc .L_divide_b24
cmp r1, r0, lsl #25
bcc L_divide_b25
bcc .L_divide_b25
cmp r1, r0, lsl #26
bcc L_divide_b26
bcc .L_divide_b26
cmp r1, r0, lsl #27
bcc L_divide_b27
bcc .L_divide_b27
cmp r1, r0, lsl #28
bcc L_divide_b28
bcc .L_divide_b28
cmp r1, r0, lsl #29
bcc L_divide_b29
bcc .L_divide_b29
cmp r1, r0, lsl #30
bcc L_divide_b30
L_divide_b32:
bcc .L_divide_b30
.L_divide_b32:
cmp r1, r0, lsl #31
subhs r1, r1,r0, lsl #31
addhs r3, r3,r2, lsl #31
L_divide_b31:
.L_divide_b31:
cmp r1, r0, lsl #30
subhs r1, r1,r0, lsl #30
addhs r3, r3,r2, lsl #30
L_divide_b30:
.L_divide_b30:
cmp r1, r0, lsl #29
subhs r1, r1,r0, lsl #29
addhs r3, r3,r2, lsl #29
L_divide_b29:
.L_divide_b29:
cmp r1, r0, lsl #28
subhs r1, r1,r0, lsl #28
addhs r3, r3,r2, lsl #28
L_divide_b28:
.L_divide_b28:
cmp r1, r0, lsl #27
subhs r1, r1,r0, lsl #27
addhs r3, r3,r2, lsl #27
L_divide_b27:
.L_divide_b27:
cmp r1, r0, lsl #26
subhs r1, r1,r0, lsl #26
addhs r3, r3,r2, lsl #26
L_divide_b26:
.L_divide_b26:
cmp r1, r0, lsl #25
subhs r1, r1,r0, lsl #25
addhs r3, r3,r2, lsl #25
L_divide_b25:
.L_divide_b25:
cmp r1, r0, lsl #24
subhs r1, r1,r0, lsl #24
addhs r3, r3,r2, lsl #24
L_divide_b24:
.L_divide_b24:
cmp r1, r0, lsl #23
subhs r1, r1,r0, lsl #23
addhs r3, r3,r2, lsl #23
L_divide_b23:
.L_divide_b23:
cmp r1, r0, lsl #22
subhs r1, r1,r0, lsl #22
addhs r3, r3,r2, lsl #22
L_divide_b22:
.L_divide_b22:
cmp r1, r0, lsl #21
subhs r1, r1,r0, lsl #21
addhs r3, r3,r2, lsl #21
L_divide_b21:
.L_divide_b21:
cmp r1, r0, lsl #20
subhs r1, r1,r0, lsl #20
addhs r3, r3,r2, lsl #20
L_divide_b20:
.L_divide_b20:
cmp r1, r0, lsl #19
subhs r1, r1,r0, lsl #19
addhs r3, r3,r2, lsl #19
L_divide_b19:
.L_divide_b19:
cmp r1, r0, lsl #18
subhs r1, r1,r0, lsl #18
addhs r3, r3,r2, lsl #18
L_divide_b18:
.L_divide_b18:
cmp r1, r0, lsl #17
subhs r1, r1,r0, lsl #17
addhs r3, r3,r2, lsl #17
L_divide_b17:
.L_divide_b17:
cmp r1, r0, lsl #16
subhs r1, r1,r0, lsl #16
addhs r3, r3,r2, lsl #16
L_divide_b16:
.L_divide_b16:
cmp r1, r0, lsl #15
subhs r1, r1,r0, lsl #15
addhs r3, r3,r2, lsl #15
L_divide_b15:
.L_divide_b15:
cmp r1, r0, lsl #14
subhs r1, r1,r0, lsl #14
addhs r3, r3,r2, lsl #14
L_divide_b14:
.L_divide_b14:
cmp r1, r0, lsl #13
subhs r1, r1,r0, lsl #13
addhs r3, r3,r2, lsl #13
L_divide_b13:
.L_divide_b13:
cmp r1, r0, lsl #12
subhs r1, r1,r0, lsl #12
addhs r3, r3,r2, lsl #12
L_divide_b12:
.L_divide_b12:
cmp r1, r0, lsl #11
subhs r1, r1,r0, lsl #11
addhs r3, r3,r2, lsl #11
L_divide_b11:
.L_divide_b11:
cmp r1, r0, lsl #10
subhs r1, r1,r0, lsl #10
addhs r3, r3,r2, lsl #10
L_divide_b10:
.L_divide_b10:
cmp r1, r0, lsl #9
subhs r1, r1,r0, lsl #9
addhs r3, r3,r2, lsl #9
L_divide_b9:
.L_divide_b9:
cmp r1, r0, lsl #8
subhs r1, r1,r0, lsl #8
addhs r3, r3,r2, lsl #8
L_divide_b8:
.L_divide_b8:
cmp r1, r0, lsl #7
subhs r1, r1,r0, lsl #7
addhs r3, r3,r2, lsl #7
L_divide_b7:
.L_divide_b7:
cmp r1, r0, lsl #6
subhs r1, r1,r0, lsl #6
addhs r3, r3,r2, lsl #6
L_divide_b6:
.L_divide_b6:
cmp r1, r0, lsl #5
subhs r1, r1,r0, lsl #5
addhs r3, r3,r2, lsl #5
L_divide_b5:
.L_divide_b5:
cmp r1, r0, lsl #4
subhs r1, r1,r0, lsl #4
addhs r3, r3,r2, lsl #4
L_divide_b4:
.L_divide_b4:
cmp r1, r0, lsl #3
subhs r1, r1,r0, lsl #3
addhs r3, r3,r2, lsl #3
L_divide_b3:
.L_divide_b3:
cmp r1, r0, lsl #2
subhs r1, r1,r0, lsl #2
addhs r3, r3,r2, lsl #2
L_divide_b2:
.L_divide_b2:
cmp r1, r0, lsl #1
subhs r1, r1,r0, lsl #1
addhs r3, r3,r2, lsl #1
L_divide_b1:
.L_divide_b1:
cmp r1, r0
subhs r1, r1, r0
addhs r3, r3, r2
L_divide_b0:
.L_divide_b0:
tst ip, #0x20000000
bne L_udivide_l1
bne .L_udivide_l1
mov r0, r3
cmp ip, #0
rsbmi r1, r1, #0
@@ -394,7 +394,7 @@ L_divide_b0:
mov pc, lr
#endif
L_udivide_l1:
.L_udivide_l1:
tst ip, #0x10000000
mov r1, r1, lsl #1
orrne r1, r1, #1

View File

@@ -1,4 +1,4 @@
/* $NetBSD: memcpy.S,v 1.2 2001/11/20 00:29:20 chris Exp $ */
/* $NetBSD: memcpy.S,v 1.3 2002/08/15 18:30:36 briggs Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -84,33 +84,33 @@ ENTRY_NP(memmove)
/* save leaf functions having to store this away */
stmdb sp!, {r0, lr} /* memcpy() returns dest addr */
bcc Lmemcpy_backwards
bcc .Lmemcpy_backwards
/* start of forwards copy */
subs r2, r2, #4
blt Lmemcpy_fl4 /* less than 4 bytes */
blt .Lmemcpy_fl4 /* less than 4 bytes */
ands r12, r0, #3
bne Lmemcpy_fdestul /* oh unaligned destination addr */
bne .Lmemcpy_fdestul /* oh unaligned destination addr */
ands r12, r1, #3
bne Lmemcpy_fsrcul /* oh unaligned source addr */
bne .Lmemcpy_fsrcul /* oh unaligned source addr */
Lmemcpy_ft8:
.Lmemcpy_ft8:
/* We have aligned source and destination */
subs r2, r2, #8
blt Lmemcpy_fl12 /* less than 12 bytes (4 from above) */
blt .Lmemcpy_fl12 /* less than 12 bytes (4 from above) */
subs r2, r2, #0x14
blt Lmemcpy_fl32 /* less than 32 bytes (12 from above) */
blt .Lmemcpy_fl32 /* less than 32 bytes (12 from above) */
stmdb sp!, {r4} /* borrow r4 */
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
Lmemcpy_floop32:
.Lmemcpy_floop32:
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
subs r2, r2, #0x20
bge Lmemcpy_floop32
bge .Lmemcpy_floop32
cmn r2, #0x10
ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
@@ -118,19 +118,19 @@ Lmemcpy_floop32:
subge r2, r2, #0x10
ldmia sp!, {r4} /* return r4 */
Lmemcpy_fl32:
.Lmemcpy_fl32:
adds r2, r2, #0x14
/* blat 12 bytes at a time */
Lmemcpy_floop12:
.Lmemcpy_floop12:
ldmgeia r1!, {r3, r12, lr}
stmgeia r0!, {r3, r12, lr}
subges r2, r2, #0x0c
bge Lmemcpy_floop12
bge .Lmemcpy_floop12
Lmemcpy_fl12:
.Lmemcpy_fl12:
adds r2, r2, #8
blt Lmemcpy_fl4
blt .Lmemcpy_fl4
subs r2, r2, #4
ldrlt r3, [r1], #4
@@ -139,7 +139,7 @@ Lmemcpy_fl12:
stmgeia r0!, {r3, r12}
subge r2, r2, #4
Lmemcpy_fl4:
.Lmemcpy_fl4:
/* less than 4 bytes to go */
adds r2, r2, #4
#ifdef __APCS_26_
@@ -162,7 +162,7 @@ Lmemcpy_fl4:
#endif
/* erg - unaligned destination */
Lmemcpy_fdestul:
.Lmemcpy_fdestul:
rsb r12, r12, #4
cmp r12, #2
@@ -174,25 +174,25 @@ Lmemcpy_fdestul:
ldrgtb r3, [r1], #1
strgtb r3, [r0], #1
subs r2, r2, r12
blt Lmemcpy_fl4 /* less the 4 bytes */
blt .Lmemcpy_fl4 /* less the 4 bytes */
ands r12, r1, #3
beq Lmemcpy_ft8 /* we have an aligned source */
beq .Lmemcpy_ft8 /* we have an aligned source */
/* erg - unaligned source */
/* This is where it gets nasty ... */
Lmemcpy_fsrcul:
.Lmemcpy_fsrcul:
bic r1, r1, #3
ldr lr, [r1], #4
cmp r12, #2
bgt Lmemcpy_fsrcul3
beq Lmemcpy_fsrcul2
bgt .Lmemcpy_fsrcul3
beq .Lmemcpy_fsrcul2
cmp r2, #0x0c
blt Lmemcpy_fsrcul1loop4
blt .Lmemcpy_fsrcul1loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
Lmemcpy_fsrcul1loop16:
.Lmemcpy_fsrcul1loop16:
mov r3, lr, lsr #8
ldmia r1!, {r4, r5, r12, lr}
orr r3, r3, r4, lsl #24
@@ -204,30 +204,30 @@ Lmemcpy_fsrcul1loop16:
orr r12, r12, lr, lsl #24
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge Lmemcpy_fsrcul1loop16
bge .Lmemcpy_fsrcul1loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt Lmemcpy_fsrcul1l4
blt .Lmemcpy_fsrcul1l4
Lmemcpy_fsrcul1loop4:
.Lmemcpy_fsrcul1loop4:
mov r12, lr, lsr #8
ldr lr, [r1], #4
orr r12, r12, lr, lsl #24
str r12, [r0], #4
subs r2, r2, #4
bge Lmemcpy_fsrcul1loop4
bge .Lmemcpy_fsrcul1loop4
Lmemcpy_fsrcul1l4:
.Lmemcpy_fsrcul1l4:
sub r1, r1, #3
b Lmemcpy_fl4
b .Lmemcpy_fl4
Lmemcpy_fsrcul2:
.Lmemcpy_fsrcul2:
cmp r2, #0x0c
blt Lmemcpy_fsrcul2loop4
blt .Lmemcpy_fsrcul2loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
Lmemcpy_fsrcul2loop16:
.Lmemcpy_fsrcul2loop16:
mov r3, lr, lsr #16
ldmia r1!, {r4, r5, r12, lr}
orr r3, r3, r4, lsl #16
@@ -239,30 +239,30 @@ Lmemcpy_fsrcul2loop16:
orr r12, r12, lr, lsl #16
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge Lmemcpy_fsrcul2loop16
bge .Lmemcpy_fsrcul2loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt Lmemcpy_fsrcul2l4
blt .Lmemcpy_fsrcul2l4
Lmemcpy_fsrcul2loop4:
.Lmemcpy_fsrcul2loop4:
mov r12, lr, lsr #16
ldr lr, [r1], #4
orr r12, r12, lr, lsl #16
str r12, [r0], #4
subs r2, r2, #4
bge Lmemcpy_fsrcul2loop4
bge .Lmemcpy_fsrcul2loop4
Lmemcpy_fsrcul2l4:
.Lmemcpy_fsrcul2l4:
sub r1, r1, #2
b Lmemcpy_fl4
b .Lmemcpy_fl4
Lmemcpy_fsrcul3:
.Lmemcpy_fsrcul3:
cmp r2, #0x0c
blt Lmemcpy_fsrcul3loop4
blt .Lmemcpy_fsrcul3loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
Lmemcpy_fsrcul3loop16:
.Lmemcpy_fsrcul3loop16:
mov r3, lr, lsr #24
ldmia r1!, {r4, r5, r12, lr}
orr r3, r3, r4, lsl #8
@@ -274,52 +274,52 @@ Lmemcpy_fsrcul3loop16:
orr r12, r12, lr, lsl #8
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge Lmemcpy_fsrcul3loop16
bge .Lmemcpy_fsrcul3loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt Lmemcpy_fsrcul3l4
blt .Lmemcpy_fsrcul3l4
Lmemcpy_fsrcul3loop4:
.Lmemcpy_fsrcul3loop4:
mov r12, lr, lsr #24
ldr lr, [r1], #4
orr r12, r12, lr, lsl #8
str r12, [r0], #4
subs r2, r2, #4
bge Lmemcpy_fsrcul3loop4
bge .Lmemcpy_fsrcul3loop4
Lmemcpy_fsrcul3l4:
.Lmemcpy_fsrcul3l4:
sub r1, r1, #1
b Lmemcpy_fl4
b .Lmemcpy_fl4
Lmemcpy_backwards:
.Lmemcpy_backwards:
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
blt Lmemcpy_bl4 /* less than 4 bytes */
blt .Lmemcpy_bl4 /* less than 4 bytes */
ands r12, r0, #3
bne Lmemcpy_bdestul /* oh unaligned destination addr */
bne .Lmemcpy_bdestul /* oh unaligned destination addr */
ands r12, r1, #3
bne Lmemcpy_bsrcul /* oh unaligned source addr */
bne .Lmemcpy_bsrcul /* oh unaligned source addr */
Lmemcpy_bt8:
.Lmemcpy_bt8:
/* We have aligned source and destination */
subs r2, r2, #8
blt Lmemcpy_bl12 /* less than 12 bytes (4 from above) */
blt .Lmemcpy_bl12 /* less than 12 bytes (4 from above) */
stmdb sp!, {r4}
subs r2, r2, #0x14 /* less than 32 bytes (12 from above) */
blt Lmemcpy_bl32
blt .Lmemcpy_bl32
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
Lmemcpy_bloop32:
.Lmemcpy_bloop32:
ldmdb r1!, {r3, r4, r12, lr}
stmdb r0!, {r3, r4, r12, lr}
ldmdb r1!, {r3, r4, r12, lr}
stmdb r0!, {r3, r4, r12, lr}
subs r2, r2, #0x20
bge Lmemcpy_bloop32
bge .Lmemcpy_bloop32
Lmemcpy_bl32:
.Lmemcpy_bl32:
cmn r2, #0x10
ldmgedb r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
stmgedb r0!, {r3, r4, r12, lr}
@@ -330,9 +330,9 @@ Lmemcpy_bl32:
subge r2, r2, #0x0c
ldmia sp!, {r4}
Lmemcpy_bl12:
.Lmemcpy_bl12:
adds r2, r2, #8
blt Lmemcpy_bl4
blt .Lmemcpy_bl4
subs r2, r2, #4
ldrlt r3, [r1, #-4]!
strlt r3, [r0, #-4]!
@@ -340,7 +340,7 @@ Lmemcpy_bl12:
stmgedb r0!, {r3, r12}
subge r2, r2, #4
Lmemcpy_bl4:
.Lmemcpy_bl4:
/* less than 4 bytes to go */
adds r2, r2, #4
#ifdef __APCS_26__
@@ -364,7 +364,7 @@ Lmemcpy_bl4:
#endif
/* erg - unaligned destination */
Lmemcpy_bdestul:
.Lmemcpy_bdestul:
cmp r12, #2
/* align destination with byte copies */
@@ -375,24 +375,24 @@ Lmemcpy_bdestul:
ldrgtb r3, [r1, #-1]!
strgtb r3, [r0, #-1]!
subs r2, r2, r12
blt Lmemcpy_bl4 /* less than 4 bytes to go */
blt .Lmemcpy_bl4 /* less than 4 bytes to go */
ands r12, r1, #3
beq Lmemcpy_bt8 /* we have an aligned source */
beq .Lmemcpy_bt8 /* we have an aligned source */
/* erg - unaligned source */
/* This is where it gets nasty ... */
Lmemcpy_bsrcul:
.Lmemcpy_bsrcul:
bic r1, r1, #3
ldr r3, [r1, #0]
cmp r12, #2
blt Lmemcpy_bsrcul1
beq Lmemcpy_bsrcul2
blt .Lmemcpy_bsrcul1
beq .Lmemcpy_bsrcul2
cmp r2, #0x0c
blt Lmemcpy_bsrcul3loop4
blt .Lmemcpy_bsrcul3loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
Lmemcpy_bsrcul3loop16:
.Lmemcpy_bsrcul3loop16:
mov lr, r3, lsl #8
ldmdb r1!, {r3-r5, r12}
orr lr, lr, r12, lsr #24
@@ -404,30 +404,30 @@ Lmemcpy_bsrcul3loop16:
orr r4, r4, r3, lsr #24
stmdb r0!, {r4, r5, r12, lr}
subs r2, r2, #0x10
bge Lmemcpy_bsrcul3loop16
bge .Lmemcpy_bsrcul3loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt Lmemcpy_bsrcul3l4
blt .Lmemcpy_bsrcul3l4
Lmemcpy_bsrcul3loop4:
.Lmemcpy_bsrcul3loop4:
mov r12, r3, lsl #8
ldr r3, [r1, #-4]!
orr r12, r12, r3, lsr #24
str r12, [r0, #-4]!
subs r2, r2, #4
bge Lmemcpy_bsrcul3loop4
bge .Lmemcpy_bsrcul3loop4
Lmemcpy_bsrcul3l4:
.Lmemcpy_bsrcul3l4:
add r1, r1, #3
b Lmemcpy_bl4
b .Lmemcpy_bl4
Lmemcpy_bsrcul2:
.Lmemcpy_bsrcul2:
cmp r2, #0x0c
blt Lmemcpy_bsrcul2loop4
blt .Lmemcpy_bsrcul2loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
Lmemcpy_bsrcul2loop16:
.Lmemcpy_bsrcul2loop16:
mov lr, r3, lsl #16
ldmdb r1!, {r3-r5, r12}
orr lr, lr, r12, lsr #16
@@ -439,30 +439,30 @@ Lmemcpy_bsrcul2loop16:
orr r4, r4, r3, lsr #16
stmdb r0!, {r4, r5, r12, lr}
subs r2, r2, #0x10
bge Lmemcpy_bsrcul2loop16
bge .Lmemcpy_bsrcul2loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt Lmemcpy_bsrcul2l4
blt .Lmemcpy_bsrcul2l4
Lmemcpy_bsrcul2loop4:
.Lmemcpy_bsrcul2loop4:
mov r12, r3, lsl #16
ldr r3, [r1, #-4]!
orr r12, r12, r3, lsr #16
str r12, [r0, #-4]!
subs r2, r2, #4
bge Lmemcpy_bsrcul2loop4
bge .Lmemcpy_bsrcul2loop4
Lmemcpy_bsrcul2l4:
.Lmemcpy_bsrcul2l4:
add r1, r1, #2
b Lmemcpy_bl4
b .Lmemcpy_bl4
Lmemcpy_bsrcul1:
.Lmemcpy_bsrcul1:
cmp r2, #0x0c
blt Lmemcpy_bsrcul1loop4
blt .Lmemcpy_bsrcul1loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
Lmemcpy_bsrcul1loop32:
.Lmemcpy_bsrcul1loop32:
mov lr, r3, lsl #24
ldmdb r1!, {r3-r5, r12}
orr lr, lr, r12, lsr #8
@@ -474,20 +474,20 @@ Lmemcpy_bsrcul1loop32:
orr r4, r4, r3, lsr #8
stmdb r0!, {r4, r5, r12, lr}
subs r2, r2, #0x10
bge Lmemcpy_bsrcul1loop32
bge .Lmemcpy_bsrcul1loop32
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt Lmemcpy_bsrcul1l4
blt .Lmemcpy_bsrcul1l4
Lmemcpy_bsrcul1loop4:
.Lmemcpy_bsrcul1loop4:
mov r12, r3, lsl #24
ldr r3, [r1, #-4]!
orr r12, r12, r3, lsr #8
str r12, [r0, #-4]!
subs r2, r2, #4
bge Lmemcpy_bsrcul1loop4
bge .Lmemcpy_bsrcul1loop4
Lmemcpy_bsrcul1l4:
.Lmemcpy_bsrcul1l4:
add r1, r1, #1
b Lmemcpy_bl4
b .Lmemcpy_bl4

View File

@@ -1,4 +1,4 @@
/* $NetBSD: memset.S,v 1.1 2000/12/29 20:51:57 bjh21 Exp $ */
/* $NetBSD: memset.S,v 1.2 2002/08/15 18:30:36 briggs Exp $ */
/*
* Copyright (c) 1995 Mark Brinicombe.
@@ -51,12 +51,12 @@ ENTRY(memset)
and r1, r1, #0x000000ff /* We write bytes */
cmp r2, #0x00000004 /* Do we have less than 4 bytes */
blt Lmemset_lessthanfour
blt .Lmemset_lessthanfour
/* Ok first we will word align the address */
ands r3, r0, #0x00000003 /* Get the bottom two bits */
beq Lmemset_addraligned /* The address is word aligned */
beq .Lmemset_addraligned /* The address is word aligned */
rsb r3, r3, #0x00000004
sub r2, r2, r3
@@ -66,11 +66,11 @@ ENTRY(memset)
strgtb r1, [r0], #0x0001 /* and a third */
cmp r2, #0x00000004
blt Lmemset_lessthanfour
blt .Lmemset_lessthanfour
/* Now we must be word aligned */
Lmemset_addraligned:
.Lmemset_addraligned:
orr r3, r1, r1, lsl #8 /* Repeat the byte into a word */
orr r3, r3, r3, lsl #16
@@ -78,7 +78,7 @@ Lmemset_addraligned:
/* We know we have at least 4 bytes ... */
cmp r2, #0x00000020 /* If less than 32 then use words */
blt Lmemset_lessthan32
blt .Lmemset_lessthan32
/* We have at least 32 so lets use quad words */
@@ -87,32 +87,32 @@ Lmemset_addraligned:
mov r5, r3
mov r6, r3
Lmemset_loop16:
.Lmemset_loop16:
stmia r0!, {r3-r6} /* Store 16 bytes */
sub r2, r2, #0x00000010 /* Adjust count */
cmp r2, #0x00000010 /* Still got at least 16 bytes ? */
bgt Lmemset_loop16
bgt .Lmemset_loop16
ldmfd sp!, {r4-r6} /* Restore registers */
/* Do we need to set some words as well ? */
cmp r2, #0x00000004
blt Lmemset_lessthanfour
blt .Lmemset_lessthanfour
/* Have either less than 16 or less than 32 depending on route taken */
Lmemset_lessthan32:
.Lmemset_lessthan32:
/* We have at least 4 bytes so copy as words */
Lmemset_loop4:
.Lmemset_loop4:
str r3, [r0], #0x0004
sub r2, r2, #0x0004
cmp r2, #0x00000004
bge Lmemset_loop4
bge .Lmemset_loop4
Lmemset_lessthanfour:
.Lmemset_lessthanfour:
cmp r2, #0x00000000
ldmeqfd sp!, {r0}
#ifdef __APCS_26__