Add a library for ARM systems with VFP which implements the soft-float ABI

but uses VFP instructions to do the actual work.  This should give near
hard-float performance without requiring compiler changes.
This commit is contained in:
matt 2013-01-28 17:04:40 +00:00
parent b6b60ee9f3
commit b7de22daaa
4 changed files with 341 additions and 0 deletions

12
lib/libc_vfp/Makefile Normal file
View File

@ -0,0 +1,12 @@
# $NetBSD: Makefile,v 1.1 2013/01/28 17:04:40 matt Exp $
#
# Build libc_vfp: soft-float ABI entry points implemented with VFP instructions.
LIB= c_vfp
.include <bsd.own.mk>
# Let the assembler accept VFP instructions in the .S sources.
CPUFLAGS+= -mfpu=vfp
SRCS= vfpsf.S vfpdf.S
.include <bsd.lib.mk>

View File

@ -0,0 +1,5 @@
# $NetBSD: shlib_version,v 1.1 2013/01/28 17:04:40 matt Exp $
# Remember to update distrib/sets/lists/base/shl.* when changing
#
# Initial shared-library version for libc_vfp.
major=0
minor=0

165
lib/libc_vfp/vfpdf.S Normal file
View File

@ -0,0 +1,165 @@
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas of 3am Software Foundry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arm/asm.h>
RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
/*
* This file provides softfloat compatible routines which use VFP instructions
* to do the actual work. This should give near hard-float performance while
* being compatible with soft-float code.
*
* This file implements the double precision floating point routines.
*/
/*
 * In the soft-float ABI a double travels in a core register pair
 * (r0/r1 for the first argument, r2/r3 for the second, r0/r1 for the
 * return value).  Which register of the pair holds the high word of
 * the double depends on endianness, so the core<->VFP moves swap the
 * registers on big-endian.
 */
#ifdef __ARMEL__
#define vmov_arg0 vmov d0, r0, r1
#define vmov_arg1 vmov d1, r2, r3
#define vmov_ret vmov r0, r1, d0
#else
#define vmov_arg0 vmov d0, r1, r0
#define vmov_arg1 vmov d1, r3, r2
#define vmov_ret vmov r1, r0, d0
#endif
/* Load both double arguments: a -> d0, b -> d1. */
#define vmov_args vmov_arg0; vmov_arg1
/*
 * double __adddf3(double a, double b)
 * Soft-float ABI: a in r0/r1, b in r2/r3, result in r0/r1.
 */
ENTRY(__adddf3)
vmov_args /* a -> d0, b -> d1 */
vadd.f64 d0, d0, d1
vmov_ret /* d0 -> r0/r1 */
RET
END(__adddf3)
/*
 * double __subdf3(double a, double b)
 * Returns a - b.  Soft-float ABI: a in r0/r1, b in r2/r3, result in r0/r1.
 */
ENTRY(__subdf3)
vmov_args
vsub.f64 d0, d0, d1
vmov_ret
RET
END(__subdf3)
/*
 * double __muldf3(double a, double b)
 * Returns a * b.  Soft-float ABI: a in r0/r1, b in r2/r3, result in r0/r1.
 */
ENTRY(__muldf3)
vmov_args
vmul.f64 d0, d0, d1
vmov_ret
RET
END(__muldf3)
/*
 * double __divdf3(double a, double b)
 * Returns a / b.  Soft-float ABI: a in r0/r1, b in r2/r3, result in r0/r1.
 */
ENTRY(__divdf3)
vmov_args
vdiv.f64 d0, d0, d1
vmov_ret
RET
END(__divdf3)
/*
 * double __negdf2(double a)
 * Returns -a (sign-bit flip).  Only the first argument pair is loaded.
 */
ENTRY(__negdf2)
vmov_arg0
vneg.f64 d0, d0
vmov_ret
RET
END(__negdf2)
/*
 * double __extendsfdf2(float a)
 * Widen single to double.  a arrives in r0; result returned in r0/r1.
 */
ENTRY(__extendsfdf2)
vmov s0, r0
vcvt.f64.f32 d0, s0
vmov_ret
RET
END(__extendsfdf2)
/*
 * int32_t __fixdfsi(double a)
 * Convert double to signed 32-bit integer (vcvt to integer truncates,
 * i.e. rounds toward zero).  Result in r0.
 */
ENTRY(__fixdfsi)
vmov_arg0
vcvt.s32.f64 s0, d0
vmov r0, s0
RET
END(__fixdfsi)
/*
 * uint32_t __fixunsdfsi(double a)
 * Convert double to unsigned 32-bit integer, rounding toward zero.
 * Result in r0.
 */
ENTRY(__fixunsdfsi)
vmov_arg0
vcvt.u32.f64 s0, d0
vmov r0, s0
RET
END(__fixunsdfsi)
/*
 * double __floatsidf(int32_t a)
 * Convert signed 32-bit integer to double (exact).  a in r0, result in r0/r1.
 */
ENTRY(__floatsidf)
vmov s0, r0
vcvt.f64.s32 d0, s0
vmov_ret
RET
END(__floatsidf)
/*
 * double __floatunsidf(uint32_t a)
 * Convert unsigned 32-bit integer to double (exact).  a in r0, result in r0/r1.
 */
ENTRY(__floatunsidf)
vmov s0, r0
vcvt.f64.u32 d0, s0
vmov_ret
RET
END(__floatunsidf)
/*
 * FPSCR condition flags after vcmp + vmrs:
 *   N set iff a < b (ordered less-than only)
 *   Z set iff a == b
 *   C set iff a == b, a > b, or unordered
 *   V set iff unordered (at least one operand is a NaN)
 */
/*
 * int __nedf2(double a, double b) / int __eqdf2(double a, double b)
 * Both return 0 iff a == b, nonzero otherwise.  Unordered compares
 * leave Z clear, so NaNs yield 1 (nonzero), which satisfies both the
 * __eqdf2 and __nedf2 return conventions.
 */
STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
vmov_args
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
moveq r0, #0 /* a == b */
movne r0, #1 /* a != b or unordered */
RET
END(__nedf2)
/*
 * int __ltdf2(double a, double b) / int __gedf2(double a, double b)
 * Returns -1 iff a < b (ordered), 0 otherwise.
 *
 * NOTE(review): an unordered compare leaves N clear, so NaN operands
 * return 0 here.  That matches the __ltdf2 convention, but __gedf2 is
 * conventionally expected to return a negative value when either
 * operand is a NaN -- confirm the intended NaN semantics of this alias.
 */
STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
vmov_args
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
mvnmi r0, #0 /* r0 = -1 if a < b */
movpl r0, #0 /* r0 = 0 otherwise (incl. unordered) */
RET
END(__ltdf2)
/*
 * int __ledf2(double a, double b) / int __gtdf2(double a, double b)
 * Returns 1 iff a > b (ordered), 0 otherwise.
 *
 * NOTE(review): an unordered compare takes the "le" path and returns 0.
 * That is correct for the __gtdf2 convention (non-positive on NaN), but
 * __ledf2 is conventionally expected to return a positive value when
 * either operand is a NaN -- confirm the intended NaN semantics.
 */
STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
vmov_args
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
movgt r0, #1 /* a > b */
movle r0, #0 /* a <= b or unordered */
RET
END(__ledf2)
/*
 * int __unorddf2(double a, double b)
 * Returns nonzero iff the operands are unordered, i.e. either is a NaN
 * (vcmp sets V on an unordered compare).
 */
ENTRY(__unorddf2)
vmov_args
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
movvs r0, #1 /* isnan(a) || isnan(b) */
movvc r0, #0 /* both operands ordered */
RET
END(__unorddf2)

159
lib/libc_vfp/vfpsf.S Normal file
View File

@ -0,0 +1,159 @@
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas of 3am Software Foundry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arm/asm.h>
#include <arm/vfpreg.h>
RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
/*
* This file provides softfloat compatible routines which use VFP instructions
* to do the actual work. This should give near hard-float performance while
* being compatible with soft-float code.
*
* This file implements the single precision floating point routines.
*/
/*
 * float __addsf3(float a, float b)
 * Soft-float ABI: a in r0, b in r1, result in r0.
 */
ENTRY(__addsf3)
vmov s0, s1, r0, r1 /* a -> s0, b -> s1 */
vadd.f32 s0, s0, s1
vmov r0, s0
RET
END(__addsf3)
/*
 * float __subsf3(float a, float b)
 * Returns a - b.  a in r0, b in r1, result in r0.
 */
ENTRY(__subsf3)
vmov s0, s1, r0, r1
vsub.f32 s0, s0, s1
vmov r0, s0
RET
END(__subsf3)
/*
 * float __mulsf3(float a, float b)
 * Returns a * b.  a in r0, b in r1, result in r0.
 */
ENTRY(__mulsf3)
vmov s0, s1, r0, r1
vmul.f32 s0, s0, s1
vmov r0, s0
RET
END(__mulsf3)
/*
 * float __divsf3(float a, float b)
 * Returns a / b.  a in r0, b in r1, result in r0.
 */
ENTRY(__divsf3)
vmov s0, s1, r0, r1
vdiv.f32 s0, s0, s1
vmov r0, s0
RET
END(__divsf3)
/*
 * float __negsf2(float a)
 * Returns -a (sign-bit flip).  a in r0, result in r0.
 */
ENTRY(__negsf2)
vmov s0, r0
vneg.f32 s0, s0
vmov r0, s0
RET
END(__negsf2)
/*
 * float __truncdfsf2(double a)
 * Narrow double to single.  a arrives in the r0/r1 pair; which register
 * carries the high word depends on endianness.  Result in r0.
 */
ENTRY(__truncdfsf2)
#ifdef __ARMEL__
vmov d0, r0, r1 /* little-endian: low word in r0 */
#else
vmov d0, r1, r0 /* big-endian: high word in r0 */
#endif
vcvt.f32.f64 s0, d0
vmov r0, s0
RET
END(__truncdfsf2)
/*
 * int32_t __fixsfsi(float a)
 * Convert single to signed 32-bit integer (vcvt to integer truncates,
 * i.e. rounds toward zero).  a in r0, result in r0.
 */
ENTRY(__fixsfsi)
vmov s0, r0
vcvt.s32.f32 s0, s0
vmov r0, s0
RET
END(__fixsfsi)
/*
 * uint32_t __fixunssfsi(float a)
 * Convert single to unsigned 32-bit integer, rounding toward zero.
 * a in r0, result in r0.
 */
ENTRY(__fixunssfsi)
vmov s0, r0
vcvt.u32.f32 s0, s0
vmov r0, s0
RET
END(__fixunssfsi)
/*
 * float __floatsisf(int32_t a)
 * Convert signed 32-bit integer to single.  a in r0, result in r0.
 */
ENTRY(__floatsisf)
vmov s0, r0
vcvt.f32.s32 s0, s0
vmov r0, s0
RET
END(__floatsisf)
/*
 * float __floatunsisf(uint32_t a)
 * Convert unsigned 32-bit integer to single.  a in r0, result in r0.
 */
ENTRY(__floatunsisf)
vmov s0, r0
vcvt.f32.u32 s0, s0
vmov r0, s0
RET
END(__floatunsisf)
/*
 * FPSCR condition flags after vcmp + vmrs:
 *   N set iff a < b (ordered less-than only)
 *   Z set iff a == b
 *   C set iff a == b, a > b, or unordered
 *   V set iff unordered (at least one operand is a NaN)
 */
/*
 * int __nesf2(float a, float b) / int __eqsf2(float a, float b)
 * Both return 0 iff a == b, nonzero otherwise.  Unordered compares
 * leave Z clear, so NaNs yield 1 (nonzero), which satisfies both the
 * __eqsf2 and __nesf2 return conventions.
 */
STRONG_ALIAS(__eqsf2, __nesf2)
ENTRY(__nesf2)
vmov s0, s1, r0, r1
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
moveq r0, #0 /* a == b */
movne r0, #1 /* a != b or unordered */
RET
END(__nesf2)
/*
 * int __ltsf2(float a, float b) / int __gesf2(float a, float b)
 * Returns -1 iff a < b (ordered), 0 otherwise.
 *
 * NOTE(review): an unordered compare leaves N clear, so NaN operands
 * return 0 here.  That matches the __ltsf2 convention, but __gesf2 is
 * conventionally expected to return a negative value when either
 * operand is a NaN -- confirm the intended NaN semantics of this alias.
 */
STRONG_ALIAS(__gesf2, __ltsf2)
ENTRY(__ltsf2)
vmov s0, s1, r0, r1
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
mvnmi r0, #0 /* r0 = -1 if a < b */
movpl r0, #0 /* r0 = 0 otherwise (incl. unordered) */
RET
END(__ltsf2)
/*
 * int __lesf2(float a, float b) / int __gtsf2(float a, float b)
 * Returns 1 iff a > b (ordered), 0 otherwise.
 *
 * NOTE(review): an unordered compare takes the "le" path and returns 0.
 * That is correct for the __gtsf2 convention (non-positive on NaN), but
 * __lesf2 is conventionally expected to return a positive value when
 * either operand is a NaN -- confirm the intended NaN semantics.
 */
STRONG_ALIAS(__gtsf2, __lesf2)
ENTRY(__lesf2)
vmov s0, s1, r0, r1
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
movgt r0, #1 /* a > b */
movle r0, #0 /* a <= b or unordered */
RET
END(__lesf2)
/*
 * int __unordsf2(float a, float b)
 * Returns nonzero iff the operands are unordered, i.e. either is a NaN
 * (vcmp sets V on an unordered compare).
 */
ENTRY(__unordsf2)
vmov s0, s1, r0, r1
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
movvs r0, #1 /* isnan(a) || isnan(b) */
movvc r0, #0 /* both operands ordered */
RET
END(__unordsf2)