diff --git a/wolfcrypt/src/aes.c b/wolfcrypt/src/aes.c
index e36d0f4f3..9a021dbf6 100644
--- a/wolfcrypt/src/aes.c
+++ b/wolfcrypt/src/aes.c
@@ -709,14 +709,6 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits
         #define AESNI_ALIGN 16
     #endif
 
-    #ifdef _MSC_VER
-        #define XASM_LINK(f)
-    #elif defined(__APPLE__)
-        #define XASM_LINK(f) asm("_" f)
-    #else
-        #define XASM_LINK(f) asm(f)
-    #endif /* _MSC_VER */
-
     static int checkAESNI = 0;
     static int haveAESNI = 0;
     static word32 intel_flags = 0;
diff --git a/wolfcrypt/src/asm.c b/wolfcrypt/src/asm.c
index 5b9d1ffd7..ea2b4e6d7 100644
--- a/wolfcrypt/src/asm.c
+++ b/wolfcrypt/src/asm.c
@@ -46,15 +46,9 @@
     __asm__ __volatile__ ("cpuid":\
         "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) :\
         "a" (leaf), "c"(sub));
-
-    #define XASM_LINK(f) asm(f)
 #else
-
     #include <intrin.h>
     #define cpuid(a,b,c) __cpuidex((int*)a,b,c)
-
-    #define XASM_LINK(f)
-
 #endif /* _MSC_VER */
 
 #define EAX 0
diff --git a/wolfcrypt/src/cpuid.c b/wolfcrypt/src/cpuid.c
index 744bb4986..c1924c1e5 100644
--- a/wolfcrypt/src/cpuid.c
+++ b/wolfcrypt/src/cpuid.c
@@ -43,14 +43,10 @@
     __asm__ __volatile__ ("cpuid":\
         "=a" ((reg)[0]), "=b" ((reg)[1]), "=c" ((reg)[2]), "=d" ((reg)[3]) :\
         "a" (leaf), "c"(sub));
-
-    #define XASM_LINK(f) asm(f)
 #else
     #include <intrin.h>
     #define cpuid(a,b,c) __cpuidex((int*)a,b,c)
-
-    #define XASM_LINK(f)
 #endif /* _MSC_VER */
 
 #define EAX 0
diff --git a/wolfssl/wolfcrypt/types.h b/wolfssl/wolfcrypt/types.h
index 0298654c9..6a11c44b7 100644
--- a/wolfssl/wolfcrypt/types.h
+++ b/wolfssl/wolfcrypt/types.h
@@ -1180,8 +1180,22 @@ typedef struct w64wrapper {
     /* invalid device id */
     #define INVALID_DEVID (-2)
 
-    /* AESNI requires alignment and ARMASM gains some performance from it
-     * Xilinx RSA operations require alignment */
+    #ifdef XASM_LINK
+        /* keep user-supplied definition */
+    #elif defined(_MSC_VER)
+        #define XASM_LINK(f)
+    #elif defined(__APPLE__)
+        #define XASM_LINK(f) asm("_" f)
+    #elif defined(__GNUC__)
+        /* use alternate keyword for compatibility with -std=c99 */
+        #define XASM_LINK(f) __asm__(f)
+    #else
+        #define XASM_LINK(f) asm(f)
+    #endif
+
+    /* AESNI requires alignment and ARMASM gains some performance from it.
+     * Xilinx RSA operations require alignment.
+     */
     #if defined(WOLFSSL_AESNI) || defined(WOLFSSL_ARMASM) || \
         defined(USE_INTEL_SPEEDUP) || defined(WOLFSSL_AFALG_XILINX) || \
         defined(WOLFSSL_XILINX)