diff --git a/ctaocrypt/src/aes_asm.asm b/ctaocrypt/src/aes_asm.asm index 746364770..414d658db 100755 --- a/ctaocrypt/src/aes_asm.asm +++ b/ctaocrypt/src/aes_asm.asm @@ -133,14 +133,14 @@ AES_CBC_decrypt PROC ; on microsoft xmm6-xmm15 are non volaitle, let's save on stack and restore at end sub rsp,8+8*64 ; 8 = align stack , 8 xmm6-12,15 8 bytes each - movdqa xmm6, [rsp+0] - movdqa xmm7, [rsp+16] - movdqa xmm8, [rsp+32] - movdqa xmm9, [rsp+48] - movdqa xmm10, [rsp+64] - movdqa xmm11, [rsp+80] - movdqa xmm12, [rsp+96] - movdqa xmm15, [rsp+112] + movdqa [rsp+0], xmm6 + movdqa [rsp+16], xmm7 + movdqa [rsp+32], xmm8 + movdqa [rsp+48], xmm9 + movdqa [rsp+64], xmm10 + movdqa [rsp+80], xmm11 + movdqa [rsp+96], xmm12 + movdqa [rsp+112], xmm15 mov r10,rcx shr rcx,4 @@ -304,14 +304,14 @@ DEND_4: mov rdi,rax mov rsi,r11 ; restore non volatile xmms from stack - movdqa [rsp+0], xmm6 - movdqa [rsp+16], xmm7 - movdqa [rsp+32], xmm8 - movdqa [rsp+48], xmm9 - movdqa [rsp+64], xmm10 - movdqa [rsp+80], xmm11 - movdqa [rsp+96], xmm12 - movdqa [rsp+112], xmm15 + movdqa xmm6, [rsp+0] + movdqa xmm7, [rsp+16] + movdqa xmm8, [rsp+32] + movdqa xmm9, [rsp+48] + movdqa xmm10, [rsp+64] + movdqa xmm11, [rsp+80] + movdqa xmm12, [rsp+96] + movdqa xmm15, [rsp+112] add rsp,8+8*64 ; 8 = align stack , 8 xmm6-12,15 8 bytes each ret AES_CBC_decrypt ENDP @@ -344,10 +344,10 @@ AES_ECB_encrypt PROC ; on microsoft xmm6-xmm15 are non volaitle, let's save on stack and restore at end sub rsp,8+4*64 ; 8 = align stack , 4 xmm9-12, 8 bytes each - movdqa xmm9, [rsp+0] - movdqa xmm10, [rsp+16] - movdqa xmm11, [rsp+32] - movdqa xmm12, [rsp+48] + movdqa [rsp+0], xmm9 + movdqa [rsp+16], xmm10 + movdqa [rsp+32], xmm11 + movdqa [rsp+48], xmm12 mov r10,rdx @@ -496,10 +496,10 @@ EECB_END_4: mov rdi,rax mov rsi,r11 ; restore non volatile xmms from stack - movdqa [rsp+0], xmm9 - movdqa [rsp+16], xmm10 - movdqa [rsp+32], xmm11 - movdqa [rsp+48], xmm12 + movdqa xmm9, [rsp+0] + movdqa xmm10, [rsp+16] + movdqa xmm11, [rsp+32] +
movdqa xmm12, [rsp+48] add rsp,8+4*64 ; 8 = align stack , 4 xmm9-12 8 bytes each ret AES_ECB_encrypt ENDP @@ -532,10 +532,10 @@ AES_ECB_decrypt PROC ; on microsoft xmm6-xmm15 are non volaitle, let's save on stack and restore at end sub rsp,8+4*64 ; 8 = align stack , 4 xmm9-12, 8 bytes each - movdqa xmm9, [rsp+0] - movdqa xmm10, [rsp+16] - movdqa xmm11, [rsp+32] - movdqa xmm12, [rsp+48] + movdqa [rsp+0], xmm9 + movdqa [rsp+16], xmm10 + movdqa [rsp+32], xmm11 + movdqa [rsp+48], xmm12 mov r10,rdx shr rdx,4 @@ -683,10 +683,10 @@ DECB_END_4: mov rdi,rax mov rsi,r11 ; restore non volatile xmms from stack - movdqa [rsp+0], xmm9 - movdqa [rsp+16], xmm10 - movdqa [rsp+32], xmm11 - movdqa [rsp+48], xmm12 + movdqa xmm9, [rsp+0] + movdqa xmm10, [rsp+16] + movdqa xmm11, [rsp+32] + movdqa xmm12, [rsp+48] add rsp,8+4*64 ; 8 = align stack , 4 xmm9-12 8 bytes each ret AES_ECB_decrypt ENDP @@ -791,7 +791,7 @@ AES_192_Key_Expansion PROC ; on microsoft xmm6-xmm15 are non volaitle, let's save on stack and restore at end sub rsp,8+1*64 ; 8 = align stack , 1 xmm6, 8 bytes each - movdqa xmm6, [rsp+0] + movdqa [rsp+0], xmm6 movdqu xmm1,[rdi] movdqu xmm3,16[rdi] @@ -853,7 +853,7 @@ AES_192_Key_Expansion PROC mov rdi,rax mov rsi,r11 ; restore non volatile xmms from stack - movdqa [rsp+0], xmm6 + movdqa xmm6, [rsp+0] add rsp,8+1*64 ; 8 = align stack , 1 xmm6 8 bytes each ret