this moved to common/lib/libc/hash

christos 2006-10-27 21:24:35 +00:00
parent 77c9e41904
commit a3fadcee99
2 changed files with 0 additions and 1343 deletions

@@ -1,375 +0,0 @@
/* $NetBSD: rmd160.c,v 1.4 2006/02/09 22:03:15 dogcow Exp $ */
/* $KAME: rmd160.c,v 1.2 2003/07/25 09:37:55 itojun Exp $ */
/* $OpenBSD: rmd160.c,v 1.3 2001/09/26 21:40:13 markus Exp $ */
/*
* Copyright (c) 2001 Markus Friedl. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Preneel, Bosselaers, Dobbertin, "The Cryptographic Hash Function RIPEMD-160",
* RSA Laboratories, CryptoBytes, Volume 3, Number 2, Autumn 1997,
* ftp://ftp.rsasecurity.com/pub/cryptobytes/crypto3n2.pdf
*/
#include "namespace.h"
#include <sys/cdefs.h>
#include <sys/param.h>
#include <machine/endian.h>
#include <string.h>
#include <crypto/rmd160.h>
#define PUT_64BIT_LE(cp, value) do { \
(cp)[7] = (u_char)((value) >> 56); \
(cp)[6] = (u_char)((value) >> 48); \
(cp)[5] = (u_char)((value) >> 40); \
(cp)[4] = (u_char)((value) >> 32); \
(cp)[3] = (u_char)((value) >> 24); \
(cp)[2] = (u_char)((value) >> 16); \
(cp)[1] = (u_char)((value) >> 8); \
(cp)[0] = (u_char)((value)); } while (/*CONSTCOND*/0)
#define PUT_32BIT_LE(cp, value) do { \
(cp)[3] = (value) >> 24; \
(cp)[2] = (value) >> 16; \
(cp)[1] = (value) >> 8; \
(cp)[0] = (value); } while (/*CONSTCOND*/0)
#define H0 0x67452301U
#define H1 0xEFCDAB89U
#define H2 0x98BADCFEU
#define H3 0x10325476U
#define H4 0xC3D2E1F0U
#define K0 0x00000000U
#define K1 0x5A827999U
#define K2 0x6ED9EBA1U
#define K3 0x8F1BBCDCU
#define K4 0xA953FD4EU
#define KK0 0x50A28BE6U
#define KK1 0x5C4DD124U
#define KK2 0x6D703EF3U
#define KK3 0x7A6D76E9U
#define KK4 0x00000000U
/* rotate x left n bits. */
#define ROL(n, x) (((x) << (n)) | ((x) >> (32-(n))))
#define F0(x, y, z) ((x) ^ (y) ^ (z))
#define F1(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define F2(x, y, z) (((x) | (~y)) ^ (z))
#define F3(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define F4(x, y, z) ((x) ^ ((y) | (~z)))
#define R(a, b, c, d, e, Fj, Kj, sj, rj) \
do { \
a = ROL(sj, a + Fj(b,c,d) + X(rj) + Kj) + e; \
c = ROL(10, c); \
} while(/*CONSTCOND*/0)
#define X(i) x[i]
static const u_char PADDING[64] = {
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
#ifdef __weak_alias
__weak_alias(RMD160Init,_RMD160Init)
__weak_alias(RMD160Update,_RMD160Update)
__weak_alias(RMD160Final,_RMD160Final)
__weak_alias(RMD160Transform,_RMD160Transform)
#endif
void
RMD160Init(RMD160_CTX *ctx)
{
ctx->count = 0;
ctx->state[0] = H0;
ctx->state[1] = H1;
ctx->state[2] = H2;
ctx->state[3] = H3;
ctx->state[4] = H4;
}
void
RMD160Update(RMD160_CTX *ctx, const u_char *input, u_int32_t len)
{
u_int32_t have, off, need;
have = (u_int32_t)((ctx->count/8) % 64);
need = 64 - have;
ctx->count += 8 * len;
off = 0;
if (len >= need) {
if (have) {
memcpy(ctx->buffer + have, input, (size_t)need);
RMD160Transform(ctx->state, ctx->buffer);
off = need;
have = 0;
}
/* now the buffer is empty */
while (off + 64 <= len) {
RMD160Transform(ctx->state, input+off);
off += 64;
}
}
if (off < len)
memcpy(ctx->buffer + have, input+off, (size_t)len-off);
}
void
RMD160Final(u_char digest[20], RMD160_CTX *ctx)
{
int i;
u_char size[8];
u_int32_t padlen;
PUT_64BIT_LE(size, ctx->count);
/*
* pad to 64 byte blocks, at least one byte from PADDING plus 8 bytes
* for the size
*/
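/*
 * For instance, with a 60-byte message (ctx->count/8) % 64 == 60, so
 * padlen starts at 4 and, being < 1 + 8, grows to 68: the two
 * RMD160Update() calls below then absorb 60 bytes of PADDING plus the
 * 8-byte length, closing out the final two 64-byte blocks.
 */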
padlen = (u_int32_t)(64 - ((ctx->count/8) % 64));
if (padlen < 1 + 8)
padlen += 64;
RMD160Update(ctx, PADDING, padlen - 8); /* padlen - 8 <= 64 */
RMD160Update(ctx, size, 8);
if (digest != NULL)
for (i = 0; i < 5; i++)
PUT_32BIT_LE(digest + i*4, ctx->state[i]);
memset(ctx, 0, sizeof (*ctx));
}
void
RMD160Transform(u_int32_t state[5], const u_char block[64])
{
u_int32_t a, b, c, d, e, aa, bb, cc, dd, ee, t, x[16];
#if BYTE_ORDER == LITTLE_ENDIAN
memcpy(x, block, (size_t)64);
#else
int i;
for (i = 0; i < 16; i++)
x[i] = le32toh(*(const u_int32_t*)(block+i*4));
#endif
a = state[0];
b = state[1];
c = state[2];
d = state[3];
e = state[4];
/* Round 1 */
R(a, b, c, d, e, F0, K0, 11, 0);
R(e, a, b, c, d, F0, K0, 14, 1);
R(d, e, a, b, c, F0, K0, 15, 2);
R(c, d, e, a, b, F0, K0, 12, 3);
R(b, c, d, e, a, F0, K0, 5, 4);
R(a, b, c, d, e, F0, K0, 8, 5);
R(e, a, b, c, d, F0, K0, 7, 6);
R(d, e, a, b, c, F0, K0, 9, 7);
R(c, d, e, a, b, F0, K0, 11, 8);
R(b, c, d, e, a, F0, K0, 13, 9);
R(a, b, c, d, e, F0, K0, 14, 10);
R(e, a, b, c, d, F0, K0, 15, 11);
R(d, e, a, b, c, F0, K0, 6, 12);
R(c, d, e, a, b, F0, K0, 7, 13);
R(b, c, d, e, a, F0, K0, 9, 14);
R(a, b, c, d, e, F0, K0, 8, 15); /* #15 */
/* Round 2 */
R(e, a, b, c, d, F1, K1, 7, 7);
R(d, e, a, b, c, F1, K1, 6, 4);
R(c, d, e, a, b, F1, K1, 8, 13);
R(b, c, d, e, a, F1, K1, 13, 1);
R(a, b, c, d, e, F1, K1, 11, 10);
R(e, a, b, c, d, F1, K1, 9, 6);
R(d, e, a, b, c, F1, K1, 7, 15);
R(c, d, e, a, b, F1, K1, 15, 3);
R(b, c, d, e, a, F1, K1, 7, 12);
R(a, b, c, d, e, F1, K1, 12, 0);
R(e, a, b, c, d, F1, K1, 15, 9);
R(d, e, a, b, c, F1, K1, 9, 5);
R(c, d, e, a, b, F1, K1, 11, 2);
R(b, c, d, e, a, F1, K1, 7, 14);
R(a, b, c, d, e, F1, K1, 13, 11);
R(e, a, b, c, d, F1, K1, 12, 8); /* #31 */
/* Round 3 */
R(d, e, a, b, c, F2, K2, 11, 3);
R(c, d, e, a, b, F2, K2, 13, 10);
R(b, c, d, e, a, F2, K2, 6, 14);
R(a, b, c, d, e, F2, K2, 7, 4);
R(e, a, b, c, d, F2, K2, 14, 9);
R(d, e, a, b, c, F2, K2, 9, 15);
R(c, d, e, a, b, F2, K2, 13, 8);
R(b, c, d, e, a, F2, K2, 15, 1);
R(a, b, c, d, e, F2, K2, 14, 2);
R(e, a, b, c, d, F2, K2, 8, 7);
R(d, e, a, b, c, F2, K2, 13, 0);
R(c, d, e, a, b, F2, K2, 6, 6);
R(b, c, d, e, a, F2, K2, 5, 13);
R(a, b, c, d, e, F2, K2, 12, 11);
R(e, a, b, c, d, F2, K2, 7, 5);
R(d, e, a, b, c, F2, K2, 5, 12); /* #47 */
/* Round 4 */
R(c, d, e, a, b, F3, K3, 11, 1);
R(b, c, d, e, a, F3, K3, 12, 9);
R(a, b, c, d, e, F3, K3, 14, 11);
R(e, a, b, c, d, F3, K3, 15, 10);
R(d, e, a, b, c, F3, K3, 14, 0);
R(c, d, e, a, b, F3, K3, 15, 8);
R(b, c, d, e, a, F3, K3, 9, 12);
R(a, b, c, d, e, F3, K3, 8, 4);
R(e, a, b, c, d, F3, K3, 9, 13);
R(d, e, a, b, c, F3, K3, 14, 3);
R(c, d, e, a, b, F3, K3, 5, 7);
R(b, c, d, e, a, F3, K3, 6, 15);
R(a, b, c, d, e, F3, K3, 8, 14);
R(e, a, b, c, d, F3, K3, 6, 5);
R(d, e, a, b, c, F3, K3, 5, 6);
R(c, d, e, a, b, F3, K3, 12, 2); /* #63 */
/* Round 5 */
R(b, c, d, e, a, F4, K4, 9, 4);
R(a, b, c, d, e, F4, K4, 15, 0);
R(e, a, b, c, d, F4, K4, 5, 5);
R(d, e, a, b, c, F4, K4, 11, 9);
R(c, d, e, a, b, F4, K4, 6, 7);
R(b, c, d, e, a, F4, K4, 8, 12);
R(a, b, c, d, e, F4, K4, 13, 2);
R(e, a, b, c, d, F4, K4, 12, 10);
R(d, e, a, b, c, F4, K4, 5, 14);
R(c, d, e, a, b, F4, K4, 12, 1);
R(b, c, d, e, a, F4, K4, 13, 3);
R(a, b, c, d, e, F4, K4, 14, 8);
R(e, a, b, c, d, F4, K4, 11, 11);
R(d, e, a, b, c, F4, K4, 8, 6);
R(c, d, e, a, b, F4, K4, 5, 15);
R(b, c, d, e, a, F4, K4, 6, 13); /* #79 */
aa = a ; bb = b; cc = c; dd = d; ee = e;
a = state[0];
b = state[1];
c = state[2];
d = state[3];
e = state[4];
/* Parallel round 1 */
R(a, b, c, d, e, F4, KK0, 8, 5);
R(e, a, b, c, d, F4, KK0, 9, 14);
R(d, e, a, b, c, F4, KK0, 9, 7);
R(c, d, e, a, b, F4, KK0, 11, 0);
R(b, c, d, e, a, F4, KK0, 13, 9);
R(a, b, c, d, e, F4, KK0, 15, 2);
R(e, a, b, c, d, F4, KK0, 15, 11);
R(d, e, a, b, c, F4, KK0, 5, 4);
R(c, d, e, a, b, F4, KK0, 7, 13);
R(b, c, d, e, a, F4, KK0, 7, 6);
R(a, b, c, d, e, F4, KK0, 8, 15);
R(e, a, b, c, d, F4, KK0, 11, 8);
R(d, e, a, b, c, F4, KK0, 14, 1);
R(c, d, e, a, b, F4, KK0, 14, 10);
R(b, c, d, e, a, F4, KK0, 12, 3);
R(a, b, c, d, e, F4, KK0, 6, 12); /* #15 */
/* Parallel round 2 */
R(e, a, b, c, d, F3, KK1, 9, 6);
R(d, e, a, b, c, F3, KK1, 13, 11);
R(c, d, e, a, b, F3, KK1, 15, 3);
R(b, c, d, e, a, F3, KK1, 7, 7);
R(a, b, c, d, e, F3, KK1, 12, 0);
R(e, a, b, c, d, F3, KK1, 8, 13);
R(d, e, a, b, c, F3, KK1, 9, 5);
R(c, d, e, a, b, F3, KK1, 11, 10);
R(b, c, d, e, a, F3, KK1, 7, 14);
R(a, b, c, d, e, F3, KK1, 7, 15);
R(e, a, b, c, d, F3, KK1, 12, 8);
R(d, e, a, b, c, F3, KK1, 7, 12);
R(c, d, e, a, b, F3, KK1, 6, 4);
R(b, c, d, e, a, F3, KK1, 15, 9);
R(a, b, c, d, e, F3, KK1, 13, 1);
R(e, a, b, c, d, F3, KK1, 11, 2); /* #31 */
/* Parallel round 3 */
R(d, e, a, b, c, F2, KK2, 9, 15);
R(c, d, e, a, b, F2, KK2, 7, 5);
R(b, c, d, e, a, F2, KK2, 15, 1);
R(a, b, c, d, e, F2, KK2, 11, 3);
R(e, a, b, c, d, F2, KK2, 8, 7);
R(d, e, a, b, c, F2, KK2, 6, 14);
R(c, d, e, a, b, F2, KK2, 6, 6);
R(b, c, d, e, a, F2, KK2, 14, 9);
R(a, b, c, d, e, F2, KK2, 12, 11);
R(e, a, b, c, d, F2, KK2, 13, 8);
R(d, e, a, b, c, F2, KK2, 5, 12);
R(c, d, e, a, b, F2, KK2, 14, 2);
R(b, c, d, e, a, F2, KK2, 13, 10);
R(a, b, c, d, e, F2, KK2, 13, 0);
R(e, a, b, c, d, F2, KK2, 7, 4);
R(d, e, a, b, c, F2, KK2, 5, 13); /* #47 */
/* Parallel round 4 */
R(c, d, e, a, b, F1, KK3, 15, 8);
R(b, c, d, e, a, F1, KK3, 5, 6);
R(a, b, c, d, e, F1, KK3, 8, 4);
R(e, a, b, c, d, F1, KK3, 11, 1);
R(d, e, a, b, c, F1, KK3, 14, 3);
R(c, d, e, a, b, F1, KK3, 14, 11);
R(b, c, d, e, a, F1, KK3, 6, 15);
R(a, b, c, d, e, F1, KK3, 14, 0);
R(e, a, b, c, d, F1, KK3, 6, 5);
R(d, e, a, b, c, F1, KK3, 9, 12);
R(c, d, e, a, b, F1, KK3, 12, 2);
R(b, c, d, e, a, F1, KK3, 9, 13);
R(a, b, c, d, e, F1, KK3, 12, 9);
R(e, a, b, c, d, F1, KK3, 5, 7);
R(d, e, a, b, c, F1, KK3, 15, 10);
R(c, d, e, a, b, F1, KK3, 8, 14); /* #63 */
/* Parallel round 5 */
R(b, c, d, e, a, F0, KK4, 8, 12);
R(a, b, c, d, e, F0, KK4, 5, 15);
R(e, a, b, c, d, F0, KK4, 12, 10);
R(d, e, a, b, c, F0, KK4, 9, 4);
R(c, d, e, a, b, F0, KK4, 12, 1);
R(b, c, d, e, a, F0, KK4, 5, 5);
R(a, b, c, d, e, F0, KK4, 14, 8);
R(e, a, b, c, d, F0, KK4, 6, 7);
R(d, e, a, b, c, F0, KK4, 8, 6);
R(c, d, e, a, b, F0, KK4, 13, 2);
R(b, c, d, e, a, F0, KK4, 6, 13);
R(a, b, c, d, e, F0, KK4, 5, 14);
R(e, a, b, c, d, F0, KK4, 15, 0);
R(d, e, a, b, c, F0, KK4, 13, 3);
R(c, d, e, a, b, F0, KK4, 11, 9);
R(b, c, d, e, a, F0, KK4, 11, 11); /* #79 */
t = state[1] + cc + d;
state[1] = state[2] + dd + e;
state[2] = state[3] + ee + a;
state[3] = state[4] + aa + b;
state[4] = state[0] + bb + c;
state[0] = t;
}
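
For context, the public entry points of this deleted file compose in the usual init/update/final pattern. Below is a minimal caller sketch, assuming a userland header named <rmd160.h> exposes the RMD160_CTX type and the prototypes seen above (the file itself pulls them from <crypto/rmd160.h>):

#include <sys/types.h>
#include <stdio.h>
#include <rmd160.h>	/* assumed userland header for RMD160_CTX and prototypes */

int
main(void)
{
	RMD160_CTX ctx;
	u_char digest[20];
	const char msg[] = "abc";
	int i;

	RMD160Init(&ctx);
	RMD160Update(&ctx, (const u_char *)msg, (u_int32_t)(sizeof(msg) - 1));
	RMD160Final(digest, &ctx);	/* RMD160Final also zeroes the context */

	for (i = 0; i < 20; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}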

@@ -1,968 +0,0 @@
/* $NetBSD: sha2.c,v 1.6 2006/10/15 16:11:04 christos Exp $ */
/* $KAME: sha2.c,v 1.9 2003/07/20 00:28:38 itojun Exp $ */
/*
* sha2.c
*
* Version 1.0.0beta1
*
* Written by Aaron D. Gifford <me@aarongifford.com>
*
* Copyright 2000 Aaron D. Gifford. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR(S) OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "namespace.h"
#include <sys/types.h>
#include <crypto/sha2.h>
#include <assert.h>
#include <string.h>
/*
* ASSERT NOTE:
* Some sanity checking code is included using assert(). On my FreeBSD
* system, this additional code can be removed by compiling with NDEBUG
defined. Check your own system's manpage on assert() to see how to
* compile WITHOUT the sanity checking code on your system.
*
* UNROLLED TRANSFORM LOOP NOTE:
* You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
* loop version for the hash transform rounds (defined using macros
* later in this file). Either define on the command line, for example:
*
* cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
*
* or define below:
*
* #define SHA2_UNROLL_TRANSFORM
*
*/
#if defined(__bsdi__) || defined(__FreeBSD__)
#define assert(x)
#endif
/*** SHA-256/384/512 Machine Architecture Definitions *****************/
/*
* BYTE_ORDER NOTE:
*
* Please make sure that your system defines BYTE_ORDER. If your
* architecture is little-endian, make sure it also defines
* LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
* equivalent.
*
* If your system does not define the above, then you can do so by
* hand like this:
*
* #define LITTLE_ENDIAN 1234
* #define BIG_ENDIAN 4321
*
* And for little-endian machines, add:
*
* #define BYTE_ORDER LITTLE_ENDIAN
*
* Or for big-endian machines:
*
* #define BYTE_ORDER BIG_ENDIAN
*
* The FreeBSD machine this was written on defines BYTE_ORDER
* appropriately by including <sys/types.h> (which in turn includes
* <machine/endian.h> where the appropriate definitions are actually
* made).
*/
#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN
#endif
/*
* Define the following sha2_* types to types of the correct length on
* the native architecture. Most BSD systems and Linux define u_intXX_t
* types. Machines with very recent ANSI C headers can use the
* uintXX_t definitions from inttypes.h by defining SHA2_USE_INTTYPES_H
* during compile or in the sha.h header file.
*
* Machines that support neither u_intXX_t nor inttypes.h's uintXX_t
* will need to define these three typedefs below (and the appropriate
* ones in sha.h too) by hand according to their system architecture.
*
* Thank you, Jun-ichiro itojun Hagino, for suggesting using u_intXX_t
* types and pointing out recent ANSI C support for uintXX_t in inttypes.h.
*/
#if 0 /*def SHA2_USE_INTTYPES_H*/
typedef uint8_t sha2_byte; /* Exactly 1 byte */
typedef uint32_t sha2_word32; /* Exactly 4 bytes */
typedef uint64_t sha2_word64; /* Exactly 8 bytes */
#else /* SHA2_USE_INTTYPES_H */
typedef u_int8_t sha2_byte; /* Exactly 1 byte */
typedef u_int32_t sha2_word32; /* Exactly 4 bytes */
typedef u_int64_t sha2_word64; /* Exactly 8 bytes */
#endif /* SHA2_USE_INTTYPES_H */
/*** SHA-256/384/512 Various Length Definitions ***********************/
/* NOTE: Most of these are in sha2.h */
#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8)
#define SHA384_SHORT_BLOCK_LENGTH (SHA384_BLOCK_LENGTH - 16)
#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16)
/*** ENDIAN REVERSAL MACROS *******************************************/
#if BYTE_ORDER == LITTLE_ENDIAN
#define REVERSE32(w,x) { \
sha2_word32 tmp = (w); \
tmp = (tmp >> 16) | (tmp << 16); \
(x) = (sha2_word32)(((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8)); \
}
#define REVERSE64(w,x) { \
sha2_word64 tmp = (w); \
tmp = (tmp >> 32) | (tmp << 32); \
tmp = (sha2_word64)(((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
((tmp & 0x00ff00ff00ff00ffULL) << 8)); \
(x) = (sha2_word64)(((tmp & 0xffff0000ffff0000ULL) >> 16) | \
((tmp & 0x0000ffff0000ffffULL) << 16)); \
}
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
/*
* Macro for incrementally adding the unsigned 64-bit integer n to the
* unsigned 128-bit integer (represented using a two-element array of
* 64-bit words):
*/
#define ADDINC128(w,n) { \
(w)[0] += (sha2_word64)(n); \
if ((w)[0] < (n)) { \
(w)[1]++; \
} \
}
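/*
 * The carry test works because unsigned addition wraps modulo 2^64:
 * after (w)[0] += n, the low word is smaller than n exactly when the
 * addition overflowed.  E.g. with (w)[0] == 0xffffffffffffffffULL and
 * n == 8, the low word wraps to 7 (< 8), so (w)[1] is incremented.
 */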
/*** THE SIX LOGICAL FUNCTIONS ****************************************/
/*
* Bit shifting and rotation (used by the six SHA-XYZ logical functions):
*
* NOTE: The naming of R and S appears backwards here (R is a SHIFT and
* S is a ROTATION) because the SHA-256/384/512 description document
* (see http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf) uses this
* same "backwards" definition.
*/
/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
#define R(b,x) ((x) >> (b))
/* 32-bit Rotate-right (used in SHA-256): */
#define S32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
#define S64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
/* Two of six logical functions used in SHA-256, SHA-384, and SHA-512: */
#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
/* Four of six logical functions used in SHA-256: */
#define Sigma0_256(x) (S32(2, (x)) ^ S32(13, (x)) ^ S32(22, (x)))
#define Sigma1_256(x) (S32(6, (x)) ^ S32(11, (x)) ^ S32(25, (x)))
#define sigma0_256(x) (S32(7, (x)) ^ S32(18, (x)) ^ R(3 , (x)))
#define sigma1_256(x) (S32(17, (x)) ^ S32(19, (x)) ^ R(10, (x)))
/* Four of six logical functions used in SHA-384 and SHA-512: */
#define Sigma0_512(x) (S64(28, (x)) ^ S64(34, (x)) ^ S64(39, (x)))
#define Sigma1_512(x) (S64(14, (x)) ^ S64(18, (x)) ^ S64(41, (x)))
#define sigma0_512(x) (S64( 1, (x)) ^ S64( 8, (x)) ^ R( 7, (x)))
#define sigma1_512(x) (S64(19, (x)) ^ S64(61, (x)) ^ R( 6, (x)))
/*** INTERNAL FUNCTION PROTOTYPES *************************************/
/* NOTE: These should not be accessed directly from outside this
* library -- they are intended for private internal visibility/use
* only.
*/
void SHA512_Last(SHA512_CTX*);
void SHA256_Transform(SHA256_CTX*, const sha2_word32*);
void SHA384_Transform(SHA384_CTX*, const sha2_word64*);
void SHA512_Transform(SHA512_CTX*, const sha2_word64*);
/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/
/* Hash constant words K for SHA-256: */
static const sha2_word32 K256[64] = {
0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
};
/* Initial hash value H for SHA-256: */
static const sha2_word32 sha256_initial_hash_value[8] = {
0x6a09e667UL,
0xbb67ae85UL,
0x3c6ef372UL,
0xa54ff53aUL,
0x510e527fUL,
0x9b05688cUL,
0x1f83d9abUL,
0x5be0cd19UL
};
/* Hash constant words K for SHA-384 and SHA-512: */
static const sha2_word64 K512[80] = {
0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
};
/* Initial hash value H for SHA-384 */
static const sha2_word64 sha384_initial_hash_value[8] = {
0xcbbb9d5dc1059ed8ULL,
0x629a292a367cd507ULL,
0x9159015a3070dd17ULL,
0x152fecd8f70e5939ULL,
0x67332667ffc00b31ULL,
0x8eb44a8768581511ULL,
0xdb0c2e0d64f98fa7ULL,
0x47b5481dbefa4fa4ULL
};
/* Initial hash value H for SHA-512 */
static const sha2_word64 sha512_initial_hash_value[8] = {
0x6a09e667f3bcc908ULL,
0xbb67ae8584caa73bULL,
0x3c6ef372fe94f82bULL,
0xa54ff53a5f1d36f1ULL,
0x510e527fade682d1ULL,
0x9b05688c2b3e6c1fULL,
0x1f83d9abfb41bd6bULL,
0x5be0cd19137e2179ULL
};
#ifdef __weak_alias
__weak_alias(SHA256_Init,_SHA256_Init)
__weak_alias(SHA256_Update,_SHA256_Update)
__weak_alias(SHA256_Final,_SHA256_Final)
__weak_alias(SHA256_Transform,_SHA256_Transform)
__weak_alias(SHA384_Init,_SHA384_Init)
__weak_alias(SHA384_Update,_SHA384_Update)
__weak_alias(SHA384_Final,_SHA384_Final)
__weak_alias(SHA384_Transform,_SHA384_Transform)
__weak_alias(SHA512_Init,_SHA512_Init)
__weak_alias(SHA512_Update,_SHA512_Update)
__weak_alias(SHA512_Final,_SHA512_Final)
__weak_alias(SHA512_Transform,_SHA512_Transform)
#endif
/*** SHA-256: *********************************************************/
void SHA256_Init(SHA256_CTX* context) {
if (context == (SHA256_CTX*)0) {
return;
}
memcpy(context->state, sha256_initial_hash_value, (size_t)(SHA256_DIGEST_LENGTH));
memset(context->buffer, 0, (size_t)(SHA256_BLOCK_LENGTH));
context->bitcount = 0;
}
#ifdef SHA2_UNROLL_TRANSFORM
/* Unrolled SHA-256 round macros: */
#if BYTE_ORDER == LITTLE_ENDIAN
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
REVERSE32(*data++, W256[j]); \
T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
K256[j] + W256[j]; \
(d) += T1; \
(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
j++
#else /* BYTE_ORDER == LITTLE_ENDIAN */
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
K256[j] + (W256[j] = *data++); \
(d) += T1; \
(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
j++
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
#define ROUND256(a,b,c,d,e,f,g,h) \
s0 = W256[(j+1)&0x0f]; \
s0 = sigma0_256(s0); \
s1 = W256[(j+14)&0x0f]; \
s1 = sigma1_256(s1); \
T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
(W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
(d) += T1; \
(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
j++
void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
sha2_word32 T1, *W256;
int j;
W256 = (sha2_word32*)context->buffer;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
j = 0;
do {
/* Rounds 0 to 15 (unrolled): */
ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
} while (j < 16);
/* Now for the remaining rounds to 64: */
do {
ROUND256(a,b,c,d,e,f,g,h);
ROUND256(h,a,b,c,d,e,f,g);
ROUND256(g,h,a,b,c,d,e,f);
ROUND256(f,g,h,a,b,c,d,e);
ROUND256(e,f,g,h,a,b,c,d);
ROUND256(d,e,f,g,h,a,b,c);
ROUND256(c,d,e,f,g,h,a,b);
ROUND256(b,c,d,e,f,g,h,a);
} while (j < 64);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = 0;
}
#else /* SHA2_UNROLL_TRANSFORM */
void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
sha2_word32 T1, T2, *W256;
int j;
W256 = (sha2_word32*)(void *)context->buffer;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
j = 0;
do {
#if BYTE_ORDER == LITTLE_ENDIAN
/* Copy data while converting to host byte order */
REVERSE32(*data++,W256[j]);
/* Apply the SHA-256 compression function to update a..h */
T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
/* Apply the SHA-256 compression function to update a..h with copy */
T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
T2 = Sigma0_256(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + T1;
d = c;
c = b;
b = a;
a = T1 + T2;
j++;
} while (j < 16);
do {
/* Part of the message block expansion: */
s0 = W256[(j+1)&0x0f];
s0 = sigma0_256(s0);
s1 = W256[(j+14)&0x0f];
s1 = sigma1_256(s1);
/* Apply the SHA-256 compression function to update a..h */
T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
(W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
T2 = Sigma0_256(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + T1;
d = c;
c = b;
b = a;
a = T1 + T2;
j++;
} while (j < 64);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = T2 = 0;
}
#endif /* SHA2_UNROLL_TRANSFORM */
void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
unsigned int freespace, usedspace;
if (len == 0) {
/* Calling with no data is valid - we do nothing */
return;
}
/* Sanity check: */
assert(context != (SHA256_CTX*)0 && data != (sha2_byte*)0);
usedspace = (unsigned int)((context->bitcount >> 3) %
SHA256_BLOCK_LENGTH);
if (usedspace > 0) {
/* Calculate how much free space is available in the buffer */
freespace = SHA256_BLOCK_LENGTH - usedspace;
if (len >= freespace) {
/* Fill the buffer completely and process it */
memcpy(&context->buffer[usedspace], data, (size_t)(freespace));
context->bitcount += freespace << 3;
len -= freespace;
data += freespace;
SHA256_Transform(context, (sha2_word32*)(void *)context->buffer);
} else {
/* The buffer is not yet full */
memcpy(&context->buffer[usedspace], data, len);
context->bitcount += len << 3;
/* Clean up: */
usedspace = freespace = 0;
return;
}
}
while (len >= SHA256_BLOCK_LENGTH) {
/* Process as many complete blocks as we can */
SHA256_Transform(context, (const sha2_word32*)(const void *)data);
context->bitcount += SHA256_BLOCK_LENGTH << 3;
len -= SHA256_BLOCK_LENGTH;
data += SHA256_BLOCK_LENGTH;
}
if (len > 0) {
/* There's left-overs, so save 'em */
memcpy(context->buffer, data, len);
context->bitcount += len << 3;
}
/* Clean up: */
usedspace = freespace = 0;
}
void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
sha2_word32 *d = (void *)digest;
unsigned int usedspace;
/* Sanity check: */
assert(context != (SHA256_CTX*)0);
/* If no digest buffer is passed, we don't bother doing this: */
if (digest != (sha2_byte*)0) {
usedspace = (unsigned int)((context->bitcount >> 3) % SHA256_BLOCK_LENGTH);
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert FROM host byte order */
REVERSE64(context->bitcount,context->bitcount);
#endif
if (usedspace > 0) {
/* Begin padding with a 1 bit: */
context->buffer[usedspace++] = 0x80;
if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) {
/* Set-up for the last transform: */
memset(&context->buffer[usedspace], 0, (size_t)(SHA256_SHORT_BLOCK_LENGTH - usedspace));
} else {
if (usedspace < SHA256_BLOCK_LENGTH) {
memset(&context->buffer[usedspace], 0, (size_t)(SHA256_BLOCK_LENGTH - usedspace));
}
/* Do second-to-last transform: */
SHA256_Transform(context, (sha2_word32*)(void *)context->buffer);
/* And set-up for the last transform: */
memset(context->buffer, 0, (size_t)(SHA256_SHORT_BLOCK_LENGTH));
}
} else {
/* Set-up for the last transform: */
memset(context->buffer, 0, (size_t)(SHA256_SHORT_BLOCK_LENGTH));
/* Begin padding with a 1 bit: */
*context->buffer = 0x80;
}
/* Set the bit count: */
*(sha2_word64*)(void *)&context->buffer[SHA256_SHORT_BLOCK_LENGTH] = context->bitcount;
/* Final transform: */
SHA256_Transform(context, (sha2_word32*)(void *)context->buffer);
#if BYTE_ORDER == LITTLE_ENDIAN
{
/* Convert TO host byte order */
int j;
for (j = 0; j < 8; j++) {
REVERSE32(context->state[j],context->state[j]);
*d++ = context->state[j];
}
}
#else
memcpy(d, context->state, SHA256_DIGEST_LENGTH);
#endif
}
/* Clean up state data: */
memset(context, 0, sizeof(*context));
usedspace = 0;
}
/*** SHA-512: *********************************************************/
void SHA512_Init(SHA512_CTX* context) {
if (context == (SHA512_CTX*)0) {
return;
}
memcpy(context->state, sha512_initial_hash_value, (size_t)(SHA512_DIGEST_LENGTH));
memset(context->buffer, 0, (size_t)(SHA512_BLOCK_LENGTH));
context->bitcount[0] = context->bitcount[1] = 0;
}
#ifdef SHA2_UNROLL_TRANSFORM
/* Unrolled SHA-512 round macros: */
#if BYTE_ORDER == LITTLE_ENDIAN
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
REVERSE64(*data++, W512[j]); \
T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
K512[j] + W512[j]; \
(d) += T1; \
(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
j++
#else /* BYTE_ORDER == LITTLE_ENDIAN */
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
K512[j] + (W512[j] = *data++); \
(d) += T1; \
(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
j++
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
#define ROUND512(a,b,c,d,e,f,g,h) \
s0 = W512[(j+1)&0x0f]; \
s0 = sigma0_512(s0); \
s1 = W512[(j+14)&0x0f]; \
s1 = sigma1_512(s1); \
T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
(W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
(d) += T1; \
(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
j++
void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
sha2_word64 T1, *W512 = (sha2_word64*)context->buffer;
int j;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
j = 0;
do {
ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
} while (j < 16);
/* Now for the remaining rounds up to 79: */
do {
ROUND512(a,b,c,d,e,f,g,h);
ROUND512(h,a,b,c,d,e,f,g);
ROUND512(g,h,a,b,c,d,e,f);
ROUND512(f,g,h,a,b,c,d,e);
ROUND512(e,f,g,h,a,b,c,d);
ROUND512(d,e,f,g,h,a,b,c);
ROUND512(c,d,e,f,g,h,a,b);
ROUND512(b,c,d,e,f,g,h,a);
} while (j < 80);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = 0;
}
#else /* SHA2_UNROLL_TRANSFORM */
void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
sha2_word64 T1, T2, *W512 = (void *)context->buffer;
int j;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
j = 0;
do {
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
REVERSE64(*data++, W512[j]);
/* Apply the SHA-512 compression function to update a..h */
T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
/* Apply the SHA-512 compression function to update a..h with copy */
T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
T2 = Sigma0_512(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + T1;
d = c;
c = b;
b = a;
a = T1 + T2;
j++;
} while (j < 16);
do {
/* Part of the message block expansion: */
s0 = W512[(j+1)&0x0f];
s0 = sigma0_512(s0);
s1 = W512[(j+14)&0x0f];
s1 = sigma1_512(s1);
/* Apply the SHA-512 compression function to update a..h */
T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
(W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
T2 = Sigma0_512(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + T1;
d = c;
c = b;
b = a;
a = T1 + T2;
j++;
} while (j < 80);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = T2 = 0;
}
#endif /* SHA2_UNROLL_TRANSFORM */
void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
unsigned int freespace, usedspace;
if (len == 0) {
/* Calling with no data is valid - we do nothing */
return;
}
/* Sanity check: */
assert(context != (SHA512_CTX*)0 && data != (sha2_byte*)0);
usedspace = (unsigned int)((context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH);
if (usedspace > 0) {
/* Calculate how much free space is available in the buffer */
freespace = SHA512_BLOCK_LENGTH - usedspace;
if (len >= freespace) {
/* Fill the buffer completely and process it */
memcpy(&context->buffer[usedspace], data, (size_t)(freespace));
ADDINC128(context->bitcount, freespace << 3);
len -= freespace;
data += freespace;
SHA512_Transform(context, (sha2_word64*)(void *)context->buffer);
} else {
/* The buffer is not yet full */
memcpy(&context->buffer[usedspace], data, len);
ADDINC128(context->bitcount, len << 3);
/* Clean up: */
usedspace = freespace = 0;
return;
}
}
while (len >= SHA512_BLOCK_LENGTH) {
/* Process as many complete blocks as we can */
SHA512_Transform(context, (const sha2_word64*)(const void *)data);
ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
len -= SHA512_BLOCK_LENGTH;
data += SHA512_BLOCK_LENGTH;
}
if (len > 0) {
/* There's left-overs, so save 'em */
memcpy(context->buffer, data, len);
ADDINC128(context->bitcount, len << 3);
}
/* Clean up: */
usedspace = freespace = 0;
}
void SHA512_Last(SHA512_CTX* context) {
unsigned int usedspace;
usedspace = (unsigned int)((context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH);
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert FROM host byte order */
REVERSE64(context->bitcount[0],context->bitcount[0]);
REVERSE64(context->bitcount[1],context->bitcount[1]);
#endif
if (usedspace > 0) {
/* Begin padding with a 1 bit: */
context->buffer[usedspace++] = 0x80;
if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) {
/* Set-up for the last transform: */
memset(&context->buffer[usedspace], 0, (size_t)(SHA512_SHORT_BLOCK_LENGTH - usedspace));
} else {
if (usedspace < SHA512_BLOCK_LENGTH) {
memset(&context->buffer[usedspace], 0, (size_t)(SHA512_BLOCK_LENGTH - usedspace));
}
/* Do second-to-last transform: */
SHA512_Transform(context, (sha2_word64*)(void *)context->buffer);
/* And set-up for the last transform: */
memset(context->buffer, 0, (size_t)(SHA512_BLOCK_LENGTH - 2));
}
} else {
/* Prepare for final transform: */
memset(context->buffer, 0, (size_t)(SHA512_SHORT_BLOCK_LENGTH));
/* Begin padding with a 1 bit: */
*context->buffer = 0x80;
}
/* Store the length of input data (in bits): */
*(sha2_word64*)(void *)&context->buffer[SHA512_SHORT_BLOCK_LENGTH] = context->bitcount[1];
*(sha2_word64*)(void *)&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8] = context->bitcount[0];
/* Final transform: */
SHA512_Transform(context, (sha2_word64*)(void *)context->buffer);
}
void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
sha2_word64 *d = (void *)digest;
/* Sanity check: */
assert(context != (SHA512_CTX*)0);
/* If no digest buffer is passed, we don't bother doing this: */
if (digest != (sha2_byte*)0) {
SHA512_Last(context);
/* Save the hash data for output: */
#if BYTE_ORDER == LITTLE_ENDIAN
{
/* Convert TO host byte order */
int j;
for (j = 0; j < 8; j++) {
REVERSE64(context->state[j],context->state[j]);
*d++ = context->state[j];
}
}
#else
memcpy(d, context->state, SHA512_DIGEST_LENGTH);
#endif
}
/* Zero out state data */
memset(context, 0, sizeof(*context));
}
/*** SHA-384: *********************************************************/
void SHA384_Init(SHA384_CTX* context) {
if (context == (SHA384_CTX*)0) {
return;
}
memcpy(context->state, sha384_initial_hash_value, (size_t)(SHA512_DIGEST_LENGTH));
memset(context->buffer, 0, (size_t)(SHA384_BLOCK_LENGTH));
context->bitcount[0] = context->bitcount[1] = 0;
}
void SHA384_Update(SHA384_CTX* context, const sha2_byte* data, size_t len) {
SHA512_Update((SHA512_CTX*)context, data, len);
}
void SHA384_Transform(SHA512_CTX* context, const sha2_word64* data) {
SHA512_Transform((SHA512_CTX*)context, data);
}
void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
sha2_word64 *d = (void *)digest;
/* Sanity check: */
assert(context != (SHA384_CTX*)0);
/* If no digest buffer is passed, we don't bother doing this: */
if (digest != (sha2_byte*)0) {
SHA512_Last((SHA512_CTX*)context);
/* Save the hash data for output: */
#if BYTE_ORDER == LITTLE_ENDIAN
{
/* Convert TO host byte order */
int j;
for (j = 0; j < 6; j++) {
REVERSE64(context->state[j],context->state[j]);
*d++ = context->state[j];
}
}
#else
memcpy(d, context->state, SHA384_DIGEST_LENGTH);
#endif
}
/* Zero out state data */
memset(context, 0, sizeof(*context));
}
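
The SHA-2 entry points removed here follow the same init/update/final pattern (SHA384_* and SHA512_* are used identically, with their own context types and digest lengths). A minimal caller sketch, assuming a userland header named <sha2.h> provides SHA256_CTX, SHA256_DIGEST_LENGTH, and the prototypes shown above (the file itself includes <crypto/sha2.h>):

#include <sys/types.h>
#include <stdio.h>
#include <sha2.h>	/* assumed userland header for SHA256_CTX and prototypes */

int
main(void)
{
	SHA256_CTX ctx;
	u_int8_t digest[SHA256_DIGEST_LENGTH];
	const char msg[] = "abc";
	size_t i;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, (const u_int8_t *)msg, sizeof(msg) - 1);
	SHA256_Final(digest, &ctx);	/* SHA256_Final also zeroes the context */

	for (i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}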