summaryrefslogtreecommitdiffstats
path: root/src/lib/hash_sha1.c
diff options
context:
space:
mode:
authorDenys Vlasenko <dvlasenk@redhat.com>2011-04-18 14:23:19 +0200
committerDenys Vlasenko <dvlasenk@redhat.com>2011-04-18 14:23:19 +0200
commit0a41c4fdf4b242f77445a3e6d73443b40b6e460e (patch)
treee464a61df6653e91a2191950a6056ec70365fbd9 /src/lib/hash_sha1.c
parent305ba8e8786c32d5292d04e2ed25d1853479a23a (diff)
downloadabrt-0a41c4fdf4b242f77445a3e6d73443b40b6e460e.tar.gz
abrt-0a41c4fdf4b242f77445a3e6d73443b40b6e460e.tar.xz
abrt-0a41c4fdf4b242f77445a3e6d73443b40b6e460e.zip
switch python and oops hashing to sha1
Update sha1 code to a smaller version. Verified that ccpp hashing produces the same results as the old code. Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Diffstat (limited to 'src/lib/hash_sha1.c')
-rw-r--r--src/lib/hash_sha1.c246
1 file changed, 122 insertions, 124 deletions
diff --git a/src/lib/hash_sha1.c b/src/lib/hash_sha1.c
index 954e7bc8..f1a9e0a6 100644
--- a/src/lib/hash_sha1.c
+++ b/src/lib/hash_sha1.c
@@ -18,6 +18,7 @@
* ---------------------------------------------------------------------------
*/
#include "abrtlib.h"
+#include <byteswap.h>
#if defined(__BIG_ENDIAN__) && __BIG_ENDIAN__
# define SHA1_BIG_ENDIAN 1
@@ -32,51 +33,37 @@
# error "Can't determine endianness"
#endif
-
#define rotl32(x,n) (((x) << (n)) | ((x) >> (32 - (n))))
+/* for sha256: */
#define rotr32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))
/* for sha512: */
#define rotr64(x,n) (((x) >> (n)) | ((x) << (64 - (n))))
-#if SHA1_LITTLE_ENDIAN
-static inline uint64_t hton64(uint64_t v)
-{
- return (((uint64_t)htonl(v)) << 32) | htonl(v >> 32);
-}
-#else
-#define hton64(v) (v)
-#endif
-#define ntoh64(v) hton64(v)
-/* To check alignment gcc has an appropriate operator. Other
- compilers don't. */
-#if defined(__GNUC__) && __GNUC__ >= 2
-# define UNALIGNED_P(p,type) (((uintptr_t) p) % __alignof__(type) != 0)
-#else
-# define UNALIGNED_P(p,type) (((uintptr_t) p) % sizeof(type) != 0)
-#endif
+
+/* Generic 64-byte helpers for 64-byte block hashes */
+static void common64_hash(sha1_ctx_t *ctx, const void *buffer, size_t len);
+static void common64_end(sha1_ctx_t *ctx, int swap_needed);
-/* Some arch headers have conflicting defines */
-#undef ch
-#undef parity
-#undef maj
-#undef rnd
+/* sha1 specific code */
static void sha1_process_block64(sha1_ctx_t *ctx)
{
- unsigned t;
- uint32_t W[80], a, b, c, d, e;
- const uint32_t *words = (uint32_t*) ctx->wbuffer;
-
- for (t = 0; t < 16; ++t) {
- W[t] = ntohl(*words);
- words++;
- }
-
- for (/*t = 16*/; t < 80; ++t) {
- uint32_t T = W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16];
- W[t] = rotl32(T, 1);
- }
+ static const uint32_t rconsts[] = {
+ 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6
+ };
+ int i, j;
+ int cnt;
+ uint32_t W[16+16];
+ uint32_t a, b, c, d, e;
+
+ /* On-stack work buffer frees up one register in the main loop
+ * which otherwise will be needed to hold ctx pointer */
+ for (i = 0; i < 16; i++)
+ if (SHA1_BIG_ENDIAN)
+ W[i] = W[i+16] = ((uint32_t*)ctx->wbuffer)[i];
+ else
+ W[i] = W[i+16] = bswap_32(((uint32_t*)ctx->wbuffer)[i]);
a = ctx->hash[0];
b = ctx->hash[1];
@@ -84,37 +71,41 @@ static void sha1_process_block64(sha1_ctx_t *ctx)
d = ctx->hash[3];
e = ctx->hash[4];
-/* Reverse byte order in 32-bit words */
-#define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
-#define parity(x,y,z) ((x) ^ (y) ^ (z))
-#define maj(x,y,z) (((x) & (y)) | ((z) & ((x) | (y))))
-/* A normal version as set out in the FIPS. This version uses */
-/* partial loop unrolling and is optimised for the Pentium 4 */
-#define rnd(f,k) \
- do { \
- uint32_t T = a; \
- a = rotl32(a, 5) + f(b, c, d) + e + k + W[t]; \
- e = d; \
- d = c; \
- c = rotl32(b, 30); \
- b = T; \
- } while (0)
-
- for (t = 0; t < 20; ++t)
- rnd(ch, 0x5a827999);
-
- for (/*t = 20*/; t < 40; ++t)
- rnd(parity, 0x6ed9eba1);
-
- for (/*t = 40*/; t < 60; ++t)
- rnd(maj, 0x8f1bbcdc);
-
- for (/*t = 60*/; t < 80; ++t)
- rnd(parity, 0xca62c1d6);
-#undef ch
-#undef parity
-#undef maj
-#undef rnd
+ /* 4 rounds of 20 operations each */
+ cnt = 0;
+ for (i = 0; i < 4; i++) {
+ j = 19;
+ do {
+ uint32_t work;
+
+ work = c ^ d;
+ if (i == 0) {
+ work = (work & b) ^ d;
+ if (j <= 3)
+ goto ge16;
+ /* Used to do bswap_32 here, but this
+ * requires ctx (see comment above) */
+ work += W[cnt];
+ } else {
+ if (i == 2)
+ work = ((b | c) & d) | (b & c);
+ else /* i = 1 or 3 */
+ work ^= b;
+ ge16:
+ W[cnt] = W[cnt+16] = rotl32(W[cnt+13] ^ W[cnt+8] ^ W[cnt+2] ^ W[cnt], 1);
+ work += W[cnt];
+ }
+ work += e + rotl32(a, 5) + rconsts[i];
+
+ /* Rotate by one for next time */
+ e = d;
+ d = c;
+ c = /* b = */ rotl32(b, 30);
+ b = a;
+ a = work;
+ cnt = (cnt + 1) & 15;
+ } while (--j >= 0);
+ }
ctx->hash[0] += a;
ctx->hash[1] += b;
@@ -131,83 +122,90 @@ void sha1_begin(sha1_ctx_t *ctx)
ctx->hash[3] = 0x10325476;
ctx->hash[4] = 0xc3d2e1f0;
ctx->total64 = 0;
- ctx->process_block = sha1_process_block64;
+ /* for sha256: ctx->process_block = sha1_process_block64; */
}
-static const uint32_t init256[] = {
- 0x6a09e667,
- 0xbb67ae85,
- 0x3c6ef372,
- 0xa54ff53a,
- 0x510e527f,
- 0x9b05688c,
- 0x1f83d9ab,
- 0x5be0cd19
-};
-static const uint32_t init512_lo[] = {
- 0xf3bcc908,
- 0x84caa73b,
- 0xfe94f82b,
- 0x5f1d36f1,
- 0xade682d1,
- 0x2b3e6c1f,
- 0xfb41bd6b,
- 0x137e2179
-};
-
-/* Used also for sha256 */
-void sha1_hash(const void *buffer, size_t len, sha1_ctx_t *ctx)
+void sha1_hash(sha1_ctx_t *ctx, const void *buffer, size_t len)
{
- unsigned in_buf = ctx->total64 & 63;
- unsigned add = 64 - in_buf;
+ common64_hash(ctx, buffer, len);
+}
- ctx->total64 += len;
+/* May be used also for sha256 */
+void sha1_end(sha1_ctx_t *ctx, void *resbuf)
+{
+ unsigned hash_size;
- while (len >= add) { /* transfer whole blocks while possible */
- memcpy(ctx->wbuffer + in_buf, buffer, add);
- buffer = (const char *)buffer + add;
- len -= add;
- add = 64;
- in_buf = 0;
- ctx->process_block(ctx);
- }
+ /* SHA stores total in BE, need to swap on LE arches: */
+ common64_end(ctx, /*swap_needed:*/ SHA1_LITTLE_ENDIAN);
- memcpy(ctx->wbuffer + in_buf, buffer, len);
+ hash_size = 5; /* (ctx->process_block == sha1_process_block64) ? 5 : 8; */
+ /* This way we do not impose alignment constraints on resbuf: */
+ if (SHA1_LITTLE_ENDIAN) {
+ unsigned i;
+ for (i = 0; i < hash_size; ++i)
+ ctx->hash[i] = bswap_32(ctx->hash[i]);
+ }
+ memcpy(resbuf, ctx->hash, sizeof(ctx->hash[0]) * hash_size);
}
-/* Used also for sha256 */
-void sha1_end(void *resbuf, sha1_ctx_t *ctx)
+
+/* Generic 64-byte helpers for 64-byte block hashes */
+
+/*#define PROCESS_BLOCK(ctx) ctx->process_block(ctx)*/
+#define PROCESS_BLOCK(ctx) sha1_process_block64(ctx)
+
+/* Feed data through a temporary buffer.
+ * The internal buffer remembers previous data until it has 64
+ * bytes worth to pass on.
+ */
+static void common64_hash(sha1_ctx_t *ctx, const void *buffer, size_t len)
{
- unsigned pad, in_buf;
+ unsigned bufpos = ctx->total64 & 63;
+
+ ctx->total64 += len;
- in_buf = ctx->total64 & 63;
+ while (1) {
+ unsigned remaining = 64 - bufpos;
+ if (remaining > len)
+ remaining = len;
+ /* Copy data into aligned buffer */
+ memcpy(ctx->wbuffer + bufpos, buffer, remaining);
+ len -= remaining;
+ buffer = (const char *)buffer + remaining;
+ bufpos += remaining;
+ /* clever way to do "if (bufpos != 64) break; ... ; bufpos = 0;" */
+ bufpos -= 64;
+ if (bufpos != 0)
+ break;
+ /* Buffer is filled up, process it */
+ PROCESS_BLOCK(ctx);
+ /*bufpos = 0; - already is */
+ }
+}
+
+/* Process the remaining bytes in the buffer */
+static void common64_end(sha1_ctx_t *ctx, int swap_needed)
+{
+ unsigned bufpos = ctx->total64 & 63;
/* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0... */
- ctx->wbuffer[in_buf++] = 0x80;
+ ctx->wbuffer[bufpos++] = 0x80;
/* This loop iterates either once or twice, no more, no less */
while (1) {
- pad = 64 - in_buf;
- memset(ctx->wbuffer + in_buf, 0, pad);
- in_buf = 0;
+ unsigned remaining = 64 - bufpos;
+ memset(ctx->wbuffer + bufpos, 0, remaining);
/* Do we have enough space for the length count? */
- if (pad >= 8) {
- /* Store the 64-bit counter of bits in the buffer in BE format */
+ if (remaining >= 8) {
+ /* Store the 64-bit counter of bits in the buffer */
uint64_t t = ctx->total64 << 3;
- t = hton64(t);
+ if (swap_needed)
+ t = bswap_64(t);
/* wbuffer is suitably aligned for this */
*(uint64_t *) (&ctx->wbuffer[64 - 8]) = t;
}
- ctx->process_block(ctx);
- if (pad >= 8)
+ PROCESS_BLOCK(ctx);
+ if (remaining >= 8)
break;
+ bufpos = 0;
}
-
- in_buf = (ctx->process_block == sha1_process_block64) ? 5 : 8;
- /* This way we do not impose alignment constraints on resbuf: */
- if (SHA1_LITTLE_ENDIAN) {
- unsigned i;
- for (i = 0; i < in_buf; ++i)
- ctx->hash[i] = htonl(ctx->hash[i]);
- }
- memcpy(resbuf, ctx->hash, sizeof(ctx->hash[0]) * in_buf);
}