From: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Add common implementations for functions doing XOR over nettle_block16/nettle_block8.
Signed-off-by: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> --- Makefile.in | 3 +- block-internal.h | 93 ++++++++++++++++++++++++++++++++++++++++++++++++ cmac.c | 11 +++--- cmac64.c | 12 +++---- eax.c | 9 +---- gcm.c | 20 ++++------- siv-cmac.c | 9 ++--- 7 files changed, 120 insertions(+), 37 deletions(-) create mode 100644 block-internal.h
diff --git a/Makefile.in b/Makefile.in index af4f6e46ee9b..f6658c86341c 100644 --- a/Makefile.in +++ b/Makefile.in @@ -230,7 +230,8 @@ DISTFILES = $(SOURCES) $(HEADERS) getopt.h getopt_int.h \ INSTALL NEWS ChangeLog \ nettle.pc.in hogweed.pc.in \ $(des_headers) descore.README desdata.stamp \ - aes-internal.h camellia-internal.h cmac-internal.h serpent-internal.h \ + aes-internal.h block-internal.h \ + camellia-internal.h cmac-internal.h serpent-internal.h \ cast128_sboxes.h desinfo.h desCode.h \ ripemd160-internal.h sha2-internal.h \ memxor-internal.h nettle-internal.h nettle-write.h \ diff --git a/block-internal.h b/block-internal.h new file mode 100644 index 000000000000..ab3a6a79b8cb --- /dev/null +++ b/block-internal.h @@ -0,0 +1,93 @@ +/* block-internal.h + + Internal implementations of nettle_blockZ-related functions. + + Copyright (C) 2011 Katholieke Universiteit Leuven + Copyright (C) 2011, 2013, 2018 Niels Möller + Copyright (C) 2018 Red Hat, Inc. + Copyright (C) 2019 Dmitry Eremin-Solenikov + + This file is part of GNU Nettle. + + GNU Nettle is free software: you can redistribute it and/or + modify it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + + or + + * the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your + option) any later version. + + or both in parallel, as here. + + GNU Nettle is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received copies of the GNU General Public License and + the GNU Lesser General Public License along with this program. If + not, see http://www.gnu.org/licenses/. 
+*/ + +#ifndef NETTLE_BLOCK_INTERNAL_H_INCLUDED +#define NETTLE_BLOCK_INTERNAL_H_INCLUDED + +#include <assert.h> + +#include "nettle-types.h" +#include "memxor.h" + +static inline void +block16_xor (union nettle_block16 *r, + const union nettle_block16 *x) +{ + r->u64[0] ^= x->u64[0]; + r->u64[1] ^= x->u64[1]; +} + +static inline void +block16_xor3 (union nettle_block16 *r, + const union nettle_block16 *x, + const union nettle_block16 *y) +{ + r->u64[0] = x->u64[0] ^ y->u64[0]; + r->u64[1] = x->u64[1] ^ y->u64[1]; +} + +static inline void +block16_xor_bytes (union nettle_block16 *r, + const union nettle_block16 *x, + const uint8_t *bytes) +{ + memxor3 (r->b, x->b, bytes, 16); +} + +static inline void +block8_xor (union nettle_block8 *r, + const union nettle_block8 *x) +{ + r->u64 ^= x->u64; +} + +static inline void +block8_xor3 (union nettle_block8 *r, + const union nettle_block8 *x, + const union nettle_block8 *y) +{ + r->u64 = x->u64 ^ y->u64; +} + +static inline void +block8_xor_bytes (union nettle_block8 *r, + const union nettle_block8 *x, + const uint8_t *bytes) +{ + memxor3 (r->b, x->b, bytes, 8); +} + +#endif /* NETTLE_BLOCK_INTERNAL_H_INCLUDED */ diff --git a/cmac.c b/cmac.c index 70ce8132d9d1..194324421c58 100644 --- a/cmac.c +++ b/cmac.c @@ -45,6 +45,7 @@ #include "memxor.h" #include "nettle-internal.h" #include "cmac-internal.h" +#include "block-internal.h" #include "macros.h"
/* shift one and XOR with 0x87. */ @@ -119,12 +120,12 @@ cmac128_update(struct cmac128_ctx *ctx, const void *cipher, /* * now checksum everything but the last block */ - memxor3(Y.b, ctx->X.b, ctx->block.b, 16); + block16_xor3(&Y, &ctx->X, &ctx->block); encrypt(cipher, 16, ctx->X.b, Y.b);
while (msg_len > 16) { - memxor3(Y.b, ctx->X.b, msg, 16); + block16_xor_bytes (&Y, &ctx->X, msg); encrypt(cipher, 16, ctx->X.b, Y.b); msg += 16; msg_len -= 16; @@ -151,14 +152,14 @@ cmac128_digest(struct cmac128_ctx *ctx, const struct cmac128_key *key, ctx->block.b[ctx->index] = 0x80; memset(ctx->block.b + ctx->index + 1, 0, 16 - 1 - ctx->index);
- memxor(ctx->block.b, key->K2.b, 16); + block16_xor (&ctx->block, &key->K2); } else { - memxor(ctx->block.b, key->K1.b, 16); + block16_xor (&ctx->block, &key->K1); }
- memxor3(Y.b, ctx->block.b, ctx->X.b, 16); + block16_xor3 (&Y, &ctx->block, &ctx->X);
assert(length <= 16); if (length == 16) diff --git a/cmac64.c b/cmac64.c index 636635ba478b..e7bb438580d6 100644 --- a/cmac64.c +++ b/cmac64.c @@ -43,8 +43,8 @@
#include "cmac.h"
-#include "memxor.h" #include "nettle-internal.h" +#include "block-internal.h" #include "macros.h"
/* shift one and XOR with 0x87. */ @@ -119,12 +119,12 @@ cmac64_update(struct cmac64_ctx *ctx, const void *cipher, /* * now checksum everything but the last block */ - memxor3(Y.b, ctx->X.b, ctx->block.b, 8); + block8_xor3(&Y, &ctx->X, &ctx->block); encrypt(cipher, 8, ctx->X.b, Y.b);
while (msg_len > 8) { - memxor3(Y.b, ctx->X.b, msg, 8); + block8_xor_bytes(&Y, &ctx->X, msg); encrypt(cipher, 8, ctx->X.b, Y.b); msg += 8; msg_len -= 8; @@ -151,14 +151,14 @@ cmac64_digest(struct cmac64_ctx *ctx, const struct cmac64_key *key, if (ctx->index < 8) { ctx->block.b[ctx->index] = 0x80; - memxor(ctx->block.b, key->K2.b, 8); + block8_xor(&ctx->block, &key->K2); } else { - memxor(ctx->block.b, key->K1.b, 8); + block8_xor(&ctx->block, &key->K1); }
- memxor3(Y.b, ctx->block.b, ctx->X.b, 8); + block8_xor3(&Y, &ctx->block, &ctx->X);
assert(length <= 8); if (length == 8) diff --git a/eax.c b/eax.c index 4b8b5117746e..63f3ff82fe65 100644 --- a/eax.c +++ b/eax.c @@ -40,6 +40,7 @@
#include "eax.h"
+#include "block-internal.h" #include "ctr.h" #include "memxor.h"
@@ -50,14 +51,6 @@ omac_init (union nettle_block16 *state, unsigned t) state->b[EAX_BLOCK_SIZE - 1] = t; }
-/* Almost the same as gcm_gf_add */ -static void -block16_xor (union nettle_block16 *dst, const union nettle_block16 *src) -{ - dst->u64[0] ^= src->u64[0]; - dst->u64[1] ^= src->u64[1]; -} - static void omac_update (union nettle_block16 *state, const struct eax_key *key, const void *cipher, nettle_cipher_func *f, diff --git a/gcm.c b/gcm.c index 627097b24218..4a04a0a10842 100644 --- a/gcm.c +++ b/gcm.c @@ -53,16 +53,10 @@ #include "nettle-internal.h" #include "macros.h" #include "ctr-internal.h" +#include "block-internal.h"
#define GHASH_POLYNOMIAL 0xE1UL
-static void -gcm_gf_add (union nettle_block16 *r, - const union nettle_block16 *x, const union nettle_block16 *y) -{ - r->u64[0] = x->u64[0] ^ y->u64[0]; - r->u64[1] = x->u64[1] ^ y->u64[1]; -} /* Multiplication by 010...0; a big-endian shift right. If the bit shifted out is one, the defining polynomial is added to cancel it out. r == x is allowed. */ @@ -108,7 +102,7 @@ gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *y) for (j = 0; j < 8; j++, b <<= 1) { if (b & 0x80) - gcm_gf_add(&Z, &Z, &V); + block16_xor(&Z, &V); gcm_gf_shift(&V, &V); } @@ -165,9 +159,9 @@ gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *table) uint8_t b = x->b[i];
gcm_gf_shift_4(&Z); - gcm_gf_add(&Z, &Z, &table[b & 0xf]); + block16_xor(&Z, &table[b & 0xf]); gcm_gf_shift_4(&Z); - gcm_gf_add(&Z, &Z, &table[b >> 4]); + block16_xor(&Z, &table[b >> 4]); } memcpy (x->b, Z.b, sizeof(Z)); } @@ -243,10 +237,10 @@ gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *table) for (i = GCM_BLOCK_SIZE-2; i > 0; i--) { gcm_gf_shift_8(&Z); - gcm_gf_add(&Z, &Z, &table[x->b[i]]); + block16_xor(&Z, &table[x->b[i]]); } gcm_gf_shift_8(&Z); - gcm_gf_add(x, &Z, &table[x->b[0]]); + block16_xor3(x, &Z, &table[x->b[0]]); } # endif /* ! HAVE_NATIVE_gcm_hash8 */ # else /* GCM_TABLE_BITS != 8 */ @@ -286,7 +280,7 @@ gcm_set_key(struct gcm_key *key, { unsigned j; for (j = 1; j < i; j++) - gcm_gf_add(&key->h[i+j], &key->h[i],&key->h[j]); + block16_xor3(&key->h[i+j], &key->h[i],&key->h[j]); } #endif } diff --git a/siv-cmac.c b/siv-cmac.c index f498cb863f5a..42f740cddf5d 100644 --- a/siv-cmac.c +++ b/siv-cmac.c @@ -46,6 +46,7 @@ #include "memops.h" #include "cmac-internal.h" #include "nettle-internal.h" +#include "block-internal.h"
/* This is an implementation of S2V for the AEAD case where * vectors if zero, are considered as S empty components */ @@ -69,12 +70,12 @@ _siv_s2v (const struct nettle_cipher *nc, _cmac128_block_mulx (&D, &D); cmac128_update (&cmac_ctx, cmac_cipher, nc->encrypt, alength, adata); cmac128_digest (&cmac_ctx, cmac_key, cmac_cipher, nc->encrypt, 16, S.b); - memxor (D.b, S.b, 16); + block16_xor (&D, &S);
_cmac128_block_mulx (&D, &D); cmac128_update (&cmac_ctx, cmac_cipher, nc->encrypt, nlength, nonce); cmac128_digest (&cmac_ctx, cmac_key, cmac_cipher, nc->encrypt, 16, S.b); - memxor (D.b, S.b, 16); + block16_xor (&D, &S);
/* Sn */ if (plength >= 16) @@ -83,7 +84,7 @@ _siv_s2v (const struct nettle_cipher *nc,
pdata += plength - 16;
- memxor3 (T.b, pdata, D.b, 16); + block16_xor_bytes (&T, &D, pdata); } else { @@ -95,7 +96,7 @@ _siv_s2v (const struct nettle_cipher *nc, if (plength + 1 < 16) memset (&pad.b[plength + 1], 0, 16 - plength - 1);
- memxor (T.b, pad.b, 16); + block16_xor (&T, &pad); }
cmac128_update (&cmac_ctx, cmac_cipher, nc->encrypt, 16, T.b);