From b71168c7fbe2143bc72c674f1e74a239b90b5007 Mon Sep 17 00:00:00 2001
From: Ken Raeburn
Date: Sun, 3 Jan 2010 23:39:12 +0000
Subject: Enable caching of key-derived context info such as key schedules
 from one encryption operation to another. Use a new function in the
 enc_provider structure for cleanup. Implement caching of aes_ctx values.

Using Greg's performance tests from the derived-key caching work, on a
2.8GHz Xeon, I see 1 million AES-128 encryptions of 16 bytes improved by
5-6%; encryptions of 1024 bytes and checksums are not significantly
affected.

git-svn-id: svn://anonsvn.mit.edu/krb5/trunk@23574 dc483132-0cff-0310-8789-dd5450dbe970
---
 src/lib/crypto/builtin/enc_provider/aes.c | 70 +++++++++++++++++++++++--------
 src/lib/crypto/krb/key.c                  |  8 ++++
 2 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/src/lib/crypto/builtin/enc_provider/aes.c b/src/lib/crypto/builtin/enc_provider/aes.c
index 16e3932e7..9d2c5d4ff 100644
--- a/src/lib/crypto/builtin/enc_provider/aes.c
+++ b/src/lib/crypto/builtin/enc_provider/aes.c
@@ -33,6 +33,18 @@
 
 #define CHECK_SIZES 0
 
+/*
+ * Private per-key data to cache after first generation.  We don't
+ * want to mess with the imported AES implementation too much, so
+ * we'll just use two copies of its context, one for encryption and
+ * one for decryption, and use the #rounds field as a flag for whether
+ * we've initialized each half.
+ */
+struct aes_key_info_cache {
+    aes_ctx enc_ctx, dec_ctx;
+};
+#define CACHE(X) ((struct aes_key_info_cache *)((X)->cache))
+
 static inline void enc(unsigned char *out, const unsigned char *in,
                        aes_ctx *ctx)
 {
@@ -76,16 +88,23 @@ krb5_error_code
 krb5int_aes_encrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
                     size_t num_data)
 {
-    aes_ctx ctx;
     unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
     int nblocks = 0, blockno;
     size_t input_length, i;
     struct iov_block_state input_pos, output_pos;
 
-    if (aes_enc_key(key->keyblock.contents, key->keyblock.length, &ctx)
-        != aes_good)
-        abort();
-
+    if (key->cache == NULL) {
+        key->cache = malloc(sizeof(struct aes_key_info_cache));
+        if (key->cache == NULL)
+            return ENOMEM;
+        CACHE(key)->enc_ctx.n_rnd = CACHE(key)->dec_ctx.n_rnd = 0;
+    }
+    if (CACHE(key)->enc_ctx.n_rnd == 0) {
+        if (aes_enc_key(key->keyblock.contents, key->keyblock.length,
+                        &CACHE(key)->enc_ctx)
+            != aes_good)
+            abort();
+    }
     if (ivec != NULL)
         memcpy(tmp, ivec->data, BLOCK_SIZE);
     else
@@ -104,7 +123,7 @@ krb5int_aes_encrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
     nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
     if (nblocks == 1) {
         krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
-        enc(tmp2, tmp, &ctx);
+        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
         krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE, &output_pos);
     } else if (nblocks > 1) {
         unsigned char blockN2[BLOCK_SIZE]; /* second last */
@@ -116,7 +135,7 @@ krb5int_aes_encrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
             block = iov_next_block(blockN, BLOCK_SIZE, data, num_data,
                                    &input_pos);
             xorblock(tmp, block);
-            enc(block, tmp, &ctx);
+            enc(block, tmp, &CACHE(key)->enc_ctx);
             iov_store_block(data, num_data, block, blockN, BLOCK_SIZE,
                             &output_pos);
 
@@ -136,13 +155,13 @@ krb5int_aes_encrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
 
         /* Encrypt second last block */
         xorblock(tmp, blockN2);
-        enc(tmp2, tmp, &ctx);
+        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
         memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
         memcpy(tmp, tmp2, BLOCK_SIZE);
 
         /* Encrypt last block */
         xorblock(tmp, blockN1);
-        enc(tmp2, tmp, &ctx);
+        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
         memcpy(blockN1, tmp2, BLOCK_SIZE);
 
         /* Put the last two blocks back into the iovec (reverse order) */
@@ -162,7 +181,6 @@ krb5_error_code
 krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
                     size_t num_data)
 {
-    aes_ctx ctx;
     unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
     int nblocks = 0, blockno;
     unsigned int i;
@@ -171,9 +189,17 @@ krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
 
     CHECK_SIZES;
 
-    if (aes_dec_key(key->keyblock.contents, key->keyblock.length,
-                    &ctx) != aes_good)
-        abort();
+    if (key->cache == NULL) {
+        key->cache = malloc(sizeof(struct aes_key_info_cache));
+        if (key->cache == NULL)
+            return ENOMEM;
+        CACHE(key)->enc_ctx.n_rnd = CACHE(key)->dec_ctx.n_rnd = 0;
+    }
+    if (CACHE(key)->dec_ctx.n_rnd == 0) {
+        if (aes_dec_key(key->keyblock.contents, key->keyblock.length,
+                        &CACHE(key)->dec_ctx) != aes_good)
+            abort();
+    }
 
     if (ivec != NULL)
         memcpy(tmp, ivec->data, BLOCK_SIZE);
@@ -193,7 +219,7 @@ krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
     nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
     if (nblocks == 1) {
         krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
-        dec(tmp2, tmp, &ctx);
+        dec(tmp2, tmp, &CACHE(key)->dec_ctx);
         krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE, &output_pos);
     } else if (nblocks > 1) {
         unsigned char blockN2[BLOCK_SIZE]; /* second last */
@@ -205,7 +231,7 @@ krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
             block = iov_next_block(blockN, BLOCK_SIZE, data, num_data,
                                    &input_pos);
             memcpy(tmp2, block, BLOCK_SIZE);
-            dec(block, block, &ctx);
+            dec(block, block, &CACHE(key)->dec_ctx);
             xorblock(block, tmp);
             memcpy(tmp, tmp2, BLOCK_SIZE);
             iov_store_block(data, num_data, block, blockN, BLOCK_SIZE,
                             &output_pos);
@@ -226,7 +252,7 @@ krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
             memcpy(ivec->data, blockN2, BLOCK_SIZE);
 
         /* Decrypt second last block */
-        dec(tmp2, blockN2, &ctx);
+        dec(tmp2, blockN2, &CACHE(key)->dec_ctx);
 
         /* Set tmp2 to last (possibly partial) plaintext block, and save it. */
         xorblock(tmp2, blockN1);
@@ -236,7 +262,7 @@ krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
            ciphertext block. */
         input_length %= BLOCK_SIZE;
         memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
-        dec(tmp3, tmp2, &ctx);
+        dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
         xorblock(tmp3, tmp);
         memcpy(blockN1, tmp3, BLOCK_SIZE);
 
@@ -262,6 +288,12 @@ aes_init_state(const krb5_keyblock *key, krb5_keyusage usage,
     return 0;
 }
 
+static void
+aes_key_cleanup(krb5_key key)
+{
+    zapfree(key->cache, sizeof(struct aes_key_info_cache));
+}
+
 const struct krb5_enc_provider krb5int_enc_aes128 = {
     16,
     16, 16,
@@ -271,6 +303,7 @@ const struct krb5_enc_provider krb5int_enc_aes128 = {
     krb5int_aes_make_key,
     aes_init_state,
     krb5int_default_free_state,
+    aes_key_cleanup
 };
 
 const struct krb5_enc_provider krb5int_enc_aes256 = {
@@ -281,5 +314,6 @@ const struct krb5_enc_provider krb5int_enc_aes256 = {
     NULL,
     krb5int_aes_make_key,
     aes_init_state,
-    krb5int_default_free_state
+    krb5int_default_free_state,
+    aes_key_cleanup
 };
diff --git a/src/lib/crypto/krb/key.c b/src/lib/crypto/krb/key.c
index 2fabd3a74..a64c7a00c 100644
--- a/src/lib/crypto/krb/key.c
+++ b/src/lib/crypto/krb/key.c
@@ -26,6 +26,7 @@
  */
 
 #include "k5-int.h"
+#include "etypes.h"
 
 /*
  * The krb5_key data type wraps an exposed keyblock in an opaque data
@@ -52,6 +53,7 @@ krb5_k_create_key(krb5_context context, const krb5_keyblock *key_data,
 
     key->refcount = 1;
     key->derived = NULL;
+    key->cache = NULL;
 
     *out = key;
     return 0;
@@ -72,6 +74,7 @@ void KRB5_CALLCONV
 krb5_k_free_key(krb5_context context, krb5_key key)
 {
     struct derived_key *dk;
+    const struct krb5_keytypes *ktp;
 
     if (key == NULL || --key->refcount > 0)
         return;
@@ -84,6 +87,11 @@ krb5_k_free_key(krb5_context context, krb5_key key)
         free(dk);
     }
     krb5int_c_free_keyblock_contents(context, &key->keyblock);
+    if (key->cache) {
+        ktp = find_enctype(key->keyblock.enctype);
+        if (ktp && ktp->enc->key_cleanup)
+            ktp->enc->key_cleanup(key);
+    }
     free(key);
 }
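
For readers following the caching scheme above, here is a minimal, self-contained sketch of the same pattern outside krb5: a per-key cache pointer allocated lazily on first use, a zero "rounds" count serving as the not-yet-initialized flag, and a per-provider cleanup hook that wipes and frees the cache when the key is released. All names (toy_key, toy_ctx, toy_provider, expand_key) are hypothetical stand-ins rather than krb5 APIs; only the overall shape follows the patch.

/*
 * Minimal sketch, not krb5 code: lazy per-key context caching with a
 * provider cleanup hook.  Hypothetical names throughout.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_ctx {
    int n_rnd;                   /* 0 means "schedule not expanded yet" */
    unsigned char sched[64];     /* stand-in for an expanded key schedule */
};

struct toy_key {
    unsigned char keybytes[16];
    void *cache;                 /* provider-private data, like krb5_key.cache */
};

struct toy_provider {
    int (*encrypt)(struct toy_key *key);
    void (*key_cleanup)(struct toy_key *key);
};

#define CACHE(k) ((struct toy_ctx *)((k)->cache))

/* The expensive step we want to perform only once per key. */
static void
expand_key(const unsigned char *keybytes, struct toy_ctx *ctx)
{
    memcpy(ctx->sched, keybytes, 16);
    ctx->n_rnd = 10;             /* nonzero marks the context as ready */
}

static int
toy_encrypt(struct toy_key *key)
{
    if (key->cache == NULL) {
        /* calloc zeroes n_rnd, standing in for the patch's explicit init. */
        key->cache = calloc(1, sizeof(struct toy_ctx));
        if (key->cache == NULL)
            return ENOMEM;
    }
    if (CACHE(key)->n_rnd == 0)  /* first use of this key: build schedule */
        expand_key(key->keybytes, CACHE(key));
    /* ... use CACHE(key)->sched to process the data (omitted) ... */
    return 0;
}

static void
toy_key_cleanup(struct toy_key *key)
{
    if (key->cache != NULL) {
        /* krb5's zapfree() wipes before freeing; do the same here. */
        memset(key->cache, 0, sizeof(struct toy_ctx));
        free(key->cache);
        key->cache = NULL;
    }
}

static const struct toy_provider provider = { toy_encrypt, toy_key_cleanup };

int
main(void)
{
    struct toy_key key = { { 0x01, 0x02, 0x03 }, NULL };

    provider.encrypt(&key);      /* expands and caches the schedule */
    provider.encrypt(&key);      /* reuses the cached schedule */
    printf("rounds cached: %d\n", CACHE(&key)->n_rnd);
    provider.key_cleanup(&key);  /* wipe and free when the key goes away */
    return 0;
}

As in the patch, the cached context is tied to the key object rather than to a single encrypt call, which is what lets the expanded key schedule survive across operations; the cleanup hook keeps the generic key-freeing code ignorant of the provider's private layout.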