summaryrefslogtreecommitdiffstats
path: root/src/lib/gssapi/krb5/util_crypt.c
diff options
context:
space:
mode:
authorSam Hartman <hartmans@mit.edu>2009-01-03 23:19:42 +0000
committerSam Hartman <hartmans@mit.edu>2009-01-03 23:19:42 +0000
commit0ba5ccd7bb3ea15e44a87f84ca6feed8890f657d (patch)
tree2049c9c2cb135fe36b14c0a171711259258d18ec /src/lib/gssapi/krb5/util_crypt.c
parentff0a6514c9f4230938c29922d69cbd4e83691adf (diff)
downloadkrb5-0ba5ccd7bb3ea15e44a87f84ca6feed8890f657d.tar.gz
krb5-0ba5ccd7bb3ea15e44a87f84ca6feed8890f657d.tar.xz
krb5-0ba5ccd7bb3ea15e44a87f84ca6feed8890f657d.zip
Merge mskrb-integ onto trunk
The mskrb-integ branch includes support for the following projects: Projects/Aliases * Projects/PAC and principal APIs * Projects/AEAD encryption API * Projects/GSSAPI DCE * Projects/RFC 3244 In addition, it includes support for enctype negotiation, and a variety of GSS-API extensions. In the KDC it includes support for protocol transition, constrained delegation and a new authorization data interface. The old authorization data interface is also supported. This commit merges the mskrb-integ branch on to the trunk. Additional review and testing is required. Merge commit 'mskrb-integ' into trunk ticket: new status: open git-svn-id: svn://anonsvn.mit.edu/krb5/trunk@21690 dc483132-0cff-0310-8789-dd5450dbe970
Diffstat (limited to 'src/lib/gssapi/krb5/util_crypt.c')
-rw-r--r--src/lib/gssapi/krb5/util_crypt.c713
1 files changed, 696 insertions, 17 deletions
diff --git a/src/lib/gssapi/krb5/util_crypt.c b/src/lib/gssapi/krb5/util_crypt.c
index a0d0747e6b..d718ae0b18 100644
--- a/src/lib/gssapi/krb5/util_crypt.c
+++ b/src/lib/gssapi/krb5/util_crypt.c
@@ -1,6 +1,6 @@
/* -*- mode: c; indent-tabs-mode: nil -*- */
/*
- * Copyright2001 by the Massachusetts Institute of Technology.
+ * Copyright 2001, 2008 by the Massachusetts Institute of Technology.
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
@@ -54,6 +54,85 @@
#include <memory.h>
#endif
+const char kg_arcfour_l40[] = "fortybits";
+
+krb5_error_code
+kg_setup_keys(krb5_context context,
+ krb5_gss_ctx_id_rec *ctx,
+ krb5_keyblock *subkey,
+ krb5_cksumtype *cksumtype)
+{
+ krb5_error_code code;
+ unsigned int i;
+ krb5int_access kaccess;
+
+ assert(ctx != NULL);
+ assert(subkey != NULL);
+
+ *cksumtype = 0;
+ ctx->proto = 0;
+
+ code = krb5int_accessor(&kaccess, KRB5INT_ACCESS_VERSION);
+ if (code != 0)
+ return code;
+
+ if (ctx->enc != NULL) {
+ krb5_free_keyblock(context, ctx->enc);
+ ctx->enc = NULL;
+ }
+ code = krb5_copy_keyblock(context, subkey, &ctx->enc);
+ if (code != 0)
+ return code;
+
+ if (ctx->seq != NULL) {
+ krb5_free_keyblock(context, ctx->seq);
+ ctx->seq = NULL;
+ }
+ code = krb5_copy_keyblock(context, subkey, &ctx->seq);
+ if (code != 0)
+ return code;
+
+ switch (subkey->enctype) {
+ case ENCTYPE_DES_CBC_MD5:
+ case ENCTYPE_DES_CBC_MD4:
+ case ENCTYPE_DES_CBC_CRC:
+ ctx->enc->enctype = ENCTYPE_DES_CBC_RAW;
+ ctx->seq->enctype = ENCTYPE_DES_CBC_RAW;
+ ctx->signalg = SGN_ALG_DES_MAC_MD5;
+ ctx->cksum_size = 8;
+ ctx->sealalg = SEAL_ALG_DES;
+
+ for (i = 0; i < ctx->enc->length; i++)
+ /*SUPPRESS 113*/
+ ctx->enc->contents[i] ^= 0xF0;
+ break;
+ case ENCTYPE_DES3_CBC_SHA1:
+ ctx->enc->enctype = ENCTYPE_DES3_CBC_RAW;
+ ctx->seq->enctype = ENCTYPE_DES3_CBC_RAW;
+ ctx->signalg = SGN_ALG_HMAC_SHA1_DES3_KD;
+ ctx->cksum_size = 20;
+ ctx->sealalg = SEAL_ALG_DES3KD;
+ break;
+ case ENCTYPE_ARCFOUR_HMAC:
+ case ENCTYPE_ARCFOUR_HMAC_EXP:
+ ctx->signalg = SGN_ALG_HMAC_MD5;
+ ctx->cksum_size = 8;
+ ctx->sealalg = SEAL_ALG_MICROSOFT_RC4;
+ break;
+ default:
+ ctx->signalg = -1;
+ ctx->sealalg = -1;
+ ctx->proto = 1;
+
+ code = (*kaccess.krb5int_c_mandatory_cksumtype)(context, subkey->enctype,
+ cksumtype);
+ if (code != 0)
+ return code;
+ }
+
+ return 0;
+}
+
int
kg_confounder_size(context, key)
krb5_context context;
@@ -62,7 +141,8 @@ kg_confounder_size(context, key)
krb5_error_code code;
size_t blocksize;
/* We special case rc4*/
- if (key->enctype == ENCTYPE_ARCFOUR_HMAC)
+ if (key->enctype == ENCTYPE_ARCFOUR_HMAC ||
+ key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP)
return 8;
code = krb5_c_block_size(context, key->enctype, &blocksize);
if (code)
@@ -77,16 +157,15 @@ kg_make_confounder(context, key, buf)
krb5_keyblock *key;
unsigned char *buf;
{
- krb5_error_code code;
- size_t blocksize;
+ int confsize;
krb5_data lrandom;
- code = krb5_c_block_size(context, key->enctype, &blocksize);
- if (code)
- return(code);
+ confsize = kg_confounder_size(context, key);
+ if (confsize < 0)
+ return KRB5_BAD_MSIZE;
- lrandom.length = blocksize;
- lrandom.data = buf;
+ lrandom.length = confsize;
+ lrandom.data = (char *)buf;
return(krb5_c_random_make_octets(context, &lrandom));
}
@@ -122,7 +201,7 @@ kg_encrypt(context, key, usage, iv, in, out, length)
}
inputd.length = length;
- inputd.data = in;
+ inputd.data = (char *)in;
outputd.ciphertext.length = length;
outputd.ciphertext.data = out;
@@ -167,7 +246,7 @@ kg_decrypt(context, key, usage, iv, in, out, length)
inputd.enctype = ENCTYPE_UNKNOWN;
inputd.ciphertext.length = length;
- inputd.ciphertext.data = in;
+ inputd.ciphertext.data = (char *)in;
outputd.length = length;
outputd.data = out;
@@ -188,7 +267,9 @@ kg_arcfour_docrypt (const krb5_keyblock *longterm_key , int ms_usage,
krb5_data input, output;
krb5int_access kaccess;
krb5_keyblock seq_enc_key, usage_key;
- unsigned char t[4];
+ unsigned char t[14];
+ size_t i = 0;
+ int exportable = (longterm_key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP);
usage_key.length = longterm_key->length;
usage_key.contents = malloc(usage_key.length);
@@ -204,18 +285,24 @@ kg_arcfour_docrypt (const krb5_keyblock *longterm_key , int ms_usage,
if (code)
goto cleanup_arcfour;
- t[0] = ms_usage &0xff;
- t[1] = (ms_usage>>8) & 0xff;
- t[2] = (ms_usage>>16) & 0xff;
- t[3] = (ms_usage>>24) & 0xff;
+ if (exportable) {
+ memcpy(t, kg_arcfour_l40, sizeof(kg_arcfour_l40));
+ i += sizeof(kg_arcfour_l40);
+ }
+ t[i++] = ms_usage &0xff;
+ t[i++] = (ms_usage>>8) & 0xff;
+ t[i++] = (ms_usage>>16) & 0xff;
+ t[i++] = (ms_usage>>24) & 0xff;
input.data = (void *) &t;
- input.length = 4;
+ input.length = i;
output.data = (void *) usage_key.contents;
output.length = usage_key.length;
code = (*kaccess.krb5_hmac) (kaccess.md5_hash_provider,
longterm_key, 1, &input, &output);
if (code)
goto cleanup_arcfour;
+ if (exportable)
+ memset(usage_key.contents + 7, 0xab, 9);
input.data = ( void *) kd_data;
input.length = kd_data_len;
@@ -238,3 +325,595 @@ cleanup_arcfour:
free ((void *) seq_enc_key.contents);
return (code);
}
+
+/* AEAD */
+static krb5_error_code
+kg_translate_iov_v1(context, key, iov, iov_count, pkiov, pkiov_count)
+ krb5_context context;
+ const krb5_keyblock *key;
+ gss_iov_buffer_desc *iov;
+ int iov_count;
+ krb5_crypto_iov **pkiov;
+ size_t *pkiov_count;
+{
+ gss_iov_buffer_desc *header;
+ gss_iov_buffer_desc *trailer;
+ int i = 0, j;
+ size_t kiov_count;
+ krb5_crypto_iov *kiov;
+ size_t conf_len;
+
+ *pkiov = NULL;
+ *pkiov_count = 0;
+
+ conf_len = kg_confounder_size(context, (krb5_keyblock *)key);
+
+ header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ assert(header != NULL);
+
+ if (header->buffer.length < conf_len)
+ return KRB5_BAD_MSIZE;
+
+ trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
+ assert(trailer == NULL || trailer->buffer.length == 0);
+
+ kiov_count = 3 + iov_count;
+ kiov = (krb5_crypto_iov *)malloc(kiov_count * sizeof(krb5_crypto_iov));
+ if (kiov == NULL)
+ return ENOMEM;
+
+ /* For pre-CFX (raw enctypes) there is no krb5 header */
+ kiov[i].flags = KRB5_CRYPTO_TYPE_HEADER;
+ kiov[i].data.length = 0;
+ kiov[i].data.data = NULL;
+ i++;
+
+ /* For pre-CFX, the confounder is at the end of the GSS header */
+ kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ kiov[i].data.length = conf_len;
+ kiov[i].data.data = (char *)header->buffer.value + header->buffer.length - conf_len;
+ i++;
+
+ for (j = 0; j < iov_count; j++) {
+ kiov[i].flags = kg_translate_flag_iov(iov[j].type);
+ if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
+ continue;
+
+ kiov[i].data.length = iov[j].buffer.length;
+ kiov[i].data.data = (char *)iov[j].buffer.value;
+ i++;
+ }
+
+ kiov[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
+ kiov[i].data.length = 0;
+ kiov[i].data.data = NULL;
+ i++;
+
+ *pkiov = kiov;
+ *pkiov_count = i;
+
+ return 0;
+}
+
+static krb5_error_code
+kg_translate_iov_v3(context, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count)
+ krb5_context context;
+ int dce_style; /* DCE_STYLE indicates actual RRC is EC + RRC */
+ size_t ec; /* Extra rotate count for DCE_STYLE, pad length otherwise */
+ size_t rrc; /* Rotate count */
+ const krb5_keyblock *key;
+ gss_iov_buffer_desc *iov;
+ int iov_count;
+ krb5_crypto_iov **pkiov;
+ size_t *pkiov_count;
+{
+ gss_iov_buffer_t header;
+ gss_iov_buffer_t trailer;
+ int i = 0, j;
+ size_t kiov_count;
+ krb5_crypto_iov *kiov;
+ unsigned int k5_headerlen = 0, k5_trailerlen = 0;
+ size_t gss_headerlen, gss_trailerlen;
+ krb5_error_code code;
+
+ *pkiov = NULL;
+ *pkiov_count = 0;
+
+ header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ assert(header != NULL);
+
+ trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
+ assert(trailer == NULL || rrc == 0);
+
+ code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
+ if (code != 0)
+ return code;
+
+ code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_TRAILER, &k5_trailerlen);
+ if (code != 0)
+ return code;
+
+ /* Check header and trailer sizes */
+ gss_headerlen = 16 /* GSS-Header */ + k5_headerlen; /* Kerb-Header */
+ gss_trailerlen = ec + 16 /* E(GSS-Header) */ + k5_trailerlen; /* Kerb-Trailer */
+
+    /* If we're called without a trailer, we must rotate by trailer length */
+ if (trailer == NULL) {
+ size_t actual_rrc = rrc;
+
+ if (dce_style)
+ actual_rrc += ec; /* compensate for Windows bug */
+
+ if (actual_rrc != gss_trailerlen)
+ return KRB5_BAD_MSIZE;
+
+ gss_headerlen += gss_trailerlen;
+ gss_trailerlen = 0;
+ } else {
+ if (trailer->buffer.length != gss_trailerlen)
+ return KRB5_BAD_MSIZE;
+ }
+
+ if (header->buffer.length != gss_headerlen)
+ return KRB5_BAD_MSIZE;
+
+ kiov_count = 3 + iov_count;
+ kiov = (krb5_crypto_iov *)malloc(kiov_count * sizeof(krb5_crypto_iov));
+ if (kiov == NULL)
+ return ENOMEM;
+
+ /*
+ * The krb5 header is located at the end of the GSS header.
+ */
+ kiov[i].flags = KRB5_CRYPTO_TYPE_HEADER;
+ kiov[i].data.length = k5_headerlen;
+ kiov[i].data.data = (char *)header->buffer.value + header->buffer.length - k5_headerlen;
+ i++;
+
+ for (j = 0; j < iov_count; j++) {
+ kiov[i].flags = kg_translate_flag_iov(iov[j].type);
+ if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
+ continue;
+
+ kiov[i].data.length = iov[j].buffer.length;
+ kiov[i].data.data = (char *)iov[j].buffer.value;
+ i++;
+ }
+
+ /*
+ * The EC and encrypted GSS header are placed in the trailer, which may
+ * be rotated directly after the plaintext header if no trailer buffer
+ * is provided.
+ */
+ kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ kiov[i].data.length = ec + 16; /* E(Header) */
+ if (trailer == NULL)
+ kiov[i].data.data = (char *)header->buffer.value + 16;
+ else
+ kiov[i].data.data = (char *)trailer->buffer.value;
+ i++;
+
+ /*
+ * The krb5 trailer is placed after the encrypted copy of the
+ * krb5 header (which may be in the GSS header or trailer).
+ */
+ kiov[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
+ kiov[i].data.length = k5_trailerlen;
+ kiov[i].data.data = kiov[i - 1].data.data + ec + 16; /* E(Header) */
+ i++;
+
+ *pkiov = kiov;
+ *pkiov_count = i;
+
+ return 0;
+}
+
+static krb5_error_code
+kg_translate_iov(context, proto, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count)
+ krb5_context context;
+ int proto; /* 1 if CFX, 0 for pre-CFX */
+ int dce_style;
+ size_t ec;
+ size_t rrc;
+ const krb5_keyblock *key;
+ gss_iov_buffer_desc *iov;
+ int iov_count;
+ krb5_crypto_iov **pkiov;
+ size_t *pkiov_count;
+{
+ return proto ?
+ kg_translate_iov_v3(context, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count) :
+ kg_translate_iov_v1(context, key, iov, iov_count, pkiov, pkiov_count);
+}
+
+krb5_error_code
+kg_encrypt_iov(context, proto, dce_style, ec, rrc, key, usage, iv, iov, iov_count)
+ krb5_context context;
+ int proto;
+ int dce_style;
+ size_t ec;
+ size_t rrc;
+ krb5_keyblock *key;
+ int usage;
+ krb5_pointer iv;
+ gss_iov_buffer_desc *iov;
+ int iov_count;
+{
+ krb5_error_code code;
+ size_t blocksize;
+ krb5_data ivd, *pivd;
+ size_t kiov_count;
+ krb5_crypto_iov *kiov;
+
+ if (iv) {
+ code = krb5_c_block_size(context, key->enctype, &blocksize);
+ if (code)
+ return(code);
+
+ ivd.length = blocksize;
+ ivd.data = malloc(ivd.length);
+ if (ivd.data == NULL)
+ return ENOMEM;
+ memcpy(ivd.data, iv, ivd.length);
+ pivd = &ivd;
+ } else {
+ pivd = NULL;
+ }
+
+ code = kg_translate_iov(context, proto, dce_style, ec, rrc, key,
+ iov, iov_count, &kiov, &kiov_count);
+ if (code == 0) {
+ code = krb5_c_encrypt_iov(context, key, usage, pivd, kiov, kiov_count);
+ free(kiov);
+ }
+
+ if (pivd != NULL)
+ free(pivd->data);
+
+ return code;
+}
+
+/* length is the length of the cleartext. */
+
+krb5_error_code
+kg_decrypt_iov(context, proto, dce_style, ec, rrc, key, usage, iv, iov, iov_count)
+ krb5_context context;
+ int proto;
+ int dce_style;
+ size_t ec;
+ size_t rrc;
+ krb5_keyblock *key;
+ int usage;
+ krb5_pointer iv;
+ gss_iov_buffer_desc *iov;
+ int iov_count;
+{
+ krb5_error_code code;
+ size_t blocksize;
+ krb5_data ivd, *pivd;
+ size_t kiov_count;
+ krb5_crypto_iov *kiov;
+
+ if (iv) {
+ code = krb5_c_block_size(context, key->enctype, &blocksize);
+ if (code)
+ return(code);
+
+ ivd.length = blocksize;
+ ivd.data = malloc(ivd.length);
+ if (ivd.data == NULL)
+ return ENOMEM;
+ memcpy(ivd.data, iv, ivd.length);
+ pivd = &ivd;
+ } else {
+ pivd = NULL;
+ }
+
+ code = kg_translate_iov(context, proto, dce_style, ec, rrc, key,
+ iov, iov_count, &kiov, &kiov_count);
+ if (code == 0) {
+ code = krb5_c_decrypt_iov(context, key, usage, pivd, kiov, kiov_count);
+ free(kiov);
+ }
+
+ if (pivd != NULL)
+ free(pivd->data);
+
+ return code;
+}
+
+krb5_error_code
+kg_arcfour_docrypt_iov (krb5_context context,
+ const krb5_keyblock *longterm_key , int ms_usage,
+ const unsigned char *kd_data, size_t kd_data_len,
+ gss_iov_buffer_desc *iov, int iov_count)
+{
+ krb5_error_code code;
+ krb5_data input, output;
+ krb5int_access kaccess;
+ krb5_keyblock seq_enc_key, usage_key;
+ unsigned char t[14];
+ size_t i = 0;
+ int exportable = (longterm_key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP);
+ krb5_crypto_iov *kiov = NULL;
+ size_t kiov_count = 0;
+
+ usage_key.length = longterm_key->length;
+ usage_key.contents = malloc(usage_key.length);
+ if (usage_key.contents == NULL)
+ return (ENOMEM);
+ seq_enc_key.length = longterm_key->length;
+ seq_enc_key.contents = malloc(seq_enc_key.length);
+ if (seq_enc_key.contents == NULL) {
+ free ((void *) usage_key.contents);
+ return (ENOMEM);
+ }
+ code = krb5int_accessor (&kaccess, KRB5INT_ACCESS_VERSION);
+ if (code)
+ goto cleanup_arcfour;
+
+ if (exportable) {
+ memcpy(t, kg_arcfour_l40, sizeof(kg_arcfour_l40));
+ i += sizeof(kg_arcfour_l40);
+ }
+ t[i++] = ms_usage &0xff;
+ t[i++] = (ms_usage>>8) & 0xff;
+ t[i++] = (ms_usage>>16) & 0xff;
+ t[i++] = (ms_usage>>24) & 0xff;
+ input.data = (void *) &t;
+ input.length = i;
+ output.data = (void *) usage_key.contents;
+ output.length = usage_key.length;
+ code = (*kaccess.krb5_hmac) (kaccess.md5_hash_provider,
+ longterm_key, 1, &input, &output);
+ if (code)
+ goto cleanup_arcfour;
+ if (exportable)
+ memset(usage_key.contents + 7, 0xab, 9);
+
+ input.data = ( void *) kd_data;
+ input.length = kd_data_len;
+ output.data = (void *) seq_enc_key.contents;
+ code = (*kaccess.krb5_hmac) (kaccess.md5_hash_provider,
+ &usage_key, 1, &input, &output);
+ if (code)
+ goto cleanup_arcfour;
+
+ code = kg_translate_iov(context, 0 /* proto */, 0 /* dce_style */,
+ 0 /* ec */, 0 /* rrc */, longterm_key,
+ iov, iov_count, &kiov, &kiov_count);
+ if (code)
+ goto cleanup_arcfour;
+
+ code = ((*kaccess.arcfour_enc_provider->encrypt_iov)(
+ &seq_enc_key, 0,
+ kiov, kiov_count));
+cleanup_arcfour:
+ memset ((void *) seq_enc_key.contents, 0, seq_enc_key.length);
+ memset ((void *) usage_key.contents, 0, usage_key.length);
+ free ((void *) usage_key.contents);
+ free ((void *) seq_enc_key.contents);
+ if (kiov != NULL)
+ free(kiov);
+ return (code);
+}
+
+krb5_cryptotype
+kg_translate_flag_iov(OM_uint32 type)
+{
+ krb5_cryptotype ktype;
+
+ switch (GSS_IOV_BUFFER_TYPE(type)) {
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ case GSS_IOV_BUFFER_TYPE_PADDING:
+ ktype = KRB5_CRYPTO_TYPE_DATA;
+ break;
+ case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
+ ktype = KRB5_CRYPTO_TYPE_SIGN_ONLY;
+ break;
+ default:
+ ktype = KRB5_CRYPTO_TYPE_EMPTY;
+ break;
+ }
+
+ return ktype;
+}
+
+gss_iov_buffer_t
+kg_locate_iov(gss_iov_buffer_desc *iov,
+ int iov_count,
+ OM_uint32 type)
+{
+ int i;
+ gss_iov_buffer_t p = GSS_C_NO_IOV_BUFFER;
+
+ if (iov == GSS_C_NO_IOV_BUFFER)
+ return GSS_C_NO_IOV_BUFFER;
+
+ for (i = iov_count - 1; i >= 0; i--) {
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == type) {
+ if (p == GSS_C_NO_IOV_BUFFER)
+ p = &iov[i];
+ else
+ return GSS_C_NO_IOV_BUFFER;
+ }
+ }
+
+ return p;
+}
+
+void
+kg_iov_msglen(gss_iov_buffer_desc *iov,
+ int iov_count,
+ size_t *data_length_p,
+ size_t *assoc_data_length_p)
+{
+ int i;
+ size_t data_length = 0, assoc_data_length = 0;
+
+ assert(iov != GSS_C_NO_IOV_BUFFER);
+
+ *data_length_p = *assoc_data_length_p = 0;
+
+ for (i = 0; i < iov_count; i++) {
+ OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[i].type);
+
+ if (type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
+ assoc_data_length += iov[i].buffer.length;
+
+ if (type == GSS_IOV_BUFFER_TYPE_DATA ||
+ type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
+ data_length += iov[i].buffer.length;
+ }
+
+ *data_length_p = data_length;
+ *assoc_data_length_p = assoc_data_length;
+}
+
+void
+kg_release_iov(gss_iov_buffer_desc *iov, int iov_count)
+{
+ int i;
+ OM_uint32 min_stat;
+
+ assert(iov != GSS_C_NO_IOV_BUFFER);
+
+ for (i = 0; i < iov_count; i++) {
+ if (iov[i].type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
+ gss_release_buffer(&min_stat, &iov[i].buffer);
+ iov[i].type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
+ }
+ }
+}
+
+OM_uint32
+kg_fixup_padding_iov(OM_uint32 *minor_status,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ gss_iov_buffer_t padding = NULL;
+ gss_iov_buffer_t data = NULL;
+ size_t padlength, relative_padlength;
+ unsigned char *p;
+ OM_uint32 minor;
+
+ data = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_DATA);
+ padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
+
+ if (data == NULL) {
+ *minor_status = 0;
+ return GSS_S_COMPLETE;
+ }
+
+ if (padding == NULL || padding->buffer.length == 0) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+
+ p = (unsigned char *)padding->buffer.value;
+ padlength = p[padding->buffer.length - 1];
+
+ if (data->buffer.length + padding->buffer.length < padlength ||
+ padlength == 0) {
+ *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ /*
+ * kg_unseal_stream_iov() will place one byte of padding in the
+ * padding buffer; its true value is unknown until after decryption.
+ *
+ * relative_padlength contains the number of bytes to compensate the
+ * padding and data buffers by; it will be zero if the caller manages
+ * the padding length.
+ *
+ * If the caller manages the padding length, then relative_padlength
+     * will be zero.
+ *
+ * eg. if the buffers are structured as follows:
+ *
+ * +---DATA---+-PAD-+
+ * | ABCDE444 | 4 |
+ * +----------+-----+
+ *
+ * after compensation they would look like:
+ *
+ * +-DATA--+-PAD--+
+ * | ABCDE | NULL |
+ * +-------+------+
+ */
+ relative_padlength = padlength - padding->buffer.length;
+
+ assert(data->buffer.length >= relative_padlength);
+
+ data->buffer.length -= relative_padlength;
+
+ if (padding->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
+ gss_release_buffer(&minor, &padding->buffer);
+ padding->type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
+ }
+
+ padding->buffer.length = 0;
+ padding->buffer.value = NULL;
+
+ return GSS_S_COMPLETE;
+}
+
+int kg_map_toktype(int proto, int toktype)
+{
+ int toktype2;
+
+ if (proto)
+ switch (toktype) {
+ case KG_TOK_SIGN_MSG:
+ toktype2 = KG2_TOK_MIC_MSG;
+ break;
+ case KG_TOK_WRAP_MSG:
+ toktype2 = KG2_TOK_WRAP_MSG;
+ break;
+ case KG_TOK_DEL_CTX:
+ toktype2 = KG2_TOK_DEL_CTX;
+ break;
+ default:
+ toktype2 = toktype;
+ break;
+ }
+ else
+ toktype2 = toktype;
+
+ return toktype2;
+}
+
+krb5_boolean kg_integ_only_iov(gss_iov_buffer_desc *iov, int iov_count)
+{
+ int i;
+ krb5_boolean has_conf_data = FALSE;
+
+ assert(iov != GSS_C_NO_IOV_BUFFER);
+
+ for (i = 0; i < iov_count; i++) {
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA) {
+ has_conf_data = TRUE;
+ break;
+ }
+ }
+
+ return (has_conf_data == FALSE);
+}
+
+krb5_error_code kg_allocate_iov(gss_iov_buffer_t iov, size_t size)
+{
+ assert(iov != GSS_C_NO_IOV_BUFFER);
+ assert(iov->type & GSS_IOV_BUFFER_FLAG_ALLOCATE);
+
+ iov->buffer.length = size;
+ iov->buffer.value = xmalloc(size);
+ if (iov->buffer.value == NULL) {
+ iov->buffer.length = 0;
+ return ENOMEM;
+ }
+
+ iov->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
+
+ return 0;
+}