author     Ken Raeburn <raeburn@mit.edu>   2009-09-15 06:19:32 +0000
committer  Ken Raeburn <raeburn@mit.edu>   2009-09-15 06:19:32 +0000
commit     4ed6cb59d5135ab5d9b15574f4c4923a50fc1dd7 (patch)
tree       4a4d4922530056d0f0a94b9a9fe93480d84890e1
parent     66f4ea3db7a9c1eb278b035fdae8ebea7636edb9 (diff)
download   krb5-4ed6cb59d5135ab5d9b15574f4c4923a50fc1dd7.tar.gz
           krb5-4ed6cb59d5135ab5d9b15574f4c4923a50fc1dd7.tar.xz
           krb5-4ed6cb59d5135ab5d9b15574f4c4923a50fc1dd7.zip
Test byte ordering determination and unaligned access
git-svn-id: svn://anonsvn.mit.edu/krb5/trunk@22762 dc483132-0cff-0310-8789-dd5450dbe970
-rw-r--r--   src/util/support/Makefile.in |  8
-rw-r--r--   src/util/support/t_unal.c    | 46
2 files changed, 52 insertions, 2 deletions
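
For context, helpers like load_16_be/store_16_be exercised by this test are conventionally built from byte shifts, which makes them independent of both host byte order and pointer alignment. The sketch below is illustrative only; the example_ names are hypothetical and the real definitions live in k5-platform.h and may differ.

#include <stdint.h>

/* Read a 16-bit big-endian value from an arbitrarily aligned pointer. */
static inline uint16_t example_load_16_be(const unsigned char *p)
{
    return (uint16_t)(((uint16_t)p[0] << 8) | p[1]);
}

/* Write a 16-bit value in big-endian byte order, one byte at a time. */
static inline void example_store_16_be(uint16_t v, unsigned char *p)
{
    p[0] = (unsigned char)((v >> 8) & 0xff);
    p[1] = (unsigned char)(v & 0xff);
}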
diff --git a/src/util/support/Makefile.in b/src/util/support/Makefile.in
index c890a8c69d..e20984a640 100644
--- a/src/util/support/Makefile.in
+++ b/src/util/support/Makefile.in
@@ -153,13 +153,17 @@ T_K5BUF_OBJS= t_k5buf.o k5buf.o $(PRINTF_ST_OBJ)
 t_k5buf: $(T_K5BUF_OBJS)
 	$(CC_LINK) -o t_k5buf $(T_K5BUF_OBJS)
 
-TEST_PROGS= t_k5buf
+t_unal: t_unal.o
+	$(CC_LINK) -o t_unal t_unal.o
+
+TEST_PROGS= t_k5buf t_unal
 
 check-unix:: $(TEST_PROGS)
 	./t_k5buf
+	./t_unal
 
 clean::
-	$(RM) t_k5buf.o t_k5buf
+	$(RM) t_k5buf.o t_k5buf t_unal.o t_unal
 
 @lib_frag@
 @libobj_frag@
diff --git a/src/util/support/t_unal.c b/src/util/support/t_unal.c
new file mode 100644
index 0000000000..d9a3cc4b0f
--- /dev/null
+++ b/src/util/support/t_unal.c
@@ -0,0 +1,46 @@
+#undef NDEBUG
+#include <assert.h>
+#include "k5-platform.h"
+
+int main ()
+{
+ /* Test some low-level assumptions the Kerberos code depends
+ on. */
+
+ union {
+ UINT64_TYPE n64;
+ uint32_t n32;
+ uint16_t n16;
+ unsigned char b[9];
+ } u;
+ static unsigned char buf[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+
+ assert(load_64_be(buf+1) == 0x0102030405060708LL);
+ assert(load_64_le(buf+1) == 0x0807060504030201LL);
+ assert(load_32_le(buf+2) == 0x05040302);
+ assert(load_32_be(buf+2) == 0x02030405);
+ assert(load_16_be(buf+3) == 0x0304);
+ assert(load_16_le(buf+3) == 0x0403);
+ u.b[0] = 0;
+ assert((store_64_be(0x0102030405060708LL, u.b+1), !memcmp(buf, u.b, 9)));
+ u.b[1] = 9;
+ assert((store_64_le(0x0807060504030201LL, u.b+1), !memcmp(buf, u.b, 9)));
+ u.b[2] = 10;
+ assert((store_32_be(0x02030405, u.b+2), !memcmp(buf, u.b, 9)));
+ u.b[3] = 11;
+ assert((store_32_le(0x05040302, u.b+2), !memcmp(buf, u.b, 9)));
+ u.b[4] = 12;
+ assert((store_16_be(0x0304, u.b+3), !memcmp(buf, u.b, 9)));
+ u.b[4] = 13;
+ assert((store_16_le(0x0403, u.b+3), !memcmp(buf, u.b, 9)));
+ /* Verify that load_*_n properly does native format. Assume
+ the unaligned thing is okay. */
+ u.n64 = 0x090a0b0c0d0e0f00LL;
+ assert(load_64_n((unsigned char *) &u.n64) == 0x090a0b0c0d0e0f00LL);
+ u.n32 = 0x06070809;
+ assert(load_32_n((unsigned char *) &u.n32) == 0x06070809);
+ u.n16 = 0x0a0b;
+ assert(load_16_n((unsigned char *) &u.n16) == 0x0a0b);
+
+ return 0;
+}
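
The final three checks cover the native-order loads (load_*_n), which, as the comment in the test notes, are only expected to work when unaligned access itself is acceptable. A common portable way to get that behavior is a memcpy into an aligned temporary; the sketch below is an assumption for illustration (example_load_64_n is not the actual k5-platform.h macro).

#include <stdint.h>
#include <string.h>

/* Native-order 64-bit load from a possibly unaligned pointer. */
static inline uint64_t example_load_64_n(const unsigned char *p)
{
    uint64_t v;
    memcpy(&v, p, sizeof(v));   /* compilers usually lower this to a single load */
    return v;
}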