From c6557e7f2b6ae76a44653d38f835174074c42e05 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date: Fri, 1 Aug 2008 20:42:05 +0200
Subject: [S390] move include/asm-s390 to arch/s390/include/asm

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
 arch/s390/include/asm/byteorder.h | 125 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100644 arch/s390/include/asm/byteorder.h

diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h
new file mode 100644
index 00000000000..1fe2492baa8
--- /dev/null
+++ b/arch/s390/include/asm/byteorder.h
@@ -0,0 +1,125 @@
+#ifndef _S390_BYTEORDER_H
+#define _S390_BYTEORDER_H
+
+/*
+ *  include/asm-s390/byteorder.h
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <asm/types.h>
+
+#ifdef __GNUC__
+
+#ifdef __s390x__
+static inline __u64 ___arch__swab64p(const __u64 *x)
+{
+	__u64 result;
+
+	asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
+	return result;
+}
+
+static inline __u64 ___arch__swab64(__u64 x)
+{
+	__u64 result;
+
+	asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
+	return result;
+}
+
+static inline void ___arch__swab64s(__u64 *x)
+{
+	*x = ___arch__swab64p(x);
+}
+#endif /* __s390x__ */
+
+static inline __u32 ___arch__swab32p(const __u32 *x)
+{
+	__u32 result;
+
+	asm volatile(
+#ifndef __s390x__
+		"	icm	%0,8,3(%1)\n"
+		"	icm	%0,4,2(%1)\n"
+		"	icm	%0,2,1(%1)\n"
+		"	ic	%0,0(%1)"
+		: "=&d" (result) : "a" (x), "m" (*x) : "cc");
+#else /* __s390x__ */
+		"	lrv	%0,%1"
+		: "=d" (result) : "m" (*x));
+#endif /* __s390x__ */
+	return result;
+}
+
+static inline __u32 ___arch__swab32(__u32 x)
+{
+#ifndef __s390x__
+	return ___arch__swab32p(&x);
+#else /* __s390x__ */
+	__u32 result;
+
+	asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
+	return result;
+#endif /* __s390x__ */
+}
+
+static __inline__ void ___arch__swab32s(__u32 *x)
+{
+	*x = ___arch__swab32p(x);
+}
+
+static __inline__ __u16 ___arch__swab16p(const __u16 *x)
+{
+	__u16 result;
+
+	asm volatile(
+#ifndef __s390x__
+		"	icm	%0,2,1(%1)\n"
+		"	ic	%0,0(%1)\n"
+		: "=&d" (result) : "a" (x), "m" (*x) : "cc");
+#else /* __s390x__ */
+		"	lrvh	%0,%1"
+		: "=d" (result) : "m" (*x));
+#endif /* __s390x__ */
+	return result;
+}
+
+static __inline__ __u16 ___arch__swab16(__u16 x)
+{
+	return ___arch__swab16p(&x);
+}
+
+static __inline__ void ___arch__swab16s(__u16 *x)
+{
+	*x = ___arch__swab16p(x);
+}
+
+#ifdef __s390x__
+#define __arch__swab64(x) ___arch__swab64(x)
+#define __arch__swab64p(x) ___arch__swab64p(x)
+#define __arch__swab64s(x) ___arch__swab64s(x)
+#endif /* __s390x__ */
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32p(x) ___arch__swab32p(x)
+#define __arch__swab16p(x) ___arch__swab16p(x)
+#define __arch__swab32s(x) ___arch__swab32s(x)
+#define __arch__swab16s(x) ___arch__swab16s(x)
+
+#ifndef __s390x__
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+# define __SWAB_64_THRU_32__
+#endif
+#else /* __s390x__ */
+#define __BYTEORDER_HAS_U64__
+#endif /* __s390x__ */
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _S390_BYTEORDER_H */
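The patch is a mechanical file move, but the inline assembly it carries is terse: lrv, lrvh and lrvg (plus their register forms lrvr and lrvgr) are byte-reversing loads, and the icm/ic sequence on 31-bit s390 assembles the result one byte at a time in reverse order. As a reference for what those primitives compute, here is a small portable C sketch; the demo_swab16/32/64 names are hypothetical, chosen for illustration, and are not part of the patch or the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Same transformation as ___arch__swab16() / the lrvh instruction:
 * swap the two bytes of a halfword. */
static uint16_t demo_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

/* Same transformation as ___arch__swab32() / the lrv(r) instruction:
 * reverse the four bytes of a word. */
static uint32_t demo_swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

/* Same transformation as ___arch__swab64() / lrvg(r): reverse eight
 * bytes, built from two 32-bit swaps just as __SWAB_64_THRU_32__
 * arranges for 31-bit kernels. */
static uint64_t demo_swab64(uint64_t x)
{
	return ((uint64_t)demo_swab32((uint32_t)x) << 32) |
	       (uint64_t)demo_swab32((uint32_t)(x >> 32));
}

int main(void)
{
	printf("%04x\n", demo_swab16(0x1234));      /* prints 3412 */
	printf("%08x\n", demo_swab32(0x12345678u)); /* prints 78563412 */
	printf("%016llx\n", (unsigned long long)
	       demo_swab64(0x0123456789abcdefull)); /* prints efcdab8967452301 */
	return 0;
}

Since s390 is big-endian, the linux/byteorder/big_endian.h include at the bottom of the header maps these swab primitives onto the cpu_to_le*/le*_to_cpu conversion helpers, while the cpu_to_be*/be*_to_cpu variants compile to no-ops.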