path: root/xen/include/xen/byteorder
author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-01-17 12:11:02 +0000
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-01-17 12:11:02 +0000
commit    201c511f5a11b2317070146dd5f83f1832fc45f5 (patch)
tree      93ec73e84a481d125f8e95f7cc68d90a2aaa685d /xen/include/xen/byteorder
parent    d74f5aa06311f5bb5ee39d0fbd32707336cf61d0 (diff)
[XEN] Include byteorder functions from Linux.
Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/include/xen/byteorder')
-rw-r--r--  xen/include/xen/byteorder/big_endian.h    | 106
-rw-r--r--  xen/include/xen/byteorder/generic.h       |  68
-rw-r--r--  xen/include/xen/byteorder/little_endian.h | 106
-rw-r--r--  xen/include/xen/byteorder/swab.h          | 185
4 files changed, 465 insertions(+), 0 deletions(-)
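
For orientation, here is a minimal sketch of how the resulting API is used once an architecture's <asm/byteorder.h> pulls in one of these headers. The struct, field names and constants below are hypothetical, invented for illustration; the cpu_to_leXX()/leXX_to_cpu() names are the ones defined by generic.h in this patch, and the __leXX/__uXX typedefs are assumed to come from <xen/types.h>, which the headers themselves include:

    /* Hypothetical wire-format header: fields little-endian on the wire. */
    struct demo_hdr {
        __le32 magic;
        __le16 version;
    };

    static void demo_fill(struct demo_hdr *h)
    {
        h->magic   = cpu_to_le32(0x2158454e);  /* CPU order -> wire order */
        h->version = cpu_to_le16(1);
    }

    static __u16 demo_version(const struct demo_hdr *h)
    {
        return le16_to_cpu(h->version);        /* wire order -> CPU order */
    }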
diff --git a/xen/include/xen/byteorder/big_endian.h b/xen/include/xen/byteorder/big_endian.h
new file mode 100644
index 0000000000..8a815534e4
--- /dev/null
+++ b/xen/include/xen/byteorder/big_endian.h
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_BIG_ENDIAN_H__
+#define __XEN_BYTEORDER_BIG_ENDIAN_H__
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return (__force __u16)*p;
+}
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do {} while (0)
+#define __be64_to_cpus(x) do {} while (0)
+#define __cpu_to_be32s(x) do {} while (0)
+#define __be32_to_cpus(x) do {} while (0)
+#define __cpu_to_be16s(x) do {} while (0)
+#define __be16_to_cpus(x) do {} while (0)
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_BIG_ENDIAN_H__ */
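
Taken together: on a big-endian CPU the to/from-big-endian conversions above are pure casts (free at runtime) while the little-endian ones go through the swab helpers. A short hedged illustration, assuming a big-endian CPU and an arbitrary value:

    __u32 v = 0x12345678;
    __be32 b = __cpu_to_be32(v); /* identity cast; memory bytes 12 34 56 78 */
    __le32 l = __cpu_to_le32(v); /* __swab32();    memory bytes 78 56 34 12 */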
diff --git a/xen/include/xen/byteorder/generic.h b/xen/include/xen/byteorder/generic.h
new file mode 100644
index 0000000000..8a0006b755
--- /dev/null
+++ b/xen/include/xen/byteorder/generic.h
@@ -0,0 +1,68 @@
+#ifndef __XEN_BYTEORDER_GENERIC_H__
+#define __XEN_BYTEORDER_GENERIC_H__
+
+/*
+ * Generic Byte-reordering support
+ *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of XX-bit integers (16-, 32- or 64-bit)
+ * between native CPU format and little/big endian format;
+ * the 64-bit variants are only defined on architectures that have them
+ * cpu_to_[bl]eXX(__uXX x)
+ * [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ * cpu_to_[bl]eXXp(const __uXX *p)
+ * [bl]eXX_to_cpup(const __[bl]eXX *p)
+ *
+ * The same, but changes the value in place
+ * cpu_to_[bl]eXXs(__uXX *p)
+ * [bl]eXX_to_cpus(__uXX *p)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ */
+
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+
+#endif /* __XEN_BYTEORDER_GENERIC_H__ */
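
The unprefixed names above are the ones callers normally use. Note that the ...s variants convert through a pointer, in place; a hedged sketch with a hypothetical field:

    struct rec { __u32 len; } r;
    /* r.len arrived in little-endian order; fix it up where it sits.  */
    /* Compiles away on little-endian CPUs; __swab32s() on big-endian. */
    le32_to_cpus(&r.len);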
diff --git a/xen/include/xen/byteorder/little_endian.h b/xen/include/xen/byteorder/little_endian.h
new file mode 100644
index 0000000000..24ac42701e
--- /dev/null
+++ b/xen/include/xen/byteorder/little_endian.h
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+#define __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_LITTLE_ENDIAN_H__ */
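
Mirroring big_endian.h, the little-endian conversions here are pure casts and the big-endian (network-order) ones swap. For instance (hedged; the constant is arbitrary):

    /* On a little-endian CPU, network order requires a swap: */
    __be16 port = __constant_htons(0x1234);
    /* expands to ___constant_swab16(0x1234) == 0x3412, whose in-memory
       bytes on this CPU are 0x12 0x34 -- i.e. network byte order */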
diff --git a/xen/include/xen/byteorder/swab.h b/xen/include/xen/byteorder/swab.h
new file mode 100644
index 0000000000..0193ff83e7
--- /dev/null
+++ b/xen/include/xen/byteorder/swab.h
@@ -0,0 +1,185 @@
+#ifndef __XEN_BYTEORDER_SWAB_H__
+#define __XEN_BYTEORDER_SWAB_H__
+
+/*
+ * Byte-swapping, independent of CPU endianness
+ * swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau <fare@tunes.org> 19971205
+ * separated swab functions from cpu_to_XX,
+ * to clean up support for bizarre-endian architectures.
+ */
+
+#include <xen/compiler.h>
+
+/* Casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x) \
+({ \
+ __u16 __x = (x); \
+ ((__u16)( \
+ (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
+})
+
+#define ___swab32(x) \
+({ \
+ __u32 __x = (x); \
+ ((__u32)( \
+ (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+})
+
+#define ___swab64(x) \
+({ \
+ __u64 __x = (x); \
+ ((__u64)( \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
+})
+
+#define ___constant_swab16(x) \
+ ((__u16)( \
+ (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___constant_swab32(x) \
+ ((__u32)( \
+ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___constant_swab64(x) \
+ ((__u64)( \
+ (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+#endif
+#ifndef __arch__swab32
+# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+#endif
+#ifndef __arch__swab64
+# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+#endif
+
+#ifndef __arch__swab16p
+# define __arch__swab16p(x) __arch__swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+# define __arch__swab32p(x) __arch__swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+# define __arch__swab64p(x) __arch__swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+# define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+# define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+# define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+# define __swab16(x) __fswab16(x)
+# define __swab32(x) __fswab32(x)
+# define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static inline __attribute_const__ __u16 __fswab16(__u16 x)
+{
+ return __arch__swab16(x);
+}
+static inline __u16 __swab16p(const __u16 *x)
+{
+ return __arch__swab16p(x);
+}
+static inline void __swab16s(__u16 *addr)
+{
+ __arch__swab16s(addr);
+}
+
+static inline __attribute_const__ __u32 __fswab32(__u32 x)
+{
+ return __arch__swab32(x);
+}
+static inline __u32 __swab32p(const __u32 *x)
+{
+ return __arch__swab32p(x);
+}
+static inline void __swab32s(__u32 *addr)
+{
+ __arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static inline __attribute_const__ __u64 __fswab64(__u64 x)
+{
+# ifdef __SWAB_64_THRU_32__
+ __u32 h = x >> 32;
+ __u32 l = x & ((1ULL<<32)-1);
+ return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+# else
+ return __arch__swab64(x);
+# endif
+}
+static inline __u64 __swab64p(const __u64 *x)
+{
+ return __arch__swab64p(x);
+}
+static inline void __swab64s(__u64 *addr)
+{
+ __arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+
+#endif /* __XEN_BYTEORDER_SWAB_H__ */
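
The __builtin_constant_p() split in __swab16/32/64 above lets the compiler fold swaps of literals at compile time while routing runtime values through the __fswabXX() helpers. A hedged illustration (function and argument names hypothetical):

    static __u32 demo_fold(__u32 runtime_val)
    {
        __u32 a = __swab32(0x11223344);  /* constant: folds to 0x44332211 */
        __u32 b = __swab32(runtime_val); /* variable: calls __fswab32()   */
        return a ^ b;
    }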