From: Christian Lamparter Date: Sat, 16 Sep 2023 21:48:13 +0000 (+0200) Subject: linux: import unaligned.h X-Git-Url: https://jxself.org/git/?a=commitdiff_plain;h=c0eb5f0c0ed17249b6c2f361f108b1fd6c85d658;p=carl9170fw.git linux: import unaligned.h we likely need those for the future. Signed-off-by: Christian Lamparter --- diff --git a/carlfw/include/carl9170.h b/carlfw/include/carl9170.h index 6e8a3e1..0a52a67 100644 --- a/carlfw/include/carl9170.h +++ b/carlfw/include/carl9170.h @@ -27,6 +27,7 @@ #include "config.h" #include "types.h" #include "compiler.h" +#include "unaligned.h" #include "fwcmd.h" #include "hw.h" #include "dma.h" diff --git a/include/linux/byteorder_generic.h b/include/linux/byteorder_generic.h new file mode 100644 index 0000000..c9a4c96 --- /dev/null +++ b/include/linux/byteorder_generic.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_GENERIC_H +#define _LINUX_BYTEORDER_GENERIC_H + +/* + * linux/byteorder/generic.h + * Generic Byte-reordering support + * + * The "... p" macros, like le64_to_cpup, can be used with pointers + * to unaligned data, but there will be a performance penalty on + * some architectures. Use get_unaligned for unaligned data. + * + * Francois-Rene Rideau 19970707 + * gathered all the good ideas from all asm-foo/byteorder.h into one file, + * cleaned them up. + * I hope it is compliant with non-GCC compilers. + * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, + * because I wasn't sure it would be ok to put it in types.h + * Upgraded it to 2.1.43 + * Francois-Rene Rideau 19971012 + * Upgraded it to 2.1.57 + * to please Linus T., replaced huge #ifdef's between little/big endian + * by nestedly #include'd files. + * Francois-Rene Rideau 19971205 + * Made it to 2.1.71; now a facelift: + * Put files under include/linux/byteorder/ + * Split swab from generic support. + * + * TODO: + * = Regular kernel maintainers could also replace all these manual + * byteswap macros that remain, disseminated among drivers, + * after some grep or the sources... + * = Linus might want to rename all these macros and files to fit his taste, + * to fit his personal naming scheme. + * = it seems that a few drivers would also appreciate + * nybble swapping support... + * = every architecture could add their byteswap macro in asm/byteorder.h + * see how some architectures already do (i386, alpha, ppc, etc) + * = cpu_to_beXX and beXX_to_cpu might some day need to be well + * distinguished throughout the kernel. This is not the case currently, + * since little endian, big endian, and pdp endian machines needn't it. + * But this might be the case for, say, a port of Linux to 20/21 bit + * architectures (and F21 Linux addict around?). + */ + +/* + * The following macros are to be defined by : + * + * Conversion of long and short int between network and host format + * ntohl(__u32 x) + * ntohs(__u16 x) + * htonl(__u32 x) + * htons(__u16 x) + * It seems that some programs (which? where? or perhaps a standard? POSIX?) + * might like the above to be functions, not macros (why?). + * if that's true, then detect them, and take measures. 
+ * Anyway, the measure is: define only ___ntohl as a macro instead, + * and in a separate file, have + * unsigned long inline ntohl(x){return ___ntohl(x);} + * + * The same for constant arguments + * __constant_ntohl(__u32 x) + * __constant_ntohs(__u16 x) + * __constant_htonl(__u32 x) + * __constant_htons(__u16 x) + * + * Conversion of XX-bit integers (16- 32- or 64-) + * between native CPU format and little/big endian format + * 64-bit stuff only defined for proper architectures + * cpu_to_[bl]eXX(__uXX x) + * [bl]eXX_to_cpu(__uXX x) + * + * The same, but takes a pointer to the value to convert + * cpu_to_[bl]eXXp(__uXX x) + * [bl]eXX_to_cpup(__uXX x) + * + * The same, but change in situ + * cpu_to_[bl]eXXs(__uXX x) + * [bl]eXX_to_cpus(__uXX x) + * + * See asm-foo/byteorder.h for examples of how to provide + * architecture-optimized versions + * + */ + +#define cpu_to_le64 __cpu_to_le64 +#define le64_to_cpu __le64_to_cpu +#define cpu_to_le32 __cpu_to_le32 +#define le32_to_cpu __le32_to_cpu +#define cpu_to_le16 __cpu_to_le16 +#define le16_to_cpu __le16_to_cpu +#define cpu_to_be64 __cpu_to_be64 +#define be64_to_cpu __be64_to_cpu +#define cpu_to_be32 __cpu_to_be32 +#define be32_to_cpu __be32_to_cpu +#define cpu_to_be16 __cpu_to_be16 +#define be16_to_cpu __be16_to_cpu +#define cpu_to_le64p __cpu_to_le64p +#define le64_to_cpup __le64_to_cpup +#define cpu_to_le32p __cpu_to_le32p +#define le32_to_cpup __le32_to_cpup +#define cpu_to_le16p __cpu_to_le16p +#define le16_to_cpup __le16_to_cpup +#define cpu_to_be64p __cpu_to_be64p +#define be64_to_cpup __be64_to_cpup +#define cpu_to_be32p __cpu_to_be32p +#define be32_to_cpup __be32_to_cpup +#define cpu_to_be16p __cpu_to_be16p +#define be16_to_cpup __be16_to_cpup +#define cpu_to_le64s __cpu_to_le64s +#define le64_to_cpus __le64_to_cpus +#define cpu_to_le32s __cpu_to_le32s +#define le32_to_cpus __le32_to_cpus +#define cpu_to_le16s __cpu_to_le16s +#define le16_to_cpus __le16_to_cpus +#define cpu_to_be64s __cpu_to_be64s +#define be64_to_cpus __be64_to_cpus +#define cpu_to_be32s __cpu_to_be32s +#define be32_to_cpus __be32_to_cpus +#define cpu_to_be16s __cpu_to_be16s +#define be16_to_cpus __be16_to_cpus + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ + +#undef ntohl +#undef ntohs +#undef htonl +#undef htons + +#define ___htonl(x) __cpu_to_be32(x) +#define ___htons(x) __cpu_to_be16(x) +#define ___ntohl(x) __be32_to_cpu(x) +#define ___ntohs(x) __be16_to_cpu(x) + +#define htonl(x) ___htonl(x) +#define ntohl(x) ___ntohl(x) +#define htons(x) ___htons(x) +#define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpu(*var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpu(*var) + val); +} + +/* XXX: this stuff can be optimized */ +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpu(*var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpu(*var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpu(*var) + val); +} + +static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) + dst[i] = cpu_to_be32(src[i]); +} + +static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) + dst[i] = be32_to_cpu(src[i]); +} + +#endif /* _LINUX_BYTEORDER_GENERIC_H */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index e3390d2..3825c32 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -21,12 +21,17 @@ #define __inline __attribute__((always_inline)) #define __hot __attribute__((hot)) #define __cold __attribute__((cold)) -#define __force __attribute__((force)) +#define __force #define __in_section(s) __attribute__((section("." 
# s))) #define __visible __attribute__((externally_visible)) +#ifndef __attribute_const__ +#define __attribute_const__ __attribute__((__const__)) +#endif -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#include "swab.h" +#include "little_endian.h" +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) #define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) @@ -239,6 +244,7 @@ static inline base type##_get_bits(__##type v, base field) \ } #define __MAKE_OP(size) \ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ ____MAKE_OP(u##size,u##size,,) ____MAKE_OP(u8,u8,,) __MAKE_OP(16) diff --git a/include/linux/little_endian.h b/include/linux/little_endian.h new file mode 100644 index 0000000..efc6882 --- /dev/null +++ b/include/linux/little_endian.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H +#define _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H + +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN 1234 +#endif +#ifndef __LITTLE_ENDIAN_BITFIELD +#define __LITTLE_ENDIAN_BITFIELD +#endif + +#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) +#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) +#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) +#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) +#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) +#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) +#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) +#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) +#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) +#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) +#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) +#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) +#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) +#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) + +static __always_inline __le64 __cpu_to_le64p(const __u64 *p) +{ + return (__force __le64)*p; +} +static __always_inline __u64 __le64_to_cpup(const __le64 *p) +{ + return (__force __u64)*p; +} +static __always_inline __le32 __cpu_to_le32p(const __u32 *p) +{ + return (__force __le32)*p; +} +static __always_inline __u32 __le32_to_cpup(const __le32 *p) +{ + return (__force __u32)*p; +} +static __always_inline __le16 __cpu_to_le16p(const __u16 *p) +{ + return (__force __le16)*p; 
+} +static __always_inline __u16 __le16_to_cpup(const __le16 *p) +{ + return (__force __u16)*p; +} +static __always_inline __be64 __cpu_to_be64p(const __u64 *p) +{ + return (__force __be64)__swab64p(p); +} +static __always_inline __u64 __be64_to_cpup(const __be64 *p) +{ + return __swab64p((__u64 *)p); +} +static __always_inline __be32 __cpu_to_be32p(const __u32 *p) +{ + return (__force __be32)__swab32p(p); +} +static __always_inline __u32 __be32_to_cpup(const __be32 *p) +{ + return __swab32p((__u32 *)p); +} +static __always_inline __be16 __cpu_to_be16p(const __u16 *p) +{ + return (__force __be16)__swab16p(p); +} +static __always_inline __u16 __be16_to_cpup(const __be16 *p) +{ + return __swab16p((__u16 *)p); +} +#define __cpu_to_le64s(x) do { (void)(x); } while (0) +#define __le64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le32s(x) do { (void)(x); } while (0) +#define __le32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le16s(x) do { (void)(x); } while (0) +#define __le16_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be64s(x) __swab64s((x)) +#define __be64_to_cpus(x) __swab64s((x)) +#define __cpu_to_be32s(x) __swab32s((x)) +#define __be32_to_cpus(x) __swab32s((x)) +#define __cpu_to_be16s(x) __swab16s((x)) +#define __be16_to_cpus(x) __swab16s((x)) + + +#endif /* _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/include/linux/packed_struct.h b/include/linux/packed_struct.h new file mode 100644 index 0000000..f2de73b --- /dev/null +++ b/include/linux/packed_struct.h @@ -0,0 +1,44 @@ +#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H +#define _LINUX_UNALIGNED_PACKED_STRUCT_H + +struct __una_u16 { u16 x; } __packed; +struct __una_u32 { u32 x; } __packed; +struct __una_u64 { u64 x; } __packed; + +static inline u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} + +#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 0000000..9a53899 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_SWAB_H +#define _UAPI_LINUX_SWAB_H + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
+ */ +#define ___constant_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define ___constant_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define ___constant_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define ___constant_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define ___constant_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 __fswab16(__u16 val) +{ +#if defined (__arch_swab16) + return __arch_swab16(val); +#else + return ___constant_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 __fswab32(__u32 val) +{ +#if defined(__arch_swab32) + return __arch_swab32(val); +#else + return ___constant_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 __fswab64(__u64 val) +{ +#if defined (__arch_swab64) + return __arch_swab64(val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); +#else + return ___constant_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 __fswahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#else + return ___constant_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 __fswahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#else + return ___constant_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#ifdef __HAVE_BUILTIN_BSWAP16__ +#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x)) +#else +#define __swab16(x) \ + (__u16)(__builtin_constant_p(x) ? \ + ___constant_swab16(x) : \ + __fswab16(x)) +#endif + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#ifdef __HAVE_BUILTIN_BSWAP32__ +#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x)) +#else +#define __swab32(x) \ + (__u32)(__builtin_constant_p(x) ? \ + ___constant_swab32(x) : \ + __fswab32(x)) +#endif + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#ifdef __HAVE_BUILTIN_BSWAP64__ +#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x)) +#else +#define __swab64(x) \ + (__u64)(__builtin_constant_p(x) ? 
\ + ___constant_swab64(x) : \ + __fswab64(x)) +#endif + +static __always_inline unsigned long __swab(const unsigned long y) +{ +#if __BITS_PER_LONG == 64 + return __swab64(y); +#else /* __BITS_PER_LONG == 32 */ + return __swab32(y); +#endif +} + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + ___constant_swahw32(x) : \ + __fswahw32(x)) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + ___constant_swahb32(x) : \ + __fswahb32(x)) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static __always_inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static __always_inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static __always_inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. 
+ */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static __always_inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static __always_inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + + +#endif /* _UAPI_LINUX_SWAB_H */ diff --git a/include/linux/types.h b/include/linux/types.h index 3f45f0f..82bf905 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -21,6 +21,7 @@ #include #include #include +#include #if BYTE_ORDER == BIG_ENDIAN #error "big endian is not supported by target" @@ -58,4 +59,23 @@ typedef uint16_t __be16; typedef uint32_t __be32; typedef uint64_t __be64; +#define cpu_to_be64 __cpu_to_be64 +#define be64_to_cpu __be64_to_cpu +#define cpu_to_be32 __cpu_to_be32 +#define be32_to_cpu __be32_to_cpu +#define cpu_to_be16 __cpu_to_be16 +#define be16_to_cpu __be16_to_cpu +#define cpu_to_be64p __cpu_to_be64p +#define be64_to_cpup __be64_to_cpup +#define cpu_to_be32p __cpu_to_be32p +#define be32_to_cpup __be32_to_cpup +#define cpu_to_be16p __cpu_to_be16p +#define cpu_to_be64s __cpu_to_be64s +#define be64_to_cpus __be64_to_cpus +#define cpu_to_be32s __cpu_to_be32s +#define be32_to_cpus __be32_to_cpus +#define cpu_to_be16s __cpu_to_be16s +#define be16_to_cpus __be16_to_cpus +#define be16_to_cpup __be16_to_cpup + #endif /* __LINUX_TYPES_H */ diff --git a/include/linux/unaligned.h b/include/linux/unaligned.h new file mode 100644 index 0000000..70d1edd --- /dev/null +++ b/include/linux/unaligned.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_UNALIGNED_H +#define __ASM_GENERIC_UNALIGNED_H + +/* + * This is the most generic implementation of unaligned accesses + * and should work almost anywhere. 
+ */ +#include "swab.h" +#include "little_endian.h" +#include "packed_struct.h" + +#define __get_unaligned_t(type, ptr) ({ \ + const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ + __pptr->x; \ +}) + +#define __put_unaligned_t(type, val, ptr) do { \ + struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ + __pptr->x = (val); \ +} while (0) + +#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) +#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr)) + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpu(__get_unaligned_t(__le16, p)); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpu(__get_unaligned_t(__le32, p)); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpu(__get_unaligned_t(__le64, p)); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_t(__le16, cpu_to_le16(val), p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_t(__le32, cpu_to_le32(val), p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_t(__le64, cpu_to_le64(val), p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpu(__get_unaligned_t(__be16, p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpu(__get_unaligned_t(__be32, p)); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpu(__get_unaligned_t(__be64, p)); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_t(__be16, cpu_to_be16(val), p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_t(__be32, cpu_to_be32(val), p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_t(__be64, cpu_to_be64(val), p); +} + +static inline u32 __get_unaligned_be24(const u8 *p) +{ + return p[0] << 16 | p[1] << 8 | p[2]; +} + +static inline u32 get_unaligned_be24(const void *p) +{ + return __get_unaligned_be24(p); +} + +static inline u32 __get_unaligned_le24(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16; +} + +static inline u32 get_unaligned_le24(const void *p) +{ + return __get_unaligned_le24(p); +} + +static inline void __put_unaligned_be24(const u32 val, u8 *p) +{ + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline void put_unaligned_be24(const u32 val, void *p) +{ + __put_unaligned_be24(val, p); +} + +static inline void __put_unaligned_le24(const u32 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; + *p++ = val >> 16; +} + +static inline void put_unaligned_le24(const u32 val, void *p) +{ + __put_unaligned_le24(val, p); +} + +static inline void __put_unaligned_be48(const u64 val, u8 *p) +{ + *p++ = val >> 40; + *p++ = val >> 32; + *p++ = val >> 24; + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline void put_unaligned_be48(const u64 val, void *p) +{ + __put_unaligned_be48(val, p); +} + +static inline u64 __get_unaligned_be48(const u8 *p) +{ + return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 | + p[3] << 16 | p[4] << 8 | p[5]; +} + +static inline u64 get_unaligned_be48(const void *p) +{ + return __get_unaligned_be48(p); +} + +#endif /* __ASM_GENERIC_UNALIGNED_H */
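
Editor's note, not part of the commit: a minimal usage sketch of the helpers imported above. Firmware sources that include carl9170.h now pick up get_unaligned_*() / put_unaligned_*() from unaligned.h, which go through one-member __packed structs so that multi-byte little/big-endian fields can be read or written at any byte offset without relying on naturally aligned pointers. The function and field names below are illustrative only and assume the regular carlfw build environment.

/*
 * Illustrative sketch; assumes the carlfw include paths, where
 * carl9170.h (as changed above) pulls in unaligned.h.
 */
#include "carl9170.h"

/* Read the little-endian frame_control word of an 802.11 header that
 * may start at an arbitrary byte offset inside a DMA buffer. */
static u16 example_read_frame_control(const u8 *buf)
{
	return get_unaligned_le16(buf);
}

/* Store a big-endian 32-bit word at an arbitrary byte offset, e.g.
 * while patching a frame in place. */
static void example_store_be32(u8 *buf, u32 val)
{
	put_unaligned_be32(val, buf);
}

The __packed-struct approach lets the compiler fall back to byte-wise accesses on targets that cannot perform unaligned loads and stores, while still collapsing to plain loads and stores where the hardware allows them.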