X-Git-Url: https://git.cworth.org/git?a=blobdiff_plain;f=thirdparty%2Fsnappy%2Fsnappy-stubs-internal.h;fp=thirdparty%2Fsnappy%2Fsnappy-stubs-internal.h;h=6033cdfb41a10df6424dbb478aaa1e149f7203cc;hb=af0f4f9483fc9aef4d3af914d80d56b560272dcb;hp=021528893357ae5b610788a8763e6b27bc1910fc;hpb=df1a1816c13e6fcdf63a45c973f09f04af318073;p=apitrace

diff --git a/thirdparty/snappy/snappy-stubs-internal.h b/thirdparty/snappy/snappy-stubs-internal.h
index 0215288..6033cdf 100644
--- a/thirdparty/snappy/snappy-stubs-internal.h
+++ b/thirdparty/snappy/snappy-stubs-internal.h
@@ -86,10 +86,9 @@ using namespace std;
 // version (anyone who wants to regenerate it can just do the call
 // themselves within main()).
 #define DEFINE_bool(flag_name, default_value, description) \
-  bool FLAGS_ ## flag_name = default_value;
+  bool FLAGS_ ## flag_name = default_value
 #define DECLARE_bool(flag_name) \
-  extern bool FLAGS_ ## flag_name;
-#define REGISTER_MODULE_INITIALIZER(name, code)
+  extern bool FLAGS_ ## flag_name
 
 namespace snappy {
 
@@ -179,6 +178,8 @@ class LogMessageVoidify {
 
 // Potentially unaligned loads and stores.
 
+// x86 and PowerPC can simply do these loads and stores native.
+
 #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
 
 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
@@ -189,6 +190,47 @@ class LogMessageVoidify {
 #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
 #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
 
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode). There's no simple #define that says just
+// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
+// sub-architectures.
+//
+// This is a mess, but there's not much we can do about it.
+
+#elif defined(__arm__) && \
+      !defined(__ARM_ARCH_5__) && \
+      !defined(__ARM_ARCH_5T__) && \
+      !defined(__ARM_ARCH_5TE__) && \
+      !defined(__ARM_ARCH_5TEJ__) && \
+      !defined(__ARM_ARCH_6__) && \
+      !defined(__ARM_ARCH_6J__) && \
+      !defined(__ARM_ARCH_6K__) && \
+      !defined(__ARM_ARCH_6Z__) && \
+      !defined(__ARM_ARCH_6ZK__) && \
+      !defined(__ARM_ARCH_6T2__)
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+
+// TODO(user): NEON supports unaligned 64-bit loads and stores.
+// See if that would be more efficient on platforms supporting it,
+// at least for copies.
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+  uint64 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+  memcpy(p, &v, sizeof v);
+}
+
 #else
 
 // These functions are provided for architectures that don't support
@@ -226,6 +268,20 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
 
 #endif
 
+// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
+// on some platforms, in particular ARM.
+inline void UnalignedCopy64(const void *src, void *dst) {
+  if (sizeof(void *) == 8) {
+    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
+  } else {
+    const char *src_char = reinterpret_cast<const char *>(src);
+    char *dst_char = reinterpret_cast<char *>(dst);
+
+    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
+    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
+  }
+}
+
 // The following guarantees declaration of the byte swap functions.
 #ifdef WORDS_BIGENDIAN
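
The first hunk drops the trailing semicolons from the stubbed-out gflags macros, so a use site now supplies its own terminating ';' and an extra empty declaration (which pedantic compilers warn about) no longer appears. A minimal, self-contained sketch of how the post-change macros are meant to be used; the flag name below is made up for illustration and is not one that snappy or apitrace defines:

    // Hypothetical use of the flag stubs after this change; the flag name is
    // illustrative only.
    #include <cstdio>

    #define DEFINE_bool(flag_name, default_value, description) \
      bool FLAGS_ ## flag_name = default_value
    #define DECLARE_bool(flag_name) \
      extern bool FLAGS_ ## flag_name

    DECLARE_bool(verbose_example);   // the ';' now comes from the caller
    DEFINE_bool(verbose_example, false, "Print extra diagnostics (illustrative)");

    int main() {
      FLAGS_verbose_example = true;
      std::printf("flag is %s\n", FLAGS_verbose_example ? "on" : "off");
      return 0;
    }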
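
The later hunks are easier to follow with the memcpy-based fallback pulled out on its own. The standalone sketch below is not part of the patch: it reimplements the same idea with standard <cstdint> types rather than snappy's internal uint32/uint64 typedefs, and the helper names and the small check in main() are illustrative. On targets where unaligned access is legal, compilers typically lower these fixed-size memcpy calls to single load/store instructions, while on pre-ARMv7 cores they avoid the faulting or slow unaligned accesses described in the comment above.

    // Standalone illustration of the memcpy-based unaligned-access fallback
    // used in the ARM branch above.  Not snappy code; uses <cstdint> types.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    inline uint32_t UnalignedLoad32(const void *p) {
      uint32_t t;
      std::memcpy(&t, p, sizeof t);  // usually lowered to a plain 32-bit load
      return t;
    }

    inline void UnalignedStore32(void *p, uint32_t v) {
      std::memcpy(p, &v, sizeof v);
    }

    inline uint64_t UnalignedLoad64(const void *p) {
      uint64_t t;
      std::memcpy(&t, p, sizeof t);
      return t;
    }

    inline void UnalignedStore64(void *p, uint64_t v) {
      std::memcpy(p, &v, sizeof v);
    }

    // Same shape as UnalignedCopy64 in the patch: one 64-bit move when
    // pointers are 64-bit, two 32-bit moves otherwise.
    inline void UnalignedCopy64(const void *src, void *dst) {
      if (sizeof(void *) == 8) {
        UnalignedStore64(dst, UnalignedLoad64(src));
      } else {
        const char *src_char = reinterpret_cast<const char *>(src);
        char *dst_char = reinterpret_cast<char *>(dst);
        UnalignedStore32(dst_char, UnalignedLoad32(src_char));
        UnalignedStore32(dst_char + 4, UnalignedLoad32(src_char + 4));
      }
    }

    int main() {
      unsigned char src[16], dst[16] = {0};
      for (int i = 0; i < 16; ++i) src[i] = static_cast<unsigned char>(i);
      // Deliberately misaligned source and destination addresses.
      UnalignedCopy64(src + 1, dst + 3);
      std::printf("%02x %02x\n", static_cast<unsigned>(dst[3]),
                  static_cast<unsigned>(dst[10]));  // expected: 01 08
      return 0;
    }

Note that the original UnalignedCopy64 tests sizeof(void *) at run time rather than with the preprocessor; the condition is a compile-time constant, so optimizers discard the dead branch while the source avoids yet another #ifdef.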