// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the open-source version of Snappy.
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <iostream>
#include <string>

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "snappy-stubs-public.h"
#if defined(__x86_64__)

// Enable 64-bit optimized versions of some routines.
#define ARCH_K8 1

#endif

// Needed by OS X, among others.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
// Pull in std::min, std::ostream, and the likes. This is safe because this
// header file is never used from any public header files.
using namespace std;

// The size of an array, if known at compile-time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
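// Usage sketch (editor's illustration, not part of the original header):
//   char table[256];
//   for (size_t i = 0; i < ARRAYSIZE(table); ++i) table[i] = 0;  // 256 iterations
// If the argument has decayed to a char*, ARRAYSIZE computes
// sizeof(char*) / sizeof(char) rather than the element count, which is the
// "unexpected results" warned about above.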
// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define PREDICT_FALSE(x) x
#define PREDICT_TRUE(x) x
#endif
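// Usage sketch (editor's illustration; the variable names are hypothetical):
// marking a branch as expected-rare lets the compiler keep the common path
// as the fall-through when __builtin_expect is available; the fallback
// definitions simply expand to the bare condition.
//   if (PREDICT_FALSE(bytes_needed > bytes_left)) {
//     return false;  // rare error path
//   }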
// This is only used for recomputing the tag byte table used during
// decompression; for simplicity we just remove it from the open-source
// version (anyone who wants to regenerate it can just do the call
// themselves within main()).
#define DEFINE_bool(flag_name, default_value, description) \
  bool FLAGS_ ## flag_name = default_value;
#define DECLARE_bool(flag_name) \
  extern bool FLAGS_ ## flag_name;
#define REGISTER_MODULE_INITIALIZER(name, code)
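// Usage sketch (editor's illustration; the flag name is hypothetical):
//   DEFINE_bool(dump_tables, false, "If true, regenerate the tag byte table.");
//   ...
//   if (FLAGS_dump_tables) { /* recompute and print the table */ }
// The stub macros only declare a global bool; no command-line parsing is
// performed in the open-source build.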
namespace snappy {

static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
// Logging.

#define LOG(level) LogMessage()
#define VLOG(level) true ? (void)0 : \
    snappy::LogMessageVoidify() & snappy::LogMessage()

// Stub log message class: everything is written to stderr.
class LogMessage {
 public:
  LogMessage() { }
  ~LogMessage() {
    cerr << endl;
  }

  LogMessage& operator<<(const std::string& msg) {
    cerr << msg;
    return *this;
  }
  LogMessage& operator<<(int x) {
    cerr << x;
    return *this;
  }
};
// Asserts, both versions activated in debug mode only,
// and ones that are always active.

#define CRASH_UNLESS(condition) \
    PREDICT_TRUE(condition) ? (void)0 : \
    snappy::LogMessageVoidify() & snappy::LogMessageCrash()

class LogMessageCrash : public LogMessage {
 public:
  LogMessageCrash() { }
  ~LogMessageCrash() {
    cerr << endl;
    abort();
  }
};
// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".

class LogMessageVoidify {
 public:
  LogMessageVoidify() { }
  // This has to be an operator with a precedence lower than << but
  // higher than ?:
  void operator&(const LogMessage&) { }
};
#define CHECK(cond) CRASH_UNLESS(cond)
#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))

#ifdef NDEBUG

#define DCHECK(cond) CRASH_UNLESS(true)
#define DCHECK_LE(a, b) CRASH_UNLESS(true)
#define DCHECK_GE(a, b) CRASH_UNLESS(true)
#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
#define DCHECK_NE(a, b) CRASH_UNLESS(true)
#define DCHECK_LT(a, b) CRASH_UNLESS(true)
#define DCHECK_GT(a, b) CRASH_UNLESS(true)

#else

#define DCHECK(cond) CHECK(cond)
#define DCHECK_LE(a, b) CHECK_LE(a, b)
#define DCHECK_GE(a, b) CHECK_GE(a, b)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
#define DCHECK_LT(a, b) CHECK_LT(a, b)
#define DCHECK_GT(a, b) CHECK_GT(a, b)

#endif
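// Usage sketch (editor's illustration; the variable names are hypothetical):
// CHECK* macros abort via LogMessageCrash in every build, while DCHECK*
// macros expand to the always-true CRASH_UNLESS(true) when NDEBUG is defined.
//   CHECK_LE(literal_length, output_limit);   // always verified
//   DCHECK_EQ(ip, base + offset);             // verified in debug builds only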
// Potentially unaligned loads and stores.

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)

#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))

#else

// These functions are provided for architectures that don't support
// unaligned loads and stores.

inline uint16 UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#endif
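// Example (editor's illustration; "buffer" is hypothetical): both variants
// read a 32-bit value from a possibly misaligned address; the memcpy form is
// used on architectures with strict alignment requirements.
//   const char* p = buffer + 3;               // not 4-byte aligned
//   uint32 v = UNALIGNED_LOAD32(p);
//   UNALIGNED_STORE32(buffer + 3, v + 1);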
// The following guarantees declaration of the byte swap functions.
#ifdef WORDS_BIGENDIAN

#ifdef HAVE_SYS_BYTEORDER_H
#include <sys/byteorder.h>
#endif

#ifdef HAVE_SYS_ENDIAN_H
#include <sys/endian.h>
#endif

#ifdef _MSC_VER
#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#elif defined(HAVE_BYTESWAP_H)
#include <byteswap.h>

#elif defined(bswap32)
// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
#define bswap_16(x) bswap16(x)
#define bswap_32(x) bswap32(x)
#define bswap_64(x) bswap64(x)

#elif defined(BSWAP_64)
// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
#define bswap_16(x) BSWAP_16(x)
#define bswap_32(x) BSWAP_32(x)
#define bswap_64(x) BSWAP_64(x)

#else

inline uint16 bswap_16(uint16 x) {
  return (x << 8) | (x >> 8);
}

inline uint32 bswap_32(uint32 x) {
  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
  return (x >> 16) | (x << 16);
}

inline uint64 bswap_64(uint64 x) {
  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
  return (x >> 32) | (x << 32);
}

#endif

#endif  // WORDS_BIGENDIAN
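// Example (editor's illustration): on big-endian builds, where the block
// above defines the byte swap functions, bswap_32(0x12345678) yields
// 0x78563412 and bswap_16(0xBEEF) yields 0xEFBE, whichever definition is
// picked up.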
// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
//
//  Store values into unaligned memory converting to little endian order:
//    LittleEndian.Store16(p, x);
//
//  Load unaligned values stored in little endian converting to host order:
//    x = LittleEndian.Load16(p);
class LittleEndian {
 public:
  // Conversion functions.
#ifdef WORDS_BIGENDIAN

  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static bool IsLittleEndian() { return false; }

#else  // !defined(WORDS_BIGENDIAN)

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static bool IsLittleEndian() { return true; }

#endif  // !defined(WORDS_BIGENDIAN)

  // Functions to do unaligned loads and stores in little-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }
};
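// Example (editor's illustration): the stored byte order is the same on every
// host; only the conversion cost differs.
//   char buf[4];
//   LittleEndian::Store32(buf, 0x12345678);   // buf = {0x78, 0x56, 0x34, 0x12}
//   uint32 v = LittleEndian::Load32(buf);     // v == 0x12345678 again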
// Some bit-manipulation functions.
class Bits {
 public:
  // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
  static int Log2Floor(uint32 n);

  // Return the first set least / most significant bit, 0-indexed. Returns an
  // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
  // that it's 0-indexed.
  static int FindLSBSetNonZero(uint32 n);
  static int FindLSBSetNonZero64(uint64 n);

 private:
  DISALLOW_COPY_AND_ASSIGN(Bits);
};
#ifdef HAVE_BUILTIN_CTZ

inline int Bits::Log2Floor(uint32 n) {
  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  return __builtin_ctz(n);
}

inline int Bits::FindLSBSetNonZero64(uint64 n) {
  return __builtin_ctzll(n);
}
#else  // Portable versions.

inline int Bits::Log2Floor(uint32 n) {
  if (n == 0)
    return -1;
  int log = 0;
  uint32 value = n;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    uint32 x = value >> shift;
    if (x != 0) {
      value = x;
      log += shift;
    }
  }
  assert(value == 1);
  return log;
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  int rc = 31;
  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
    const uint32 x = n << shift;
    if (x != 0) {
      n = x;
      rc -= shift;
    }
    shift >>= 1;
  }
  return rc;
}

// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64 n) {
  const uint32 bottombits = static_cast<uint32>(n);
  if (bottombits == 0) {
    // Bottom bits are zero, so scan in top bits
    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
  } else {
    return FindLSBSetNonZero(bottombits);
  }
}

#endif  // End portable versions.
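// Example (editor's illustration): both the __builtin_* and portable versions
// agree on these values.
//   Bits::Log2Floor(1) == 0, Bits::Log2Floor(32) == 5, Bits::Log2Floor(0) == -1
//   Bits::FindLSBSetNonZero(0x18) == 3          // 0x18 == 0b11000
//   Bits::FindLSBSetNonZero64(1ULL << 40) == 40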
// Variable-length integer encoding.
class Varint {
 public:
  // Maximum lengths of varint encoding of uint32.
  static const int kMax32 = 5;

  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
  // Never reads a character at or beyond limit. If a valid/terminated varint32
  // was found in the range, stores it in *OUTPUT and returns a pointer just
  // past the last byte of the varint32. Else returns NULL. On success,
  // "result <= limit".
  static const char* Parse32WithLimit(const char* ptr, const char* limit,
                                      uint32* OUTPUT);

  // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
  // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
  //            byte just past the last encoded byte.
  static char* Encode32(char* ptr, uint32 v);

  // EFFECTS    Appends the varint representation of "value" to "*s".
  static void Append32(string* s, uint32 value);
};
inline const char* Varint::Parse32WithLimit(const char* p,
                                            const char* l,
                                            uint32* OUTPUT) {
  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
  uint32 b, result;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result = b & 127;          if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
  return NULL;       // Value is too long to be a varint32
 done:
  *OUTPUT = result;
  return reinterpret_cast<const char*>(ptr);
}
inline char* Varint::Encode32(char* sptr, uint32 v) {
  // Operate on characters as unsigneds
  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
  static const int B = 128;
  if (v < (1<<7)) {
    *(ptr++) = v;
  } else if (v < (1<<14)) {
    *(ptr++) = v | B;
    *(ptr++) = v>>7;
  } else if (v < (1<<21)) {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = v>>14;
  } else if (v < (1<<28)) {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = (v>>14) | B;
    *(ptr++) = v>>21;
  } else {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = (v>>14) | B;
    *(ptr++) = (v>>21) | B;
    *(ptr++) = v>>28;
  }
  return reinterpret_cast<char*>(ptr);
}
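// Worked example (editor's illustration): encoding 300 (0x12C) emits two
// bytes, 0xAC 0x02: (300 & 127) | 128 == 0xAC, then 300 >> 7 == 2.
// Parse32WithLimit() reverses the process and returns a pointer just past
// the second byte.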
// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
  s->resize(new_size);
}
// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
inline char* string_as_array(string* str) {
  return str->empty() ? NULL : &*str->begin();
}
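// Usage sketch (editor's illustration; "uncompressed_length" is hypothetical):
// the two helpers above are typically combined to write output directly into
// a string.
//   string out;
//   STLStringResizeUninitialized(&out, uncompressed_length);
//   char* dst = string_as_array(&out);   // write uncompressed_length bytes here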
}  // namespace snappy

#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_