1 // Copyright 2005 Google Inc. All Rights Reserved.
3 // Redistribution and use in source and binary forms, with or without
4 // modification, are permitted provided that the following conditions are
7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above
10 // copyright notice, this list of conditions and the following disclaimer
11 // in the documentation and/or other materials provided with the
13 // * Neither the name of Google Inc. nor the names of its
14 // contributors may be used to endorse or promote products derived from
15 // this software without specific prior written permission.
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "snappy-internal.h"
31 #include "snappy-sinksource.h"
42 // Any hash function will produce a valid compressed bitstream, but a good
43 // hash function reduces the number of collisions and thus yields better
44 // compression for compressible input, and more speed for incompressible
45 // input. Of course, it doesn't hurt if the hash function is reasonably fast
46 // either, as it gets called a lot.
47 static inline uint32 HashBytes(uint32 bytes, int shift) {
48 uint32 kMul = 0x1e35a7bd;
49 return (bytes * kMul) >> shift;
51 static inline uint32 Hash(const char* p, int shift) {
52 return HashBytes(UNALIGNED_LOAD32(p), shift);
55 size_t MaxCompressedLength(size_t source_len) {
56 // Compressed data can be defined as:
57 // compressed := item* literal*
58 // item := literal* copy
60 // The trailing literal sequence has a space blowup of at most 62/60
61 // since a literal of length 60 needs one tag byte + one extra byte
62 // for length information.
64 // Item blowup is trickier to measure. Suppose the "copy" op copies
65 // 4 bytes of data. Because of a special check in the encoding code,
66 // we produce a 4-byte copy only if the offset is < 65536. Therefore
67 // the copy op takes 3 bytes to encode, and this type of item leads
68 // to at most the 62/60 blowup for representing literals.
70 // Suppose the "copy" op copies 5 bytes of data. If the offset is big
71 // enough, it will take 5 bytes to encode the copy op. Therefore the
72 // worst case here is a one-byte literal followed by a five-byte copy.
73 // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
75 // This last factor dominates the blowup, so the final estimate is:
76 return 32 + source_len + source_len/6;
// Element-type tags for the compressed format.  The low two bits of every
// tag byte select one of these four element kinds.
enum {
  LITERAL = 0,
  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
  COPY_2_BYTE_OFFSET = 2,
  COPY_4_BYTE_OFFSET = 3
};
86 // Copy "len" bytes from "src" to "op", one byte at a time. Used for
87 // handling COPY operations where the input and output regions may
88 // overlap. For example, suppose:
92 // After IncrementalCopy(src, op, len), the result will have
93 // eleven copies of "ab"
94 // ababababababababababab
95 // Note that this does not match the semantics of either memcpy()
// Copies "len" bytes from "src" to "op" strictly one byte at a time, so an
// overlapping destination replicates the source pattern (unlike memcpy or
// memmove).  Requires len > 0.
static inline void IncrementalCopy(const char* src, char* op, int len) {
  do {
    *op++ = *src++;
  } while (--len > 0);
}
104 // Equivalent to IncrementalCopy except that it can write up to ten extra
105 // bytes after the end of the copy, and that it is faster.
107 // The main part of this loop is a simple copy of eight bytes at a time until
108 // we've copied (at least) the requested amount of bytes. However, if op and
109 // src are less than eight bytes apart (indicating a repeating pattern of
110 // length < 8), we first need to expand the pattern in order to get the correct
111 // results. For instance, if the buffer looks like this, with the eight-byte
112 // <src> and <op> patterns marked as intervals:
118 // a single eight-byte copy from <src> to <op> will repeat the pattern once,
119 // after which we can move <op> two bytes without moving <src>:
125 // and repeat the exercise until the two no longer overlap.
127 // This allows us to do very well in the special case of one single byte
128 // repeated many times, without taking a big hit for more general cases.
130 // The worst case of extra writing past the end of the match occurs when
131 // op - src == 1 and len == 1; the last copy will read from byte positions
132 // [0..7] and write to [4..11], whereas it was only supposed to write to
133 // position 1. Thus, ten excess bytes.
137 const int kMaxIncrementCopyOverflow = 10;
141 static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
142 while (op - src < 8) {
143 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
148 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
155 static inline char* EmitLiteral(char* op,
158 bool allow_fast_path) {
159 int n = len - 1; // Zero-length literals are disallowed
162 *op++ = LITERAL | (n << 2);
164 // The vast majority of copies are below 16 bytes, for which a
165 // call to memcpy is overkill. This fast path can sometimes
166 // copy up to 15 bytes too much, but that is okay in the
167 // main loop, since we have a bit to go on for both sides:
169 // - The input will always have kInputMarginBytes = 15 extra
170 // available bytes, as long as we're in the main loop, and
171 // if not, allow_fast_path = false.
172 // - The output will always have 32 spare bytes (see
173 // MaxCompressedLength).
174 if (allow_fast_path && len <= 16) {
175 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
176 UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
180 // Encode in upcoming bytes
191 *base = LITERAL | ((59+count) << 2);
193 memcpy(op, literal, len);
197 static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
200 DCHECK_LT(offset, 65536);
202 if ((len < 12) && (offset < 2048)) {
203 int len_minus_4 = len - 4;
204 assert(len_minus_4 < 8); // Must fit in 3 bits
205 *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
206 *op++ = offset & 0xff;
208 *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
209 LittleEndian::Store16(op, offset);
215 static inline char* EmitCopy(char* op, int offset, int len) {
216 // Emit 64 byte copies but make sure to keep at least four bytes reserved
218 op = EmitCopyLessThan64(op, offset, 64);
222 // Emit an extra 60 byte copy if have too much data to fit in one copy
224 op = EmitCopyLessThan64(op, offset, 60);
229 op = EmitCopyLessThan64(op, offset, len);
234 bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
236 const char* limit = start + n;
237 if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
246 uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
247 // Use smaller hash table when input.size() is smaller, since we
248 // fill the table, incurring O(hash table size) overhead for
249 // compression, and if the input is short, we won't need that
250 // many hash table entries anyway.
251 assert(kMaxHashTableSize >= 256);
253 while (htsize < kMaxHashTableSize && htsize < input_size) {
256 CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
257 CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
260 if (htsize <= ARRAYSIZE(small_table_)) {
261 table = small_table_;
263 if (large_table_ == NULL) {
264 large_table_ = new uint16[kMaxHashTableSize];
266 table = large_table_;
269 *table_size = htsize;
270 memset(table, 0, htsize * sizeof(*table));
273 } // end namespace internal
275 // For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
276 // equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
277 // empirically found that overlapping loads such as
278 // UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
279 // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
280 static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
281 DCHECK(0 <= offset && offset <= 4) << offset;
282 return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
285 // Flat array compression that does not emit the "uncompressed length"
286 // prefix. Compresses "input" string to the "*op" buffer.
288 // REQUIRES: "input" is at most "kBlockSize" bytes long.
289 // REQUIRES: "op" points to an array of memory that is at least
290 // "MaxCompressedLength(input.size())" in size.
291 // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
292 // REQUIRES: "table_size" is a power of two
294 // Returns an "end" pointer into "op" buffer.
295 // "end - op" is the compressed size of "input".
// Compresses one block of input into "op" using "table" as the hash table.
// NOTE(review): this chunk of the file appears to have lines elided
// (parameter declarations between "input" and "table_size", the "skip"
// counter, inner do/if braces, and the emit_remainder label); only
// comments are added here — the visible code is unchanged.
char* CompressFragment(const char* input,
                       const int table_size) {
  // "ip" is the input pointer, and "op" is the output pointer.
  const char* ip = input;
  CHECK_LE(input_size, kBlockSize);
  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
  // Bucket index = top log2(table_size) bits of the 32-bit hash.
  const int shift = 32 - Bits::Log2Floor(table_size);
  DCHECK_EQ(kuint32max >> shift, table_size - 1);
  const char* ip_end = input + input_size;
  const char* base_ip = ip;
  // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
  // [next_emit, ip_end) after the main loop.
  const char* next_emit = ip;

  const int kInputMarginBytes = 15;
  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
    const char* ip_limit = input + input_size - kInputMarginBytes;

    for (uint32 next_hash = Hash(++ip, shift); ; ) {
      DCHECK_LT(next_emit, ip);
      // The body of this loop calls EmitLiteral once and then EmitCopy one or
      // more times. (The exception is that when we're close to exhausting
      // the input we goto emit_remainder.)
      //
      // In the first iteration of this loop we're just starting, so
      // there's nothing to copy, so calling EmitLiteral once is
      // necessary. And we only start a new iteration when the
      // current iteration has determined that a call to EmitLiteral will
      // precede the next call to EmitCopy (if any).
      //
      // Step 1: Scan forward in the input looking for a 4-byte-long match.
      // If we get close to exhausting the input then goto emit_remainder.
      //
      // Heuristic match skipping: If 32 bytes are scanned with no matches
      // found, start looking only at every other byte. If 32 more bytes are
      // scanned, look at every third byte, etc.. When a match is found,
      // immediately go back to looking at every byte. This is a small loss
      // (~5% performance, ~0.1% density) for compressible data due to more
      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
      // win since the compressor quickly "realizes" the data is incompressible
      // and doesn't bother looking for matches everywhere.
      //
      // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (ie. right-shifting by five) gives the
      // number of bytes to move ahead for each iteration.
      const char* next_ip = ip;
      const char* candidate;
      // Hash-probe loop: advance ip (with skipping) until the table yields
      // a candidate whose first 4 bytes equal the 4 bytes at ip.
      uint32 hash = next_hash;
      DCHECK_EQ(hash, Hash(ip, shift));
      uint32 bytes_between_hash_lookups = skip++ >> 5;
      next_ip = ip + bytes_between_hash_lookups;
      if (PREDICT_FALSE(next_ip > ip_limit)) {
      next_hash = Hash(next_ip, shift);
      candidate = base_ip + table[hash];
      DCHECK_GE(candidate, base_ip);
      DCHECK_LT(candidate, ip);

      // Record our position so later occurrences of these 4 bytes find us.
      table[hash] = ip - base_ip;
      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
                            UNALIGNED_LOAD32(candidate)));

      // Step 2: A 4-byte match has been found. We'll later see if more
      // than 4 bytes match. But, prior to the match, input
      // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
      DCHECK_LE(next_emit + 16, ip_end);
      op = EmitLiteral(op, next_emit, ip - next_emit, true);

      // Step 3: Call EmitCopy, and then see if another EmitCopy could
      // be our next move. Repeat until we find no match for the
      // input immediately after what was consumed by the last EmitCopy call.
      //
      // If we exit this loop normally then we need to call EmitLiteral next,
      // though we don't yet know how big the literal will be. We handle that
      // by proceeding to the next iteration of the main loop. We also can exit
      // this loop via goto if we get close to exhausting the input.
      uint64 input_bytes = 0;
      uint32 candidate_bytes = 0;

      // We have a 4-byte match at ip, and no need to emit any
      // "literal bytes" prior to ip.
      const char* base = ip;
      // Extend the match as far as the input allows.
      int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
      int offset = base - candidate;
      DCHECK_EQ(0, memcmp(base, candidate, matched));
      op = EmitCopy(op, offset, matched);
      // We could immediately start working at ip now, but to improve
      // compression we first update table[Hash(ip - 1, ...)].
      const char* insert_tail = ip - 1;
      if (PREDICT_FALSE(ip >= ip_limit)) {
      // One 8-byte load covers the overlapping 4-byte windows at
      // offsets 0, 1 and 2 (see GetUint32AtOffset).
      input_bytes = UNALIGNED_LOAD64(insert_tail);
      uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
      table[prev_hash] = ip - base_ip - 1;
      uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
      candidate = base_ip + table[cur_hash];
      candidate_bytes = UNALIGNED_LOAD32(candidate);
      table[cur_hash] = ip - base_ip;
      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);

      // No immediate follow-on match: prime next_hash for Step 1 of the
      // next main-loop iteration.
      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);

  // Emit the remaining bytes as a literal
  if (next_emit < ip_end) {
    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
422 } // end namespace internal
424 // Signature of output types needed by decompression code.
425 // The decompression code is templatized on a type that obeys this
426 // signature so that we do not pay virtual function call overhead in
427 // the middle of a tight decompression loop.
429 // class DecompressionWriter {
431 // // Called before decompression
432 // void SetExpectedLength(size_t length);
434 // // Called after decompression
435 // bool CheckLength() const;
437 // // Called repeatedly during decompression
438 // bool Append(const char* ip, uint32 length, bool allow_fast_path);
439 // bool AppendFromSelf(uint32 offset, uint32 length);
442 // "allow_fast_path" is a parameter that says if there is at least 16
443 // readable bytes in "ip". It is currently only used by SnappyArrayWriter.
445 // -----------------------------------------------------------------------
446 // Lookup table for decompression code. Generated by ComputeTable() below.
447 // -----------------------------------------------------------------------
449 // Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
450 static const uint32 wordmask[] = {
451 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
454 // Data stored per entry in lookup table:
455 // Range Bits-used Description
456 // ------------------------------------
457 // 1..64 0..7 Literal/copy length encoded in opcode byte
458 // 0..7 8..10 Copy offset encoded in opcode byte / 256
459 // 0..4 11..13 Extra bytes after opcode
461 // We use eight bits for the length even though 7 would have sufficed
462 // because of efficiency reasons:
463 // (1) Extracting a byte is faster than a bit-field
464 // (2) It properly aligns copy offset so we do not need a <<8
465 static const uint16 char_table[256] = {
466 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
467 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
468 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
469 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
470 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
471 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
472 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
473 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
474 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
475 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
476 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
477 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
478 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
479 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
480 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
481 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
482 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
483 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
484 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
485 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
486 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
487 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
488 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
489 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
490 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
491 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
492 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
493 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
494 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
495 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
496 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
497 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
500 // In debug mode, allow optional computation of the table at startup.
501 // Also, check that the decompression table is correct.
// Debug aid: when set, ComputeTable() prints the freshly computed table so
// it can be pasted over char_table[] above.
DEFINE_bool(snappy_dump_decompression_table, false,
            "If true, we print the decompression table at startup.");
506 static uint16 MakeEntry(unsigned int extra,
508 unsigned int copy_offset) {
509 // Check that all of the fields fit within the allocated space
510 DCHECK_EQ(extra, extra & 0x7); // At most 3 bits
511 DCHECK_EQ(copy_offset, copy_offset & 0x7); // At most 3 bits
512 DCHECK_EQ(len, len & 0x7f); // At most 7 bits
513 return len | (copy_offset << 8) | (extra << 11);
// Debug-only: recomputes the decompression lookup table from first
// principles and CHECKs it against the hard-coded char_table[] above.
// NOTE(review): declarations of the local table ("dst") and the "assigned"
// counter, plus several loop-closing braces, appear to be elided from this
// chunk; only comments are added here.
static void ComputeTable() {
  // Place invalid entries in all places to detect missing initialization
  for (int i = 0; i < 256; i++) {

  // Small LITERAL entries. We store (len-1) in the top 6 bits.
  for (unsigned int len = 1; len <= 60; len++) {
    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);

  // Large LITERAL entries. We use 60..63 in the high 6 bits to
  // encode the number of bytes of length info that follow the opcode.
  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
    // We set the length field in the lookup table to 1 because extra
    // bytes encode len-1.
    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);

  // COPY_1_BYTE_OFFSET.
  //
  // The tag byte in the compressed data stores len-4 in 3 bits, and
  // offset/256 in 5 bits. offset%256 is stored in the next byte.
  //
  // This format is used for length in range [4..11] and offset in
  // range [0..2047] (loop bounds below).
  for (unsigned int len = 4; len < 12; len++) {
    for (unsigned int offset = 0; offset < 2048; offset += 256) {
      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
          MakeEntry(1, len, offset>>8);

  // COPY_2_BYTE_OFFSET.
  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
  for (unsigned int len = 1; len <= 64; len++) {
    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);

  // COPY_4_BYTE_OFFSET.
  // Tag contains len-1 in top 6 bits, and offset in next four bytes.
  for (unsigned int len = 1; len <= 64; len++) {
    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);

  // Check that each entry was initialized exactly once.
  CHECK_EQ(assigned, 256);
  for (int i = 0; i < 256; i++) {
    CHECK_NE(dst[i], 0xffff);

  if (FLAGS_snappy_dump_decompression_table) {
    printf("static const uint16 char_table[256] = {\n ");
    for (int i = 0; i < 256; i++) {
      ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n " : ", ")));

  // Check that computed table matched recorded table
  for (int i = 0; i < 256; i++) {
    CHECK_EQ(dst[i], char_table[i]);
// Runs ComputeTable() at static-initialization time so the hard-coded
// char_table[] is verified before any decompression happens.
REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
593 // Helper class for decompression
class SnappyDecompressor {
  // NOTE(review): access specifiers, the constructor initializer list, the
  // eof() accessor, the varint-reading loop header and several closing
  // braces appear to be elided from this chunk; only comments are added
  // here — the visible code is unchanged.
  Source* reader_;        // Underlying source of bytes to decompress
  const char* ip_;        // Points to next buffered byte
  const char* ip_limit_;  // Points just past buffered bytes
  uint32 peeked_;         // Bytes peeked from reader (need to skip)
  bool eof_;              // Hit end of input without an error?
  char scratch_[5];       // Temporary buffer for PeekFast() boundaries

  // Ensure that all of the tag metadata for the next tag is available
  // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
  // if (ip_limit_ - ip_ < 5).
  //
  // Returns true on success, false on error or end of input.

  explicit SnappyDecompressor(Source* reader)

  ~SnappyDecompressor() {
    // Advance past any bytes we peeked at from the reader
    reader_->Skip(peeked_);

  // Returns true iff we have hit the end of the input without an error.

  // Read the uncompressed length stored at the start of the compressed data.
  // On success, stores the length in *result and returns true.
  // On failure, returns false.
  bool ReadUncompressedLength(uint32* result) {
    DCHECK(ip_ == NULL);  // Must not have read anything yet
    // Length is encoded in 1..5 bytes
    if (shift >= 32) return false;  // Varint longer than 32 bits: corrupt
    const char* ip = reader_->Peek(&n);
    if (n == 0) return false;       // Source exhausted mid-varint
    const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
    *result |= static_cast<uint32>(c & 0x7f) << shift;

  // Process the next item found in the input.
  // Returns true if successful, false on error or end of input.
  template <class Writer>
  void DecompressAllTags(Writer* writer) {
    const char* ip = ip_;
    // Need at least 5 buffered bytes: 1 tag byte + up to 4 trailer bytes.
    if (ip_limit_ - ip < 5) {
      if (!RefillTag()) return;

    const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));

    if ((c & 0x3) == LITERAL) {
      uint32 literal_length = c >> 2;
      if (PREDICT_FALSE(literal_length >= 60)) {
        // Long literal: real length is in the next 1..4 bytes.
        const uint32 literal_length_length = literal_length - 59;
        LittleEndian::Load32(ip) & wordmask[literal_length_length];
        ip += literal_length_length;

      uint32 avail = ip_limit_ - ip;
      while (avail < literal_length) {
        // Literal straddles the buffered fragment: flush what we have,
        // then refill from the reader.
        if (!writer->Append(ip, avail, false)) return;
        literal_length -= avail;
        reader_->Skip(peeked_);
        ip = reader_->Peek(&n);
        if (avail == 0) return;  // Premature end of input
        ip_limit_ = ip + avail;

      bool allow_fast_path = (avail >= 16);
      if (!writer->Append(ip, literal_length, allow_fast_path)) {
      ip += literal_length;
      // Copy element: decode length/offset via the precomputed char_table.
      const uint32 entry = char_table[c];
      const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
      const uint32 length = entry & 0xff;

      // copy_offset/256 is encoded in bits 8..10. By just fetching
      // those bits, we get copy_offset (since the bit-field starts at
      // bit 8).
      const uint32 copy_offset = entry & 0x700;
      if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
// Ensures the full tag (opcode byte plus its extra bytes) is contiguous in
// [ip_, ip_limit_), stitching across fragment boundaries via scratch_.
// NOTE(review): the empty-fragment/eof handling, peeked_ assignments, an
// else branch, and several closing braces appear to be elided from this
// chunk; only comments are added here.
bool SnappyDecompressor::RefillTag() {
  const char* ip = ip_;
  if (ip == ip_limit_) {
    // Fetch a new fragment from the reader
    reader_->Skip(peeked_);  // All peeked bytes are used up
    ip = reader_->Peek(&n);

  // Read the tag character
  DCHECK_LT(ip, ip_limit_);
  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
  const uint32 entry = char_table[c];
  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
  DCHECK_LE(needed, sizeof(scratch_));

  // Read more bytes from reader if needed
  uint32 nbuf = ip_limit_ - ip;

  // Stitch together bytes from ip and reader to form the word
  // contents. We store the needed bytes in "scratch_". They
  // will be consumed immediately by the caller since we do not
  // read more than we need.
  memmove(scratch_, ip, nbuf);
  reader_->Skip(peeked_);  // All peeked bytes are used up
  while (nbuf < needed) {
    const char* src = reader_->Peek(&length);
    if (length == 0) return false;  // Source exhausted before full tag read
    uint32 to_add = min<uint32>(needed - nbuf, length);
    memcpy(scratch_ + nbuf, src, to_add);
    reader_->Skip(to_add);
  DCHECK_EQ(nbuf, needed);
  ip_limit_ = scratch_ + needed;
} else if (nbuf < 5) {
  // Have enough bytes, but move into scratch_ so that we do not
  // read past end of input
  memmove(scratch_, ip, nbuf);
  reader_->Skip(peeked_);  // All peeked bytes are used up
  ip_limit_ = scratch_ + nbuf;

  // Pass pointer to buffer returned by reader_.
772 template <typename Writer>
773 static bool InternalUncompress(Source* r,
776 // Read the uncompressed length from the front of the compressed input
777 SnappyDecompressor decompressor(r);
778 uint32 uncompressed_len = 0;
779 if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
780 // Protect against possible DoS attack
781 if (static_cast<uint64>(uncompressed_len) > max_len) {
785 writer->SetExpectedLength(uncompressed_len);
787 // Process the entire input
788 decompressor.DecompressAllTags(writer);
789 return (decompressor.eof() && writer->CheckLength());
792 bool GetUncompressedLength(Source* source, uint32* result) {
793 SnappyDecompressor decompressor(source);
794 return decompressor.ReadUncompressedLength(result);
// Compresses all bytes from "reader" into "writer", preceded by the
// varint-encoded uncompressed length; returns the total bytes written.
// NOTE(review): the outer per-block loop header, declarations of "written"
// and "table_size", N decrement, scratch cleanup, and several closing
// braces appear to be elided from this chunk; only comments are added here.
size_t Compress(Source* reader, Sink* writer) {
  int N = reader->Available();
  char ulength[Varint::kMax32];
  char* p = Varint::Encode32(ulength, N);
  writer->Append(ulength, p-ulength);
  written += (p - ulength);

  internal::WorkingMemory wmem;
  char* scratch = NULL;         // Input staging buffer, allocated lazily
  char* scratch_output = NULL;  // Output staging buffer, allocated lazily

  // Get next block to compress (without copying if possible)
  size_t fragment_size;
  const char* fragment = reader->Peek(&fragment_size);
  DCHECK_NE(fragment_size, 0) << ": premature end of input";
  const int num_to_read = min(N, kBlockSize);
  size_t bytes_read = fragment_size;

  int pending_advance = 0;
  if (bytes_read >= num_to_read) {
    // Buffer returned by reader is large enough
    pending_advance = num_to_read;
    fragment_size = num_to_read;
    // Read into scratch buffer
    if (scratch == NULL) {
      // If this is the last iteration, we want to allocate N bytes
      // of space, otherwise the max possible kBlockSize space.
      // num_to_read contains exactly the correct value
      scratch = new char[num_to_read];

    memcpy(scratch, fragment, bytes_read);
    reader->Skip(bytes_read);

    // Accumulate fragments until a full block is staged in scratch.
    while (bytes_read < num_to_read) {
      fragment = reader->Peek(&fragment_size);
      size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
      memcpy(scratch + bytes_read, fragment, n);

    DCHECK_EQ(bytes_read, num_to_read);
    fragment_size = num_to_read;

  DCHECK_EQ(fragment_size, num_to_read);

  // Get encoding table for compression
  uint16* table = wmem.GetHashTable(num_to_read, &table_size);

  // Compress input_fragment and append to dest
  const int max_output = MaxCompressedLength(num_to_read);

  // Need a scratch buffer for the output, in case the byte sink doesn't
  // have room for us directly.
  if (scratch_output == NULL) {
    scratch_output = new char[max_output];

  // Since we encode kBlockSize regions followed by a region
  // which is <= kBlockSize in length, a previously allocated
  // scratch_output[] region is big enough for this iteration.

  char* dest = writer->GetAppendBuffer(max_output, scratch_output);
  char* end = internal::CompressFragment(fragment, fragment_size,
                                         dest, table, table_size);
  writer->Append(dest, end - dest);
  written += (end - dest);

  reader->Skip(pending_advance);

  delete[] scratch_output;
878 // -----------------------------------------------------------------------
879 // Flat array interfaces
880 // -----------------------------------------------------------------------
882 // A type that writes to a flat array.
883 // Note that this is not a "ByteSink", but a type that matches the
884 // Writer template argument to SnappyDecompressor::DecompressAllTags().
class SnappyArrayWriter {
  // NOTE(review): member declarations (base_/op_/op_limit_), access
  // specifiers, the constructor initializer list, and many return
  // statements and closing braces appear to be elided from this chunk;
  // only comments are added here — the visible code is unchanged.
  inline explicit SnappyArrayWriter(char* dst)

  // Records where output must end; CheckLength() compares against this.
  inline void SetExpectedLength(size_t len) {
    op_limit_ = op_ + len;

  // True iff exactly the expected number of bytes was written.
  inline bool CheckLength() const {
    return op_ == op_limit_;

  // Appends "len" bytes from "ip"; allow_fast_path promises that at least
  // 16 bytes are readable at "ip".
  inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
    const int space_left = op_limit_ - op;
    if (allow_fast_path && len <= 16 && space_left >= 16) {
      // Fast path, used for the majority (about 90%) of dynamic invocations.
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
    if (space_left < len) {

  // Copies "len" bytes from "offset" bytes back in the output itself
  // (regions may overlap, replicating the pattern).
  inline bool AppendFromSelf(uint32 offset, uint32 len) {
    const int space_left = op_limit_ - op;

    // Reject offsets that reach before the start of the output.
    if (op - base_ <= offset - 1u) {  // -1u catches offset==0
    if (len <= 16 && offset >= 8 && space_left >= 16) {
      // Fast path, used for the majority (70-80%) of dynamic invocations.
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
    if (space_left >= len + kMaxIncrementCopyOverflow) {
      IncrementalCopyFastPath(op - offset, op, len);
    if (space_left < len) {
      IncrementalCopy(op - offset, op, len);
949 bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
950 ByteArraySource reader(compressed, n);
951 return RawUncompress(&reader, uncompressed);
954 bool RawUncompress(Source* compressed, char* uncompressed) {
955 SnappyArrayWriter output(uncompressed);
956 return InternalUncompress(compressed, &output, kuint32max);
959 bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
961 if (!GetUncompressedLength(compressed, n, &ulength)) {
964 // Protect against possible DoS attack
965 if ((static_cast<uint64>(ulength) + uncompressed->size()) >
966 uncompressed->max_size()) {
969 STLStringResizeUninitialized(uncompressed, ulength);
970 return RawUncompress(compressed, n, string_as_array(uncompressed));
974 // A Writer that drops everything on the floor and just does validation
975 class SnappyDecompressionValidator {
981 inline SnappyDecompressionValidator() : produced_(0) { }
982 inline void SetExpectedLength(size_t len) {
985 inline bool CheckLength() const {
986 return expected_ == produced_;
988 inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
990 return produced_ <= expected_;
992 inline bool AppendFromSelf(uint32 offset, uint32 len) {
993 if (produced_ <= offset - 1u) return false; // -1u catches offset==0
995 return produced_ <= expected_;
999 bool IsValidCompressedBuffer(const char* compressed, size_t n) {
1000 ByteArraySource reader(compressed, n);
1001 SnappyDecompressionValidator writer;
1002 return InternalUncompress(&reader, &writer, kuint32max);
1005 void RawCompress(const char* input,
1006 size_t input_length,
1008 size_t* compressed_length) {
1009 ByteArraySource reader(input, input_length);
1010 UncheckedByteArraySink writer(compressed);
1011 Compress(&reader, &writer);
1013 // Compute how many bytes were added
1014 *compressed_length = (writer.CurrentDestination() - compressed);
1017 size_t Compress(const char* input, size_t input_length, string* compressed) {
1018 // Pre-grow the buffer to the max length of the compressed output
1019 compressed->resize(MaxCompressedLength(input_length));
1021 size_t compressed_length;
1022 RawCompress(input, input_length, string_as_array(compressed),
1023 &compressed_length);
1024 compressed->resize(compressed_length);
1025 return compressed_length;
1029 } // end namespace snappy