From 5f8de423f190bbb79a62f804151bc24824fa32d8 Mon Sep 17 00:00:00 2001
From: "Matt A. Tobin"
Date: Fri, 2 Feb 2018 04:16:08 -0500
Subject: Add m-esr52 at 52.6.0

---
 other-licenses/snappy/src/snappy.cc | 1400 +++++++++++++++++++++++++++++++++++
 1 file changed, 1400 insertions(+)
 create mode 100644 other-licenses/snappy/src/snappy.cc

diff --git a/other-licenses/snappy/src/snappy.cc b/other-licenses/snappy/src/snappy.cc
new file mode 100644
index 000000000..8a3668c69
--- /dev/null
+++ b/other-licenses/snappy/src/snappy.cc
@@ -0,0 +1,1400 @@
+// Copyright 2005 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "snappy.h"
+#include "snappy-internal.h"
+#include "snappy-sinksource.h"
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+namespace snappy {
+
+using internal::COPY_1_BYTE_OFFSET;
+using internal::COPY_2_BYTE_OFFSET;
+using internal::COPY_4_BYTE_OFFSET;
+using internal::LITERAL;
+using internal::char_table;
+using internal::kMaximumTagLength;
+using internal::wordmask;
+
+// Any hash function will produce a valid compressed bitstream, but a good
+// hash function reduces the number of collisions and thus yields better
+// compression for compressible input, and more speed for incompressible
+// input. Of course, it doesn't hurt if the hash function is reasonably fast
+// either, as it gets called a lot.
+static inline uint32 HashBytes(uint32 bytes, int shift) {
+  uint32 kMul = 0x1e35a7bd;
+  return (bytes * kMul) >> shift;
+}
+static inline uint32 Hash(const char* p, int shift) {
+  return HashBytes(UNALIGNED_LOAD32(p), shift);
+}
+
+size_t MaxCompressedLength(size_t source_len) {
+  // Compressed data can be defined as:
+  //    compressed := item* literal*
+  //    item       := literal* copy
+  //
+  // The trailing literal sequence has a space blowup of at most 62/60
+  // since a literal of length 60 needs one tag byte + one extra byte
+  // for length information.
+  //
+  // Item blowup is trickier to measure. Suppose the "copy" op copies
+  // 4 bytes of data.
+  // Because of a special check in the encoding code,
+  // we produce a 4-byte copy only if the offset is < 65536. Therefore
+  // the copy op takes 3 bytes to encode, and this type of item leads
+  // to at most the 62/60 blowup for representing literals.
+  //
+  // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+  // enough, it will take 5 bytes to encode the copy op. Therefore the
+  // worst case here is a one-byte literal followed by a five-byte copy.
+  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+  //
+  // This last factor dominates the blowup, so the final estimate is:
+  return 32 + source_len + source_len/6;
+}
+
+// Copy "len" bytes from "src" to "op", one byte at a time. Used for
+// handling COPY operations where the input and output regions may
+// overlap. For example, suppose:
+//    src    == "ab"
+//    op     == src + 2
+//    len    == 20
+// After IncrementalCopy(src, op, len), the result will have
+// eleven copies of "ab"
+//    ababababababababababab
+// Note that this does not match the semantics of either memcpy()
+// or memmove().
+static inline void IncrementalCopy(const char* src, char* op, ssize_t len) {
+  assert(len > 0);
+  do {
+    *op++ = *src++;
+  } while (--len > 0);
+}
+
+// Equivalent to IncrementalCopy except that it can write up to ten extra
+// bytes after the end of the copy, and that it is faster.
+//
+// The main part of this loop is a simple copy of eight bytes at a time until
+// we've copied (at least) the requested amount of bytes. However, if op and
+// src are less than eight bytes apart (indicating a repeating pattern of
+// length < 8), we first need to expand the pattern in order to get the correct
+// results. For instance, if the buffer looks like this, with the eight-byte
+// <src> and <op> patterns marked as intervals:
+//
+//    abxxxxxxxxxxxx
+//    [------]           src
+//      [------]         op
+//
+// a single eight-byte copy from <src> to <op> will repeat the pattern once,
+// after which we can move <op> two bytes without moving <src>:
+//
+//    ababxxxxxxxxxx
+//    [------]           src
+//        [------]       op
+//
+// and repeat the exercise until the two no longer overlap.
+//
+// This allows us to do very well in the special case of one single byte
+// repeated many times, without taking a big hit for more general cases.
+//
+// The worst case of extra writing past the end of the match occurs when
+// op - src == 1 and len == 1; the last copy will read from byte positions
+// [0..7] and write to [4..11], whereas it was only supposed to write to
+// position 1. Thus, ten excess bytes.
+
+namespace {
+
+const int kMaxIncrementCopyOverflow = 10;
+
+inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
+  while (PREDICT_FALSE(op - src < 8)) {
+    UnalignedCopy64(src, op);
+    len -= op - src;
+    op += op - src;
+  }
+  while (len > 0) {
+    UnalignedCopy64(src, op);
+    src += 8;
+    op += 8;
+    len -= 8;
+  }
+}
+
+}  // namespace
+
+static inline char* EmitLiteral(char* op,
+                                const char* literal,
+                                int len,
+                                bool allow_fast_path) {
+  int n = len - 1;      // Zero-length literals are disallowed
+  if (n < 60) {
+    // Fits in tag byte
+    *op++ = LITERAL | (n << 2);
+
+    // The vast majority of copies are below 16 bytes, for which a
+    // call to memcpy is overkill. This fast path can sometimes
+    // copy up to 15 bytes too much, but that is okay in the
+    // main loop, since we have a bit to go on for both sides:
+    //
+    //   - The input will always have kInputMarginBytes = 15 extra
+    //     available bytes, as long as we're in the main loop, and
+    //     if not, allow_fast_path = false.
+    //   - The output will always have 32 spare bytes (see
+    //     MaxCompressedLength).
+    if (allow_fast_path && len <= 16) {
+      UnalignedCopy64(literal, op);
+      UnalignedCopy64(literal + 8, op + 8);
+      return op + len;
+    }
+  } else {
+    // Encode in upcoming bytes
+    char* base = op;
+    int count = 0;
+    op++;
+    while (n > 0) {
+      *op++ = n & 0xff;
+      n >>= 8;
+      count++;
+    }
+    assert(count >= 1);
+    assert(count <= 4);
+    *base = LITERAL | ((59+count) << 2);
+  }
+  memcpy(op, literal, len);
+  return op + len;
+}
+
+static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
+  assert(len <= 64);
+  assert(len >= 4);
+  assert(offset < 65536);
+
+  if ((len < 12) && (offset < 2048)) {
+    size_t len_minus_4 = len - 4;
+    assert(len_minus_4 < 8);            // Must fit in 3 bits
+    *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
+    *op++ = offset & 0xff;
+  } else {
+    *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
+    LittleEndian::Store16(op, offset);
+    op += 2;
+  }
+  return op;
+}
+
+static inline char* EmitCopy(char* op, size_t offset, int len) {
+  // Emit 64 byte copies but make sure to keep at least four bytes reserved
+  while (PREDICT_FALSE(len >= 68)) {
+    op = EmitCopyLessThan64(op, offset, 64);
+    len -= 64;
+  }
+
+  // Emit an extra 60 byte copy if we have too much data to fit in one copy
+  if (len > 64) {
+    op = EmitCopyLessThan64(op, offset, 60);
+    len -= 60;
+  }
+
+  // Emit remainder
+  op = EmitCopyLessThan64(op, offset, len);
+  return op;
+}
+
+
+bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+  uint32 v = 0;
+  const char* limit = start + n;
+  if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+    *result = v;
+    return true;
+  } else {
+    return false;
+  }
+}
+
+namespace internal {
+uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
+  // Use smaller hash table when input.size() is smaller, since we
+  // fill the table, incurring O(hash table size) overhead for
+  // compression, and if the input is short, we won't need that
+  // many hash table entries anyway.
+  assert(kMaxHashTableSize >= 256);
+  size_t htsize = 256;
+  while (htsize < kMaxHashTableSize && htsize < input_size) {
+    htsize <<= 1;
+  }
+
+  uint16* table;
+  if (htsize <= ARRAYSIZE(small_table_)) {
+    table = small_table_;
+  } else {
+    if (large_table_ == NULL) {
+      large_table_ = new uint16[kMaxHashTableSize];
+    }
+    table = large_table_;
+  }
+
+  *table_size = htsize;
+  memset(table, 0, htsize * sizeof(*table));
+  return table;
+}
+}  // end namespace internal
+
+// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
+// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+// empirically found that overlapping loads such as
+//  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+//
+// We have different versions for 64- and 32-bit; ideally we would avoid the
+// two functions and just inline the UNALIGNED_LOAD64 call into
+// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
+// enough to avoid loading the value multiple times then. For 64-bit, the load
+// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
+// done at GetUint32AtOffset() time.
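+//
+// As an illustrative aside (an editorial sketch, not upstream text),
+// assuming a little-endian host, the equivalence described above reduces
+// to one wide load plus shifts for a pointer p with eight readable bytes:
+//
+//   uint64 v  = UNALIGNED_LOAD64(p);           // single 8-byte load
+//   uint32 w0 = static_cast<uint32>(v);        // == UNALIGNED_LOAD32(p)
+//   uint32 w1 = static_cast<uint32>(v >> 8);   // == UNALIGNED_LOAD32(p + 1)
+//   uint32 w2 = static_cast<uint32>(v >> 16);  // == UNALIGNED_LOAD32(p + 2)
+//   uint32 w4 = static_cast<uint32>(v >> 32);  // == UNALIGNED_LOAD32(p + 4)
+//
+// which replaces four overlapping narrow loads that were measured to be
+// slower on x86-64.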
+
+#ifdef ARCH_K8
+
+typedef uint64 EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+  return UNALIGNED_LOAD64(ptr);
+}
+
+static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+  assert(offset >= 0);
+  assert(offset <= 4);
+  return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+}
+
+#else
+
+typedef const char* EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+  return ptr;
+}
+
+static inline uint32 GetUint32AtOffset(const char* v, int offset) {
+  assert(offset >= 0);
+  assert(offset <= 4);
+  return UNALIGNED_LOAD32(v + offset);
+}
+
+#endif
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input" is at most "kBlockSize" bytes long.
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input.size())" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+namespace internal {
+char* CompressFragment(const char* input,
+                       size_t input_size,
+                       char* op,
+                       uint16* table,
+                       const int table_size) {
+  // "ip" is the input pointer, and "op" is the output pointer.
+  const char* ip = input;
+  assert(input_size <= kBlockSize);
+  assert((table_size & (table_size - 1)) == 0);  // table must be power of two
+  const int shift = 32 - Bits::Log2Floor(table_size);
+  assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
+  const char* ip_end = input + input_size;
+  const char* base_ip = ip;
+  // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+  // [next_emit, ip_end) after the main loop.
+  const char* next_emit = ip;
+
+  const size_t kInputMarginBytes = 15;
+  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+    const char* ip_limit = input + input_size - kInputMarginBytes;
+
+    for (uint32 next_hash = Hash(++ip, shift); ; ) {
+      assert(next_emit < ip);
+      // The body of this loop calls EmitLiteral once and then EmitCopy one or
+      // more times. (The exception is that when we're close to exhausting
+      // the input we goto emit_remainder.)
+      //
+      // In the first iteration of this loop we're just starting, so
+      // there's nothing to copy, so calling EmitLiteral once is
+      // necessary. And we only start a new iteration when the
+      // current iteration has determined that a call to EmitLiteral will
+      // precede the next call to EmitCopy (if any).
+      //
+      // Step 1: Scan forward in the input looking for a 4-byte-long match.
+      // If we get close to exhausting the input then goto emit_remainder.
+      //
+      // Heuristic match skipping: If 32 bytes are scanned with no matches
+      // found, start looking only at every other byte. If 32 more bytes are
+      // scanned (or skipped), look at every third byte, etc.. When a match is
+      // found, immediately go back to looking at every byte. This is a small
+      // loss (~5% performance, ~0.1% density) for compressible data due to more
+      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+      // win since the compressor quickly "realizes" the data is incompressible
+      // and doesn't bother looking for matches everywhere.
+      //
+      // The "skip" variable keeps track of how many bytes there are since the
+      // last match; dividing it by 32 (ie. right-shifting by five) gives the
+      // number of bytes to move ahead for each iteration.
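+      //
+      // For example (an illustrative trace of the schedule below): each
+      // probe advances by (skip >> 5) and then adds that step back into
+      // "skip", so the step stays 1 while skip is in [32, 63] (32 probes),
+      // becomes 2 while skip is in [64, 95] (16 probes), 3 while skip is
+      // in [96, 127] (~11 probes), and so on; the scan accelerates
+      // steadily on incompressible input.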
+      uint32 skip = 32;
+
+      const char* next_ip = ip;
+      const char* candidate;
+      do {
+        ip = next_ip;
+        uint32 hash = next_hash;
+        assert(hash == Hash(ip, shift));
+        uint32 bytes_between_hash_lookups = skip >> 5;
+        skip += bytes_between_hash_lookups;
+        next_ip = ip + bytes_between_hash_lookups;
+        if (PREDICT_FALSE(next_ip > ip_limit)) {
+          goto emit_remainder;
+        }
+        next_hash = Hash(next_ip, shift);
+        candidate = base_ip + table[hash];
+        assert(candidate >= base_ip);
+        assert(candidate < ip);
+
+        table[hash] = ip - base_ip;
+      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+                            UNALIGNED_LOAD32(candidate)));
+
+      // Step 2: A 4-byte match has been found. We'll later see if more
+      // than 4 bytes match. But, prior to the match, input
+      // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+      assert(next_emit + 16 <= ip_end);
+      op = EmitLiteral(op, next_emit, ip - next_emit, true);
+
+      // Step 3: Call EmitCopy, and then see if another EmitCopy could
+      // be our next move. Repeat until we find no match for the
+      // input immediately after what was consumed by the last EmitCopy call.
+      //
+      // If we exit this loop normally then we need to call EmitLiteral next,
+      // though we don't yet know how big the literal will be. We handle that
+      // by proceeding to the next iteration of the main loop. We also can exit
+      // this loop via goto if we get close to exhausting the input.
+      EightBytesReference input_bytes;
+      uint32 candidate_bytes = 0;
+
+      do {
+        // We have a 4-byte match at ip, and no need to emit any
+        // "literal bytes" prior to ip.
+        const char* base = ip;
+        int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
+        ip += matched;
+        size_t offset = base - candidate;
+        assert(0 == memcmp(base, candidate, matched));
+        op = EmitCopy(op, offset, matched);
+        // We could immediately start working at ip now, but to improve
+        // compression we first update table[Hash(ip - 1, ...)].
+        const char* insert_tail = ip - 1;
+        next_emit = ip;
+        if (PREDICT_FALSE(ip >= ip_limit)) {
+          goto emit_remainder;
+        }
+        input_bytes = GetEightBytesAt(insert_tail);
+        uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+        table[prev_hash] = ip - base_ip - 1;
+        uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+        candidate = base_ip + table[cur_hash];
+        candidate_bytes = UNALIGNED_LOAD32(candidate);
+        table[cur_hash] = ip - base_ip;
+      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+
+      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+      ++ip;
+    }
+  }
+
+ emit_remainder:
+  // Emit the remaining bytes as a literal
+  if (next_emit < ip_end) {
+    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
+  }
+
+  return op;
+}
+}  // end namespace internal
+
+// Signature of output types needed by decompression code.
+// The decompression code is templatized on a type that obeys this
+// signature so that we do not pay virtual function call overhead in
+// the middle of a tight decompression loop.
+//
+// class DecompressionWriter {
+//  public:
+//   // Called before decompression
+//   void SetExpectedLength(size_t length);
+//
+//   // Called after decompression
+//   bool CheckLength() const;
+//
+//   // Called repeatedly during decompression
+//   bool Append(const char* ip, size_t length);
+//   bool AppendFromSelf(uint32 offset, size_t length);
+//
+//   // The rules for how TryFastAppend differs from Append are somewhat
+//   // convoluted:
+//   //
+//   //  - TryFastAppend is allowed to decline (return false) at any
+//   //    time, for any reason -- just "return false" would be
+//   //    a perfectly legal implementation of TryFastAppend.
+//   //    The intention is for TryFastAppend to allow a fast path
+//   //    in the common case of a small append.
+//   //  - TryFastAppend is allowed to read up to <available> bytes
+//   //    from the input buffer, whereas Append is allowed to read
+//   //    <length>. However, if it returns true, it must leave
+//   //    at least five (kMaximumTagLength) bytes in the input buffer
+//   //    afterwards, so that there is always enough space to read the
+//   //    next tag without checking for a refill.
+//   //  - TryFastAppend must always decline (return false)
+//   //    if <length> is 61 or more, as in this case the literal length is not
+//   //    decoded fully. In practice, this should not be a big problem,
+//   //    as it is unlikely that one would implement a fast path accepting
+//   //    this much data.
+//   //
+//   bool TryFastAppend(const char* ip, size_t available, size_t length);
+// };
+
+
+// Helper class for decompression
+class SnappyDecompressor {
+ private:
+  Source*       reader_;         // Underlying source of bytes to decompress
+  const char*   ip_;             // Points to next buffered byte
+  const char*   ip_limit_;       // Points just past buffered bytes
+  uint32        peeked_;         // Bytes peeked from reader (need to skip)
+  bool          eof_;            // Hit end of input without an error?
+  char          scratch_[kMaximumTagLength];  // See RefillTag().
+
+  // Ensure that all of the tag metadata for the next tag is available
+  // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
+  // if (ip_limit_ - ip_ < 5).
+  //
+  // Returns true on success, false on error or end of input.
+  bool RefillTag();
+
+ public:
+  explicit SnappyDecompressor(Source* reader)
+      : reader_(reader),
+        ip_(NULL),
+        ip_limit_(NULL),
+        peeked_(0),
+        eof_(false) {
+  }
+
+  ~SnappyDecompressor() {
+    // Advance past any bytes we peeked at from the reader
+    reader_->Skip(peeked_);
+  }
+
+  // Returns true iff we have hit the end of the input without an error.
+  bool eof() const {
+    return eof_;
+  }
+
+  // Read the uncompressed length stored at the start of the compressed data.
+  // On success, stores the length in *result and returns true.
+  // On failure, returns false.
+  bool ReadUncompressedLength(uint32* result) {
+    assert(ip_ == NULL);       // Must not have read anything yet
+    // Length is encoded in 1..5 bytes
+    *result = 0;
+    uint32 shift = 0;
+    while (true) {
+      if (shift >= 32) return false;
+      size_t n;
+      const char* ip = reader_->Peek(&n);
+      if (n == 0) return false;
+      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+      reader_->Skip(1);
+      uint32 val = c & 0x7f;
+      if (((val << shift) >> shift) != val) return false;
+      *result |= val << shift;
+      if (c < 128) {
+        break;
+      }
+      shift += 7;
+    }
+    return true;
+  }
+
+  // Process the next item found in the input.
+  // Returns true if successful, false on error or end of input.
+  template <class Writer>
+  void DecompressAllTags(Writer* writer) {
+    const char* ip = ip_;
+
+    // We could have put this refill fragment only at the beginning of the loop.
+    // However, duplicating it at the end of each branch gives the compiler more
+    // scope to optimize the <ip_limit_ - ip> expression based on the local
+    // context, which overall increases speed.
+    #define MAYBE_REFILL() \
+        if (ip_limit_ - ip < kMaximumTagLength) { \
+          ip_ = ip; \
+          if (!RefillTag()) return; \
+          ip = ip_; \
+        }
+
+    MAYBE_REFILL();
+    for ( ;; ) {
+      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+
+      if ((c & 0x3) == LITERAL) {
+        size_t literal_length = (c >> 2) + 1u;
+        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
+          assert(literal_length < 61);
+          ip += literal_length;
+          // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
+          // will not return true unless there's already at least five spare
+          // bytes in addition to the literal.
+          continue;
+        }
+        if (PREDICT_FALSE(literal_length >= 61)) {
+          // Long literal.
+          const size_t literal_length_length = literal_length - 60;
+          literal_length =
+              (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
+          ip += literal_length_length;
+        }
+
+        size_t avail = ip_limit_ - ip;
+        while (avail < literal_length) {
+          if (!writer->Append(ip, avail)) return;
+          literal_length -= avail;
+          reader_->Skip(peeked_);
+          size_t n;
+          ip = reader_->Peek(&n);
+          avail = n;
+          peeked_ = avail;
+          if (avail == 0) return;  // Premature end of input
+          ip_limit_ = ip + avail;
+        }
+        if (!writer->Append(ip, literal_length)) {
+          return;
+        }
+        ip += literal_length;
+        MAYBE_REFILL();
+      } else {
+        const uint32 entry = char_table[c];
+        const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
+        const uint32 length = entry & 0xff;
+        ip += entry >> 11;
+
+        // copy_offset/256 is encoded in bits 8..10. By just fetching
+        // those bits, we get copy_offset (since the bit-field starts at
+        // bit 8).
+        const uint32 copy_offset = entry & 0x700;
+        if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+          return;
+        }
+        MAYBE_REFILL();
+      }
+    }
+
+#undef MAYBE_REFILL
+  }
+};
+
+bool SnappyDecompressor::RefillTag() {
+  const char* ip = ip_;
+  if (ip == ip_limit_) {
+    // Fetch a new fragment from the reader
+    reader_->Skip(peeked_);   // All peeked bytes are used up
+    size_t n;
+    ip = reader_->Peek(&n);
+    peeked_ = n;
+    if (n == 0) {
+      eof_ = true;
+      return false;
+    }
+    ip_limit_ = ip + n;
+  }
+
+  // Read the tag character
+  assert(ip < ip_limit_);
+  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+  const uint32 entry = char_table[c];
+  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
+  assert(needed <= sizeof(scratch_));
+
+  // Read more bytes from reader if needed
+  uint32 nbuf = ip_limit_ - ip;
+  if (nbuf < needed) {
+    // Stitch together bytes from ip and reader to form the word
+    // contents. We store the needed bytes in "scratch_". They
+    // will be consumed immediately by the caller since we do not
+    // read more than we need.
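+    //
+    // A hypothetical worked example: if "c" is a copy tag with a four-byte
+    // offset (entry >> 11 == 4, so needed == 5) but only nbuf == 2 bytes
+    // are still buffered, the memmove below saves those 2 bytes into
+    // scratch_, and the loop then pulls the remaining 3 bytes from the
+    // reader.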
+    memmove(scratch_, ip, nbuf);
+    reader_->Skip(peeked_);  // All peeked bytes are used up
+    peeked_ = 0;
+    while (nbuf < needed) {
+      size_t length;
+      const char* src = reader_->Peek(&length);
+      if (length == 0) return false;
+      uint32 to_add = min<size_t>(needed - nbuf, length);
+      memcpy(scratch_ + nbuf, src, to_add);
+      nbuf += to_add;
+      reader_->Skip(to_add);
+    }
+    assert(nbuf == needed);
+    ip_ = scratch_;
+    ip_limit_ = scratch_ + needed;
+  } else if (nbuf < kMaximumTagLength) {
+    // Have enough bytes, but move into scratch_ so that we do not
+    // read past end of input
+    memmove(scratch_, ip, nbuf);
+    reader_->Skip(peeked_);  // All peeked bytes are used up
+    peeked_ = 0;
+    ip_ = scratch_;
+    ip_limit_ = scratch_ + nbuf;
+  } else {
+    // Pass pointer to buffer returned by reader_.
+    ip_ = ip;
+  }
+  return true;
+}
+
+template <typename Writer>
+static bool InternalUncompress(Source* r, Writer* writer) {
+  // Read the uncompressed length from the front of the compressed input
+  SnappyDecompressor decompressor(r);
+  uint32 uncompressed_len = 0;
+  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+  return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
+}
+
+template <typename Writer>
+static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
+                                      Writer* writer,
+                                      uint32 uncompressed_len) {
+  writer->SetExpectedLength(uncompressed_len);
+
+  // Process the entire input
+  decompressor->DecompressAllTags(writer);
+  writer->Flush();
+  return (decompressor->eof() && writer->CheckLength());
+}
+
+bool GetUncompressedLength(Source* source, uint32* result) {
+  SnappyDecompressor decompressor(source);
+  return decompressor.ReadUncompressedLength(result);
+}
+
+size_t Compress(Source* reader, Sink* writer) {
+  size_t written = 0;
+  size_t N = reader->Available();
+  char ulength[Varint::kMax32];
+  char* p = Varint::Encode32(ulength, N);
+  writer->Append(ulength, p-ulength);
+  written += (p - ulength);
+
+  internal::WorkingMemory wmem;
+  char* scratch = NULL;
+  char* scratch_output = NULL;
+
+  while (N > 0) {
+    // Get next block to compress (without copying if possible)
+    size_t fragment_size;
+    const char* fragment = reader->Peek(&fragment_size);
+    assert(fragment_size != 0);  // premature end of input
+    const size_t num_to_read = min(N, kBlockSize);
+    size_t bytes_read = fragment_size;
+
+    size_t pending_advance = 0;
+    if (bytes_read >= num_to_read) {
+      // Buffer returned by reader is large enough
+      pending_advance = num_to_read;
+      fragment_size = num_to_read;
+    } else {
+      // Read into scratch buffer
+      if (scratch == NULL) {
+        // If this is the last iteration, we want to allocate N bytes
+        // of space, otherwise the max possible kBlockSize space.
+        // num_to_read contains exactly the correct value
+        scratch = new char[num_to_read];
+      }
+      memcpy(scratch, fragment, bytes_read);
+      reader->Skip(bytes_read);
+
+      while (bytes_read < num_to_read) {
+        fragment = reader->Peek(&fragment_size);
+        size_t n = min(fragment_size, num_to_read - bytes_read);
+        memcpy(scratch + bytes_read, fragment, n);
+        bytes_read += n;
+        reader->Skip(n);
+      }
+      assert(bytes_read == num_to_read);
+      fragment = scratch;
+      fragment_size = num_to_read;
+    }
+    assert(fragment_size == num_to_read);
+
+    // Get encoding table for compression
+    int table_size;
+    uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+
+    // Compress input_fragment and append to dest
+    const int max_output = MaxCompressedLength(num_to_read);
+
+    // Need a scratch buffer for the output, in case the byte sink doesn't
+    // have room for us directly.
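+    //
+    // (A sketch of the Sink contract this relies on, assuming the behavior
+    // documented in snappy-sinksource.h: GetAppendBuffer(length, scratch)
+    // may return sink-owned memory of at least "length" bytes or simply
+    // the "scratch" pointer passed in; the subsequent Append(dest, n) call
+    // is what commits the n bytes actually produced.)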
+    if (scratch_output == NULL) {
+      scratch_output = new char[max_output];
+    } else {
+      // Since we encode kBlockSize regions followed by a region
+      // which is <= kBlockSize in length, a previously allocated
+      // scratch_output[] region is big enough for this iteration.
+    }
+    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
+    char* end = internal::CompressFragment(fragment, fragment_size,
+                                           dest, table, table_size);
+    writer->Append(dest, end - dest);
+    written += (end - dest);
+
+    N -= num_to_read;
+    reader->Skip(pending_advance);
+  }
+
+  delete[] scratch;
+  delete[] scratch_output;
+
+  return written;
+}
+
+// -----------------------------------------------------------------------
+// IOVec interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to an iovec.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyIOVecWriter {
+ private:
+  const struct iovec* output_iov_;
+  const size_t output_iov_count_;
+
+  // We are currently writing into output_iov_[curr_iov_index_].
+  size_t curr_iov_index_;
+
+  // Bytes written to output_iov_[curr_iov_index_] so far.
+  size_t curr_iov_written_;
+
+  // Total bytes decompressed into output_iov_ so far.
+  size_t total_written_;
+
+  // Maximum number of bytes that will be decompressed into output_iov_.
+  size_t output_limit_;
+
+  inline char* GetIOVecPointer(size_t index, size_t offset) {
+    return reinterpret_cast<char*>(output_iov_[index].iov_base) +
+        offset;
+  }
+
+ public:
+  // Does not take ownership of iov. iov must be valid during the
+  // entire lifetime of the SnappyIOVecWriter.
+  inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
+      : output_iov_(iov),
+        output_iov_count_(iov_count),
+        curr_iov_index_(0),
+        curr_iov_written_(0),
+        total_written_(0),
+        output_limit_(-1) {
+  }
+
+  inline void SetExpectedLength(size_t len) {
+    output_limit_ = len;
+  }
+
+  inline bool CheckLength() const {
+    return total_written_ == output_limit_;
+  }
+
+  inline bool Append(const char* ip, size_t len) {
+    if (total_written_ + len > output_limit_) {
+      return false;
+    }
+
+    while (len > 0) {
+      assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+      if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
+        // This iovec is full. Go to the next one.
+        if (curr_iov_index_ + 1 >= output_iov_count_) {
+          return false;
+        }
+        curr_iov_written_ = 0;
+        ++curr_iov_index_;
+      }
+
+      const size_t to_write = std::min(
+          len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
+      memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+             ip,
+             to_write);
+      curr_iov_written_ += to_write;
+      total_written_ += to_write;
+      ip += to_write;
+      len -= to_write;
+    }
+
+    return true;
+  }
+
+  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+    const size_t space_left = output_limit_ - total_written_;
+    if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
+        output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
+      // Fast path, used for the majority (about 95%) of invocations.
+      char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
+      UnalignedCopy64(ip, ptr);
+      UnalignedCopy64(ip + 8, ptr + 8);
+      curr_iov_written_ += len;
+      total_written_ += len;
+      return true;
+    }
+
+    return false;
+  }
+
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    if (offset > total_written_ || offset == 0) {
+      return false;
+    }
+    const size_t space_left = output_limit_ - total_written_;
+    if (len > space_left) {
+      return false;
+    }
+
+    // Locate the iovec from which we need to start the copy.
+    size_t from_iov_index = curr_iov_index_;
+    size_t from_iov_offset = curr_iov_written_;
+    while (offset > 0) {
+      if (from_iov_offset >= offset) {
+        from_iov_offset -= offset;
+        break;
+      }
+
+      offset -= from_iov_offset;
+      assert(from_iov_index > 0);
+      --from_iov_index;
+      from_iov_offset = output_iov_[from_iov_index].iov_len;
+    }
+
+    // Copy bytes starting from the iovec pointed to by from_iov_index to
+    // the current iovec.
+    while (len > 0) {
+      assert(from_iov_index <= curr_iov_index_);
+      if (from_iov_index != curr_iov_index_) {
+        const size_t to_copy = std::min(
+            output_iov_[from_iov_index].iov_len - from_iov_offset,
+            len);
+        Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
+        len -= to_copy;
+        if (len > 0) {
+          ++from_iov_index;
+          from_iov_offset = 0;
+        }
+      } else {
+        assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+        size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
+                                      curr_iov_written_,
+                                  len);
+        if (to_copy == 0) {
+          // This iovec is full. Go to the next one.
+          if (curr_iov_index_ + 1 >= output_iov_count_) {
+            return false;
+          }
+          ++curr_iov_index_;
+          curr_iov_written_ = 0;
+          continue;
+        }
+        if (to_copy > len) {
+          to_copy = len;
+        }
+        IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset),
+                        GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+                        to_copy);
+        curr_iov_written_ += to_copy;
+        from_iov_offset += to_copy;
+        total_written_ += to_copy;
+        len -= to_copy;
+      }
+    }
+
+    return true;
+  }
+
+  inline void Flush() {}
+};
+
+bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
+                          const struct iovec* iov, size_t iov_cnt) {
+  ByteArraySource reader(compressed, compressed_length);
+  return RawUncompressToIOVec(&reader, iov, iov_cnt);
+}
+
+bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
+                          size_t iov_cnt) {
+  SnappyIOVecWriter output(iov, iov_cnt);
+  return InternalUncompress(compressed, &output);
+}
+
+// -----------------------------------------------------------------------
+// Flat array interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to a flat array.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
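+//
+// A hypothetical caller-side sketch of this flat-array path (only the
+// snappy:: calls are real; the surrounding names are made up):
+//
+//   size_t ulen;
+//   if (snappy::GetUncompressedLength(data, data_len, &ulen)) {
+//     char* out = new char[ulen];
+//     snappy::RawUncompress(data, data_len, out);  // drives SnappyArrayWriter
+//   }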
+class SnappyArrayWriter {
+ private:
+  char* base_;
+  char* op_;
+  char* op_limit_;
+
+ public:
+  inline explicit SnappyArrayWriter(char* dst)
+      : base_(dst),
+        op_(dst),
+        op_limit_(dst) {
+  }
+
+  inline void SetExpectedLength(size_t len) {
+    op_limit_ = op_ + len;
+  }
+
+  inline bool CheckLength() const {
+    return op_ == op_limit_;
+  }
+
+  inline bool Append(const char* ip, size_t len) {
+    char* op = op_;
+    const size_t space_left = op_limit_ - op;
+    if (space_left < len) {
+      return false;
+    }
+    memcpy(op, ip, len);
+    op_ = op + len;
+    return true;
+  }
+
+  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+    char* op = op_;
+    const size_t space_left = op_limit_ - op;
+    if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
+      // Fast path, used for the majority (about 95%) of invocations.
+      UnalignedCopy64(ip, op);
+      UnalignedCopy64(ip + 8, op + 8);
+      op_ = op + len;
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    char* op = op_;
+    const size_t space_left = op_limit_ - op;
+
+    // Check if we try to append from before the start of the buffer.
+    // Normally this would just be a check for "produced < offset",
+    // but "produced <= offset - 1u" is equivalent for every case
+    // except the one where offset==0, where the right side will wrap around
+    // to a very big number. This is convenient, as offset==0 is another
+    // invalid case that we also want to catch, so that we do not go
+    // into an infinite loop.
+    assert(op >= base_);
+    size_t produced = op - base_;
+    if (produced <= offset - 1u) {
+      return false;
+    }
+    if (len <= 16 && offset >= 8 && space_left >= 16) {
+      // Fast path, used for the majority (70-80%) of dynamic invocations.
+      UnalignedCopy64(op - offset, op);
+      UnalignedCopy64(op - offset + 8, op + 8);
+    } else {
+      if (space_left >= len + kMaxIncrementCopyOverflow) {
+        IncrementalCopyFastPath(op - offset, op, len);
+      } else {
+        if (space_left < len) {
+          return false;
+        }
+        IncrementalCopy(op - offset, op, len);
+      }
+    }
+
+    op_ = op + len;
+    return true;
+  }
+  inline size_t Produced() const {
+    return op_ - base_;
+  }
+  inline void Flush() {}
+};
+
+bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+  ByteArraySource reader(compressed, n);
+  return RawUncompress(&reader, uncompressed);
+}
+
+bool RawUncompress(Source* compressed, char* uncompressed) {
+  SnappyArrayWriter output(uncompressed);
+  return InternalUncompress(compressed, &output);
+}
+
+bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
+  size_t ulength;
+  if (!GetUncompressedLength(compressed, n, &ulength)) {
+    return false;
+  }
+  // On 32-bit builds: max_size() < kuint32max. Check for that instead
+  // of crashing (e.g., consider externally specified compressed data).
+  if (ulength > uncompressed->max_size()) {
+    return false;
+  }
+  STLStringResizeUninitialized(uncompressed, ulength);
+  return RawUncompress(compressed, n, string_as_array(uncompressed));
+}
+
+// A Writer that drops everything on the floor and just does validation
+class SnappyDecompressionValidator {
+ private:
+  size_t expected_;
+  size_t produced_;
+
+ public:
+  inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
+  inline void SetExpectedLength(size_t len) {
+    expected_ = len;
+  }
+  inline bool CheckLength() const {
+    return expected_ == produced_;
+  }
+  inline bool Append(const char* ip, size_t len) {
+    produced_ += len;
+    return produced_ <= expected_;
+  }
+  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+    return false;
+  }
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    // See SnappyArrayWriter::AppendFromSelf for an explanation of
+    // the "offset - 1u" trick.
+    if (produced_ <= offset - 1u) return false;
+    produced_ += len;
+    return produced_ <= expected_;
+  }
+  inline void Flush() {}
+};
+
+bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+  ByteArraySource reader(compressed, n);
+  SnappyDecompressionValidator writer;
+  return InternalUncompress(&reader, &writer);
+}
+
+bool IsValidCompressed(Source* compressed) {
+  SnappyDecompressionValidator writer;
+  return InternalUncompress(compressed, &writer);
+}
+
+void RawCompress(const char* input,
+                 size_t input_length,
+                 char* compressed,
+                 size_t* compressed_length) {
+  ByteArraySource reader(input, input_length);
+  UncheckedByteArraySink writer(compressed);
+  Compress(&reader, &writer);
+
+  // Compute how many bytes were added
+  *compressed_length = (writer.CurrentDestination() - compressed);
+}
+
+size_t Compress(const char* input, size_t input_length, string* compressed) {
+  // Pre-grow the buffer to the max length of the compressed output
+  compressed->resize(MaxCompressedLength(input_length));
+
+  size_t compressed_length;
+  RawCompress(input, input_length, string_as_array(compressed),
+              &compressed_length);
+  compressed->resize(compressed_length);
+  return compressed_length;
+}
+
+// -----------------------------------------------------------------------
+// Sink interface
+// -----------------------------------------------------------------------
+
+// A type that decompresses into a Sink. The template parameter
+// Allocator must export one method "char* Allocate(int size);", which
+// allocates a buffer of "size" and appends that to the destination.
+template <typename Allocator>
+class SnappyScatteredWriter {
+  Allocator allocator_;
+
+  // We need random access into the data generated so far. Therefore
+  // we keep track of all of the generated data as an array of blocks.
+  // All of the blocks except the last have length kBlockSize.
+  vector<char*> blocks_;
+  size_t expected_;
+
+  // Total size of all fully generated blocks so far
+  size_t full_size_;
+
+  // Pointer into current output block
+  char* op_base_;       // Base of output block
+  char* op_ptr_;        // Pointer to next unfilled byte in block
+  char* op_limit_;      // Pointer just past block
+
+  inline size_t Size() const {
+    return full_size_ + (op_ptr_ - op_base_);
+  }
+
+  bool SlowAppend(const char* ip, size_t len);
+  bool SlowAppendFromSelf(size_t offset, size_t len);
+
+ public:
+  inline explicit SnappyScatteredWriter(const Allocator& allocator)
+      : allocator_(allocator),
+        full_size_(0),
+        op_base_(NULL),
+        op_ptr_(NULL),
+        op_limit_(NULL) {
+  }
+
+  inline void SetExpectedLength(size_t len) {
+    assert(blocks_.empty());
+    expected_ = len;
+  }
+
+  inline bool CheckLength() const {
+    return Size() == expected_;
+  }
+
+  // Return the number of bytes actually uncompressed so far
+  inline size_t Produced() const {
+    return Size();
+  }
+
+  inline bool Append(const char* ip, size_t len) {
+    size_t avail = op_limit_ - op_ptr_;
+    if (len <= avail) {
+      // Fast path
+      memcpy(op_ptr_, ip, len);
+      op_ptr_ += len;
+      return true;
+    } else {
+      return SlowAppend(ip, len);
+    }
+  }
+
+  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+    char* op = op_ptr_;
+    const int space_left = op_limit_ - op;
+    if (length <= 16 && available >= 16 + kMaximumTagLength &&
+        space_left >= 16) {
+      // Fast path, used for the majority (about 95%) of invocations.
+      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+      op_ptr_ = op + length;
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    // See SnappyArrayWriter::AppendFromSelf for an explanation of
+    // the "offset - 1u" trick.
+    if (offset - 1u < op_ptr_ - op_base_) {
+      const size_t space_left = op_limit_ - op_ptr_;
+      if (space_left >= len + kMaxIncrementCopyOverflow) {
+        // Fast path: src and dst in current block.
+        IncrementalCopyFastPath(op_ptr_ - offset, op_ptr_, len);
+        op_ptr_ += len;
+        return true;
+      }
+    }
+    return SlowAppendFromSelf(offset, len);
+  }
+
+  // Called at the end of the decompress. We ask the allocator to
+  // write all blocks to the sink.
+  inline void Flush() { allocator_.Flush(Produced()); }
+};
+
+template<typename Allocator>
+bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
+  size_t avail = op_limit_ - op_ptr_;
+  while (len > avail) {
+    // Completely fill this block
+    memcpy(op_ptr_, ip, avail);
+    op_ptr_ += avail;
+    assert(op_limit_ - op_ptr_ == 0);
+    full_size_ += (op_ptr_ - op_base_);
+    len -= avail;
+    ip += avail;
+
+    // Bounds check
+    if (full_size_ + len > expected_) {
+      return false;
+    }
+
+    // Make new block
+    size_t bsize = min(kBlockSize, expected_ - full_size_);
+    op_base_ = allocator_.Allocate(bsize);
+    op_ptr_ = op_base_;
+    op_limit_ = op_base_ + bsize;
+    blocks_.push_back(op_base_);
+    avail = bsize;
+  }
+
+  memcpy(op_ptr_, ip, len);
+  op_ptr_ += len;
+  return true;
+}
+
+template<typename Allocator>
+bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
+                                                          size_t len) {
+  // Overflow check
+  // See SnappyArrayWriter::AppendFromSelf for an explanation of
+  // the "offset - 1u" trick.
+  const size_t cur = Size();
+  if (offset - 1u >= cur) return false;
+  if (expected_ - cur < len) return false;
+
+  // Currently we shouldn't ever hit this path because Compress() chops the
+  // input into blocks and does not create cross-block copies.
However, it is + // nice if we do not rely on that, since we can get better compression if we + // allow cross-block copies and thus might want to change the compressor in + // the future. + size_t src = cur - offset; + while (len-- > 0) { + char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)]; + Append(&c, 1); + src++; + } + return true; +} + +class SnappySinkAllocator { + public: + explicit SnappySinkAllocator(Sink* dest): dest_(dest) {} + ~SnappySinkAllocator() {} + + char* Allocate(int size) { + Datablock block(new char[size], size); + blocks_.push_back(block); + return block.data; + } + + // We flush only at the end, because the writer wants + // random access to the blocks and once we hand the + // block over to the sink, we can't access it anymore. + // Also we don't write more than has been actually written + // to the blocks. + void Flush(size_t size) { + size_t size_written = 0; + size_t block_size; + for (int i = 0; i < blocks_.size(); ++i) { + block_size = min(blocks_[i].size, size - size_written); + dest_->AppendAndTakeOwnership(blocks_[i].data, block_size, + &SnappySinkAllocator::Deleter, NULL); + size_written += block_size; + } + blocks_.clear(); + } + + private: + struct Datablock { + char* data; + size_t size; + Datablock(char* p, size_t s) : data(p), size(s) {} + }; + + static void Deleter(void* arg, const char* bytes, size_t size) { + delete[] bytes; + } + + Sink* dest_; + vector blocks_; + + // Note: copying this object is allowed +}; + +size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) { + SnappySinkAllocator allocator(uncompressed); + SnappyScatteredWriter writer(allocator); + InternalUncompress(compressed, &writer); + return writer.Produced(); +} + +bool Uncompress(Source* compressed, Sink* uncompressed) { + // Read the uncompressed length from the front of the compressed input + SnappyDecompressor decompressor(compressed); + uint32 uncompressed_len = 0; + if (!decompressor.ReadUncompressedLength(&uncompressed_len)) { + return false; + } + + char c; + size_t allocated_size; + char* buf = uncompressed->GetAppendBufferVariable( + 1, uncompressed_len, &c, 1, &allocated_size); + + // If we can get a flat buffer, then use it, otherwise do block by block + // uncompression + if (allocated_size >= uncompressed_len) { + SnappyArrayWriter writer(buf); + bool result = InternalUncompressAllTags( + &decompressor, &writer, uncompressed_len); + uncompressed->Append(buf, writer.Produced()); + return result; + } else { + SnappySinkAllocator allocator(uncompressed); + SnappyScatteredWriter writer(allocator); + return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len); + } +} + +} // end namespace snappy -- cgit v1.2.3