Diffstat (limited to 'libraries/xz-embedded')
-rw-r--r-- | libraries/xz-embedded/CMakeLists.txt      |   16
-rw-r--r-- | libraries/xz-embedded/include/xz.h        |   36
-rw-r--r-- | libraries/xz-embedded/src/xz_config.h     |   24
-rw-r--r-- | libraries/xz-embedded/src/xz_crc32.c      |   38
-rw-r--r-- | libraries/xz-embedded/src/xz_crc64.c      |   38
-rw-r--r-- | libraries/xz-embedded/src/xz_dec_bcj.c    |  902
-rw-r--r-- | libraries/xz-embedded/src/xz_dec_lzma2.c  | 1810
-rw-r--r-- | libraries/xz-embedded/src/xz_dec_stream.c | 1198
-rw-r--r-- | libraries/xz-embedded/src/xz_lzma2.h      |   46
-rw-r--r-- | libraries/xz-embedded/src/xz_private.h    |    6
-rw-r--r-- | libraries/xz-embedded/src/xz_stream.h     |    8
-rw-r--r-- | libraries/xz-embedded/xzminidec.c         |  216
12 files changed, 2169 insertions, 2169 deletions
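The hunks below are a whitespace-only reindentation (every `-` line reappears as a `+` line with changed leading whitespace, matching the equal insertion/deletion counts). Among the touched files, xz_crc32.c carries the table-driven CRC32 used to verify .xz integrity checks; as a reference while reading those hunks, here is a minimal standalone sketch of the same technique. It mirrors the reflected CRC-32 (polynomial 0xEDB88320) construction visible in the diff, including the branchless `poly & ~((r & 1) - 1)` step that selects the polynomial when the low bit is set; the names `crc32_init`/`crc32` are illustrative and not the library's `xz_crc32_*` API.

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_table[256];

/* Build the 256-entry lookup table for reflected CRC-32.
 * (poly & ~((r & 1) - 1)) is 0 when the low bit of r is clear
 * and poly when it is set, avoiding a branch per bit. */
static void crc32_init(void)
{
	const uint32_t poly = 0xEDB88320;

	for (uint32_t i = 0; i < 256; ++i)
	{
		uint32_t r = i;
		for (uint32_t j = 0; j < 8; ++j)
			r = (r >> 1) ^ (poly & ~((r & 1) - 1));

		crc32_table[i] = r;
	}
}

/* Byte-at-a-time CRC-32 with the conventional inversion on
 * entry and exit, as in xz_crc32() in the diff below. */
static uint32_t crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
	crc = ~crc;
	while (size-- != 0)
		crc = crc32_table[(*buf++ ^ crc) & 0xFF] ^ (crc >> 8);
	return ~crc;
}

int main(void)
{
	crc32_init();
	/* Standard check value: CRC-32 of "123456789" is 0xCBF43926. */
	printf("%08X\n", crc32((const uint8_t *)"123456789", 9, 0));
	return 0;
}
```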
diff --git a/libraries/xz-embedded/CMakeLists.txt b/libraries/xz-embedded/CMakeLists.txt index 5f744671..86ac60c8 100644 --- a/libraries/xz-embedded/CMakeLists.txt +++ b/libraries/xz-embedded/CMakeLists.txt @@ -9,18 +9,18 @@ option(XZ_BUILD_MINIDEC "Build a tiny utility that decompresses xz streams" OFF) # tweak this list and xz.h to fit your needs set(XZ_SOURCES - src/xz_crc32.c - src/xz_crc64.c - src/xz_dec_lzma2.c - src/xz_dec_stream.c -# src/xz_dec_bcj.c + src/xz_crc32.c + src/xz_crc64.c + src/xz_dec_lzma2.c + src/xz_dec_stream.c +# src/xz_dec_bcj.c ) add_library(xz-embedded STATIC ${XZ_SOURCES}) target_include_directories(xz-embedded PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") set_property(TARGET xz-embedded PROPERTY C_STANDARD 99) if(${XZ_BUILD_MINIDEC}) - add_executable(xzminidec xzminidec.c) - target_link_libraries(xzminidec xz-embedded) - set_property(TARGET xzminidec PROPERTY C_STANDARD 99) + add_executable(xzminidec xzminidec.c) + target_link_libraries(xzminidec xz-embedded) + set_property(TARGET xzminidec PROPERTY C_STANDARD 99) endif() diff --git a/libraries/xz-embedded/include/xz.h b/libraries/xz-embedded/include/xz.h index eef8ef69..3779124c 100644 --- a/libraries/xz-embedded/include/xz.h +++ b/libraries/xz-embedded/include/xz.h @@ -69,9 +69,9 @@ extern "C" { */ enum xz_mode { - XZ_SINGLE, - XZ_PREALLOC, - XZ_DYNALLOC + XZ_SINGLE, + XZ_PREALLOC, + XZ_DYNALLOC }; /** @@ -126,15 +126,15 @@ enum xz_mode */ enum xz_ret { - XZ_OK, - XZ_STREAM_END, - XZ_UNSUPPORTED_CHECK, - XZ_MEM_ERROR, - XZ_MEMLIMIT_ERROR, - XZ_FORMAT_ERROR, - XZ_OPTIONS_ERROR, - XZ_DATA_ERROR, - XZ_BUF_ERROR + XZ_OK, + XZ_STREAM_END, + XZ_UNSUPPORTED_CHECK, + XZ_MEM_ERROR, + XZ_MEMLIMIT_ERROR, + XZ_FORMAT_ERROR, + XZ_OPTIONS_ERROR, + XZ_DATA_ERROR, + XZ_BUF_ERROR }; /** @@ -155,13 +155,13 @@ enum xz_ret */ struct xz_buf { - const uint8_t *in; - size_t in_pos; - size_t in_size; + const uint8_t *in; + size_t in_pos; + size_t in_size; - uint8_t *out; - size_t out_pos; - size_t out_size; + uint8_t *out; + size_t out_pos; + size_t out_size; }; /** diff --git a/libraries/xz-embedded/src/xz_config.h b/libraries/xz-embedded/src/xz_config.h index 40805b75..effdb1bd 100644 --- a/libraries/xz-embedded/src/xz_config.h +++ b/libraries/xz-embedded/src/xz_config.h @@ -74,36 +74,36 @@ typedef unsigned char bool; #ifndef get_unaligned_le32 static inline uint32_t get_unaligned_le32(const uint8_t *buf) { - return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16) | - ((uint32_t)buf[3] << 24); + return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16) | + ((uint32_t)buf[3] << 24); } #endif #ifndef get_unaligned_be32 static inline uint32_t get_unaligned_be32(const uint8_t *buf) { - return (uint32_t)(buf[0] << 24) | ((uint32_t)buf[1] << 16) | ((uint32_t)buf[2] << 8) | - (uint32_t)buf[3]; + return (uint32_t)(buf[0] << 24) | ((uint32_t)buf[1] << 16) | ((uint32_t)buf[2] << 8) | + (uint32_t)buf[3]; } #endif #ifndef put_unaligned_le32 static inline void put_unaligned_le32(uint32_t val, uint8_t *buf) { - buf[0] = (uint8_t)val; - buf[1] = (uint8_t)(val >> 8); - buf[2] = (uint8_t)(val >> 16); - buf[3] = (uint8_t)(val >> 24); + buf[0] = (uint8_t)val; + buf[1] = (uint8_t)(val >> 8); + buf[2] = (uint8_t)(val >> 16); + buf[3] = (uint8_t)(val >> 24); } #endif #ifndef put_unaligned_be32 static inline void put_unaligned_be32(uint32_t val, uint8_t *buf) { - buf[0] = (uint8_t)(val >> 24); - buf[1] = (uint8_t)(val >> 16); - buf[2] = (uint8_t)(val >> 8); - buf[3] = (uint8_t)val; + buf[0] = (uint8_t)(val >> 24); 
+ buf[1] = (uint8_t)(val >> 16); + buf[2] = (uint8_t)(val >> 8); + buf[3] = (uint8_t)val; } #endif diff --git a/libraries/xz-embedded/src/xz_crc32.c b/libraries/xz-embedded/src/xz_crc32.c index c412662b..65d9d5b8 100644 --- a/libraries/xz-embedded/src/xz_crc32.c +++ b/libraries/xz-embedded/src/xz_crc32.c @@ -29,33 +29,33 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256]; XZ_EXTERN void xz_crc32_init(void) { - const uint32_t poly = 0xEDB88320; + const uint32_t poly = 0xEDB88320; - uint32_t i; - uint32_t j; - uint32_t r; + uint32_t i; + uint32_t j; + uint32_t r; - for (i = 0; i < 256; ++i) - { - r = i; - for (j = 0; j < 8; ++j) - r = (r >> 1) ^ (poly & ~((r & 1) - 1)); + for (i = 0; i < 256; ++i) + { + r = i; + for (j = 0; j < 8; ++j) + r = (r >> 1) ^ (poly & ~((r & 1) - 1)); - xz_crc32_table[i] = r; - } + xz_crc32_table[i] = r; + } - return; + return; } XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) { - crc = ~crc; + crc = ~crc; - while (size != 0) - { - crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); - --size; - } + while (size != 0) + { + crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); + --size; + } - return ~crc; + return ~crc; } diff --git a/libraries/xz-embedded/src/xz_crc64.c b/libraries/xz-embedded/src/xz_crc64.c index 4794b9d3..0f711d8d 100644 --- a/libraries/xz-embedded/src/xz_crc64.c +++ b/libraries/xz-embedded/src/xz_crc64.c @@ -20,33 +20,33 @@ STATIC_RW_DATA uint64_t xz_crc64_table[256]; XZ_EXTERN void xz_crc64_init(void) { - const uint64_t poly = 0xC96C5795D7870F42; + const uint64_t poly = 0xC96C5795D7870F42; - uint32_t i; - uint32_t j; - uint64_t r; + uint32_t i; + uint32_t j; + uint64_t r; - for (i = 0; i < 256; ++i) - { - r = i; - for (j = 0; j < 8; ++j) - r = (r >> 1) ^ (poly & ~((r & 1) - 1)); + for (i = 0; i < 256; ++i) + { + r = i; + for (j = 0; j < 8; ++j) + r = (r >> 1) ^ (poly & ~((r & 1) - 1)); - xz_crc64_table[i] = r; - } + xz_crc64_table[i] = r; + } - return; + return; } XZ_EXTERN uint64_t xz_crc64(const uint8_t *buf, size_t size, uint64_t crc) { - crc = ~crc; + crc = ~crc; - while (size != 0) - { - crc = xz_crc64_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); - --size; - } + while (size != 0) + { + crc = xz_crc64_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); + --size; + } - return ~crc; + return ~crc; } diff --git a/libraries/xz-embedded/src/xz_dec_bcj.c b/libraries/xz-embedded/src/xz_dec_bcj.c index 9ffda3bd..a79fa76d 100644 --- a/libraries/xz-embedded/src/xz_dec_bcj.c +++ b/libraries/xz-embedded/src/xz_dec_bcj.c @@ -18,64 +18,64 @@ struct xz_dec_bcj { - /* Type of the BCJ filter being used */ - enum - { - BCJ_X86 = 4, /* x86 or x86-64 */ - BCJ_POWERPC = 5, /* Big endian only */ - BCJ_IA64 = 6, /* Big or little endian */ - BCJ_ARM = 7, /* Little endian only */ - BCJ_ARMTHUMB = 8, /* Little endian only */ - BCJ_SPARC = 9 /* Big or little endian */ - } type; - - /* - * Return value of the next filter in the chain. We need to preserve - * this information across calls, because we must not call the next - * filter anymore once it has returned XZ_STREAM_END. - */ - enum xz_ret ret; - - /* True if we are operating in single-call mode. */ - bool single_call; - - /* - * Absolute position relative to the beginning of the uncompressed - * data (in a single .xz Block). We care only about the lowest 32 - * bits so this doesn't need to be uint64_t even with big files. 
- */ - uint32_t pos; - - /* x86 filter state */ - uint32_t x86_prev_mask; - - /* Temporary space to hold the variables from struct xz_buf */ - uint8_t *out; - size_t out_pos; - size_t out_size; - - struct - { - /* Amount of already filtered data in the beginning of buf */ - size_t filtered; - - /* Total amount of data currently stored in buf */ - size_t size; - - /* - * Buffer to hold a mix of filtered and unfiltered data. This - * needs to be big enough to hold Alignment + 2 * Look-ahead: - * - * Type Alignment Look-ahead - * x86 1 4 - * PowerPC 4 0 - * IA-64 16 0 - * ARM 4 0 - * ARM-Thumb 2 2 - * SPARC 4 0 - */ - uint8_t buf[16]; - } temp; + /* Type of the BCJ filter being used */ + enum + { + BCJ_X86 = 4, /* x86 or x86-64 */ + BCJ_POWERPC = 5, /* Big endian only */ + BCJ_IA64 = 6, /* Big or little endian */ + BCJ_ARM = 7, /* Little endian only */ + BCJ_ARMTHUMB = 8, /* Little endian only */ + BCJ_SPARC = 9 /* Big or little endian */ + } type; + + /* + * Return value of the next filter in the chain. We need to preserve + * this information across calls, because we must not call the next + * filter anymore once it has returned XZ_STREAM_END. + */ + enum xz_ret ret; + + /* True if we are operating in single-call mode. */ + bool single_call; + + /* + * Absolute position relative to the beginning of the uncompressed + * data (in a single .xz Block). We care only about the lowest 32 + * bits so this doesn't need to be uint64_t even with big files. + */ + uint32_t pos; + + /* x86 filter state */ + uint32_t x86_prev_mask; + + /* Temporary space to hold the variables from struct xz_buf */ + uint8_t *out; + size_t out_pos; + size_t out_size; + + struct + { + /* Amount of already filtered data in the beginning of buf */ + size_t filtered; + + /* Total amount of data currently stored in buf */ + size_t size; + + /* + * Buffer to hold a mix of filtered and unfiltered data. 
This + * needs to be big enough to hold Alignment + 2 * Look-ahead: + * + * Type Alignment Look-ahead + * x86 1 4 + * PowerPC 4 0 + * IA-64 16 0 + * ARM 4 0 + * ARM-Thumb 2 2 + * SPARC 4 0 + */ + uint8_t buf[16]; + } temp; }; #ifdef XZ_DEC_X86 @@ -85,264 +85,264 @@ struct xz_dec_bcj */ static inline int bcj_x86_test_msbyte(uint8_t b) { - return b == 0x00 || b == 0xFF; + return b == 0x00 || b == 0xFF; } static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - static const bool mask_to_allowed_status[8] = {true, true, true, false, - true, false, false, false}; - - static const uint8_t mask_to_bit_num[8] = {0, 1, 2, 2, 3, 3, 3, 3}; - - size_t i; - size_t prev_pos = (size_t) - 1; - uint32_t prev_mask = s->x86_prev_mask; - uint32_t src; - uint32_t dest; - uint32_t j; - uint8_t b; - - if (size <= 4) - return 0; - - size -= 4; - for (i = 0; i < size; ++i) - { - if ((buf[i] & 0xFE) != 0xE8) - continue; - - prev_pos = i - prev_pos; - if (prev_pos > 3) - { - prev_mask = 0; - } - else - { - prev_mask = (prev_mask << (prev_pos - 1)) & 7; - if (prev_mask != 0) - { - b = buf[i + 4 - mask_to_bit_num[prev_mask]]; - if (!mask_to_allowed_status[prev_mask] || bcj_x86_test_msbyte(b)) - { - prev_pos = i; - prev_mask = (prev_mask << 1) | 1; - continue; - } - } - } - - prev_pos = i; - - if (bcj_x86_test_msbyte(buf[i + 4])) - { - src = get_unaligned_le32(buf + i + 1); - while (true) - { - dest = src - (s->pos + (uint32_t)i + 5); - if (prev_mask == 0) - break; - - j = mask_to_bit_num[prev_mask] * 8; - b = (uint8_t)(dest >> (24 - j)); - if (!bcj_x86_test_msbyte(b)) - break; - - src = dest ^ (((uint32_t)1 << (32 - j)) - 1); - } - - dest &= 0x01FFFFFF; - dest |= (uint32_t)0 - (dest & 0x01000000); - put_unaligned_le32(dest, buf + i + 1); - i += 4; - } - else - { - prev_mask = (prev_mask << 1) | 1; - } - } - - prev_pos = i - prev_pos; - s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1); - return i; + static const bool mask_to_allowed_status[8] = {true, true, true, false, + true, false, false, false}; + + static const uint8_t mask_to_bit_num[8] = {0, 1, 2, 2, 3, 3, 3, 3}; + + size_t i; + size_t prev_pos = (size_t) - 1; + uint32_t prev_mask = s->x86_prev_mask; + uint32_t src; + uint32_t dest; + uint32_t j; + uint8_t b; + + if (size <= 4) + return 0; + + size -= 4; + for (i = 0; i < size; ++i) + { + if ((buf[i] & 0xFE) != 0xE8) + continue; + + prev_pos = i - prev_pos; + if (prev_pos > 3) + { + prev_mask = 0; + } + else + { + prev_mask = (prev_mask << (prev_pos - 1)) & 7; + if (prev_mask != 0) + { + b = buf[i + 4 - mask_to_bit_num[prev_mask]]; + if (!mask_to_allowed_status[prev_mask] || bcj_x86_test_msbyte(b)) + { + prev_pos = i; + prev_mask = (prev_mask << 1) | 1; + continue; + } + } + } + + prev_pos = i; + + if (bcj_x86_test_msbyte(buf[i + 4])) + { + src = get_unaligned_le32(buf + i + 1); + while (true) + { + dest = src - (s->pos + (uint32_t)i + 5); + if (prev_mask == 0) + break; + + j = mask_to_bit_num[prev_mask] * 8; + b = (uint8_t)(dest >> (24 - j)); + if (!bcj_x86_test_msbyte(b)) + break; + + src = dest ^ (((uint32_t)1 << (32 - j)) - 1); + } + + dest &= 0x01FFFFFF; + dest |= (uint32_t)0 - (dest & 0x01000000); + put_unaligned_le32(dest, buf + i + 1); + i += 4; + } + else + { + prev_mask = (prev_mask << 1) | 1; + } + } + + prev_pos = i - prev_pos; + s->x86_prev_mask = prev_pos > 3 ? 
0 : prev_mask << (prev_pos - 1); + return i; } #endif #ifdef XZ_DEC_POWERPC static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t instr; - - for (i = 0; i + 4 <= size; i += 4) - { - instr = get_unaligned_be32(buf + i); - if ((instr & 0xFC000003) == 0x48000001) - { - instr &= 0x03FFFFFC; - instr -= s->pos + (uint32_t)i; - instr &= 0x03FFFFFC; - instr |= 0x48000001; - put_unaligned_be32(instr, buf + i); - } - } - - return i; + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) + { + instr = get_unaligned_be32(buf + i); + if ((instr & 0xFC000003) == 0x48000001) + { + instr &= 0x03FFFFFC; + instr -= s->pos + (uint32_t)i; + instr &= 0x03FFFFFC; + instr |= 0x48000001; + put_unaligned_be32(instr, buf + i); + } + } + + return i; } #endif #ifdef XZ_DEC_IA64 static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - static const uint8_t branch_table[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0}; - - /* - * The local variables take a little bit stack space, but it's less - * than what LZMA2 decoder takes, so it doesn't make sense to reduce - * stack usage here without doing that for the LZMA2 decoder too. - */ - - /* Loop counters */ - size_t i; - size_t j; - - /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ - uint32_t slot; - - /* Bitwise offset of the instruction indicated by slot */ - uint32_t bit_pos; - - /* bit_pos split into byte and bit parts */ - uint32_t byte_pos; - uint32_t bit_res; - - /* Address part of an instruction */ - uint32_t addr; - - /* Mask used to detect which instructions to convert */ - uint32_t mask; - - /* 41-bit instruction stored somewhere in the lowest 48 bits */ - uint64_t instr; - - /* Instruction normalized with bit_res for easier manipulation */ - uint64_t norm; - - for (i = 0; i + 16 <= size; i += 16) - { - mask = branch_table[buf[i] & 0x1F]; - for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) - { - if (((mask >> slot) & 1) == 0) - continue; - - byte_pos = bit_pos >> 3; - bit_res = bit_pos & 7; - instr = 0; - for (j = 0; j < 6; ++j) - instr |= (uint64_t)(buf[i + j + byte_pos]) << (8 * j); - - norm = instr >> bit_res; - - if (((norm >> 37) & 0x0F) == 0x05 && ((norm >> 9) & 0x07) == 0) - { - addr = (norm >> 13) & 0x0FFFFF; - addr |= ((uint32_t)(norm >> 36) & 1) << 20; - addr <<= 4; - addr -= s->pos + (uint32_t)i; - addr >>= 4; - - norm &= ~((uint64_t)0x8FFFFF << 13); - norm |= (uint64_t)(addr & 0x0FFFFF) << 13; - norm |= (uint64_t)(addr & 0x100000) << (36 - 20); - - instr &= (1 << bit_res) - 1; - instr |= norm << bit_res; - - for (j = 0; j < 6; j++) - buf[i + j + byte_pos] = (uint8_t)(instr >> (8 * j)); - } - } - } - - return i; + static const uint8_t branch_table[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0}; + + /* + * The local variables take a little bit stack space, but it's less + * than what LZMA2 decoder takes, so it doesn't make sense to reduce + * stack usage here without doing that for the LZMA2 decoder too. 
+ */ + + /* Loop counters */ + size_t i; + size_t j; + + /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ + uint32_t slot; + + /* Bitwise offset of the instruction indicated by slot */ + uint32_t bit_pos; + + /* bit_pos split into byte and bit parts */ + uint32_t byte_pos; + uint32_t bit_res; + + /* Address part of an instruction */ + uint32_t addr; + + /* Mask used to detect which instructions to convert */ + uint32_t mask; + + /* 41-bit instruction stored somewhere in the lowest 48 bits */ + uint64_t instr; + + /* Instruction normalized with bit_res for easier manipulation */ + uint64_t norm; + + for (i = 0; i + 16 <= size; i += 16) + { + mask = branch_table[buf[i] & 0x1F]; + for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) + { + if (((mask >> slot) & 1) == 0) + continue; + + byte_pos = bit_pos >> 3; + bit_res = bit_pos & 7; + instr = 0; + for (j = 0; j < 6; ++j) + instr |= (uint64_t)(buf[i + j + byte_pos]) << (8 * j); + + norm = instr >> bit_res; + + if (((norm >> 37) & 0x0F) == 0x05 && ((norm >> 9) & 0x07) == 0) + { + addr = (norm >> 13) & 0x0FFFFF; + addr |= ((uint32_t)(norm >> 36) & 1) << 20; + addr <<= 4; + addr -= s->pos + (uint32_t)i; + addr >>= 4; + + norm &= ~((uint64_t)0x8FFFFF << 13); + norm |= (uint64_t)(addr & 0x0FFFFF) << 13; + norm |= (uint64_t)(addr & 0x100000) << (36 - 20); + + instr &= (1 << bit_res) - 1; + instr |= norm << bit_res; + + for (j = 0; j < 6; j++) + buf[i + j + byte_pos] = (uint8_t)(instr >> (8 * j)); + } + } + } + + return i; } #endif #ifdef XZ_DEC_ARM static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t addr; - - for (i = 0; i + 4 <= size; i += 4) - { - if (buf[i + 3] == 0xEB) - { - addr = - (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) | ((uint32_t)buf[i + 2] << 16); - addr <<= 2; - addr -= s->pos + (uint32_t)i + 8; - addr >>= 2; - buf[i] = (uint8_t)addr; - buf[i + 1] = (uint8_t)(addr >> 8); - buf[i + 2] = (uint8_t)(addr >> 16); - } - } - - return i; + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 4) + { + if (buf[i + 3] == 0xEB) + { + addr = + (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) | ((uint32_t)buf[i + 2] << 16); + addr <<= 2; + addr -= s->pos + (uint32_t)i + 8; + addr >>= 2; + buf[i] = (uint8_t)addr; + buf[i + 1] = (uint8_t)(addr >> 8); + buf[i + 2] = (uint8_t)(addr >> 16); + } + } + + return i; } #endif #ifdef XZ_DEC_ARMTHUMB static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t addr; - - for (i = 0; i + 4 <= size; i += 2) - { - if ((buf[i + 1] & 0xF8) == 0xF0 && (buf[i + 3] & 0xF8) == 0xF8) - { - addr = (((uint32_t)buf[i + 1] & 0x07) << 19) | ((uint32_t)buf[i] << 11) | - (((uint32_t)buf[i + 3] & 0x07) << 8) | (uint32_t)buf[i + 2]; - addr <<= 1; - addr -= s->pos + (uint32_t)i + 4; - addr >>= 1; - buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); - buf[i] = (uint8_t)(addr >> 11); - buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07)); - buf[i + 2] = (uint8_t)addr; - i += 2; - } - } - - return i; + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 2) + { + if ((buf[i + 1] & 0xF8) == 0xF0 && (buf[i + 3] & 0xF8) == 0xF8) + { + addr = (((uint32_t)buf[i + 1] & 0x07) << 19) | ((uint32_t)buf[i] << 11) | + (((uint32_t)buf[i + 3] & 0x07) << 8) | (uint32_t)buf[i + 2]; + addr <<= 1; + addr -= s->pos + (uint32_t)i + 4; + addr >>= 1; + buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); + buf[i] = (uint8_t)(addr >> 11); + buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07)); + buf[i + 2] = (uint8_t)addr; 
+ i += 2; + } + } + + return i; } #endif #ifdef XZ_DEC_SPARC static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) { - size_t i; - uint32_t instr; - - for (i = 0; i + 4 <= size; i += 4) - { - instr = get_unaligned_be32(buf + i); - if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) - { - instr <<= 2; - instr -= s->pos + (uint32_t)i; - instr >>= 2; - instr = - ((uint32_t)0x40000000 - (instr & 0x400000)) | 0x40000000 | (instr & 0x3FFFFF); - put_unaligned_be32(instr, buf + i); - } - } - - return i; + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) + { + instr = get_unaligned_be32(buf + i); + if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) + { + instr <<= 2; + instr -= s->pos + (uint32_t)i; + instr >>= 2; + instr = + ((uint32_t)0x40000000 - (instr & 0x400000)) | 0x40000000 | (instr & 0x3FFFFF); + put_unaligned_be32(instr, buf + i); + } + } + + return i; } #endif @@ -356,51 +356,51 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) */ static void bcj_apply(struct xz_dec_bcj *s, uint8_t *buf, size_t *pos, size_t size) { - size_t filtered; + size_t filtered; - buf += *pos; - size -= *pos; + buf += *pos; + size -= *pos; - switch (s->type) - { + switch (s->type) + { #ifdef XZ_DEC_X86 - case BCJ_X86: - filtered = bcj_x86(s, buf, size); - break; + case BCJ_X86: + filtered = bcj_x86(s, buf, size); + break; #endif #ifdef XZ_DEC_POWERPC - case BCJ_POWERPC: - filtered = bcj_powerpc(s, buf, size); - break; + case BCJ_POWERPC: + filtered = bcj_powerpc(s, buf, size); + break; #endif #ifdef XZ_DEC_IA64 - case BCJ_IA64: - filtered = bcj_ia64(s, buf, size); - break; + case BCJ_IA64: + filtered = bcj_ia64(s, buf, size); + break; #endif #ifdef XZ_DEC_ARM - case BCJ_ARM: - filtered = bcj_arm(s, buf, size); - break; + case BCJ_ARM: + filtered = bcj_arm(s, buf, size); + break; #endif #ifdef XZ_DEC_ARMTHUMB - case BCJ_ARMTHUMB: - filtered = bcj_armthumb(s, buf, size); - break; + case BCJ_ARMTHUMB: + filtered = bcj_armthumb(s, buf, size); + break; #endif #ifdef XZ_DEC_SPARC - case BCJ_SPARC: - filtered = bcj_sparc(s, buf, size); - break; + case BCJ_SPARC: + filtered = bcj_sparc(s, buf, size); + break; #endif - default: - /* Never reached but silence compiler warnings. */ - filtered = 0; - break; - } - - *pos += filtered; - s->pos += filtered; + default: + /* Never reached but silence compiler warnings. */ + filtered = 0; + break; + } + + *pos += filtered; + s->pos += filtered; } /* @@ -410,15 +410,15 @@ static void bcj_apply(struct xz_dec_bcj *s, uint8_t *buf, size_t *pos, size_t si */ static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) { - size_t copy_size; + size_t copy_size; - copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); - memcpy(b->out + b->out_pos, s->temp.buf, copy_size); - b->out_pos += copy_size; + copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); + memcpy(b->out + b->out_pos, s->temp.buf, copy_size); + b->out_pos += copy_size; - s->temp.filtered -= copy_size; - s->temp.size -= copy_size; - memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); + s->temp.filtered -= copy_size; + s->temp.size -= copy_size; + memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); } /* @@ -427,162 +427,162 @@ static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) * some buffering. 
*/ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2, - struct xz_buf *b) + struct xz_buf *b) { - size_t out_start; - - /* - * Flush pending already filtered data to the output buffer. Return - * immediatelly if we couldn't flush everything, or if the next - * filter in the chain had already returned XZ_STREAM_END. - */ - if (s->temp.filtered > 0) - { - bcj_flush(s, b); - if (s->temp.filtered > 0) - return XZ_OK; - - if (s->ret == XZ_STREAM_END) - return XZ_STREAM_END; - } - - /* - * If we have more output space than what is currently pending in - * temp, copy the unfiltered data from temp to the output buffer - * and try to fill the output buffer by decoding more data from the - * next filter in the chain. Apply the BCJ filter on the new data - * in the output buffer. If everything cannot be filtered, copy it - * to temp and rewind the output buffer position accordingly. - * - * This needs to be always run when temp.size == 0 to handle a special - * case where the output buffer is full and the next filter has no - * more output coming but hasn't returned XZ_STREAM_END yet. - */ - if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) - { - out_start = b->out_pos; - memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); - b->out_pos += s->temp.size; - - s->ret = xz_dec_lzma2_run(lzma2, b); - if (s->ret != XZ_STREAM_END && (s->ret != XZ_OK || s->single_call)) - return s->ret; - - bcj_apply(s, b->out, &out_start, b->out_pos); - - /* - * As an exception, if the next filter returned XZ_STREAM_END, - * we can do that too, since the last few bytes that remain - * unfiltered are meant to remain unfiltered. - */ - if (s->ret == XZ_STREAM_END) - return XZ_STREAM_END; - - s->temp.size = b->out_pos - out_start; - b->out_pos -= s->temp.size; - memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); - - /* - * If there wasn't enough input to the next filter to fill - * the output buffer with unfiltered data, there's no point - * to try decoding more data to temp. - */ - if (b->out_pos + s->temp.size < b->out_size) - return XZ_OK; - } - - /* - * We have unfiltered data in temp. If the output buffer isn't full - * yet, try to fill the temp buffer by decoding more data from the - * next filter. Apply the BCJ filter on temp. Then we hopefully can - * fill the actual output buffer by copying filtered data from temp. - * A mix of filtered and unfiltered data may be left in temp; it will - * be taken care on the next call to this function. - */ - if (b->out_pos < b->out_size) - { - /* Make b->out{,_pos,_size} temporarily point to s->temp. */ - s->out = b->out; - s->out_pos = b->out_pos; - s->out_size = b->out_size; - b->out = s->temp.buf; - b->out_pos = s->temp.size; - b->out_size = sizeof(s->temp.buf); - - s->ret = xz_dec_lzma2_run(lzma2, b); - - s->temp.size = b->out_pos; - b->out = s->out; - b->out_pos = s->out_pos; - b->out_size = s->out_size; - - if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) - return s->ret; - - bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); - - /* - * If the next filter returned XZ_STREAM_END, we mark that - * everything is filtered, since the last unfiltered bytes - * of the stream are meant to be left as is. - */ - if (s->ret == XZ_STREAM_END) - s->temp.filtered = s->temp.size; - - bcj_flush(s, b); - if (s->temp.filtered > 0) - return XZ_OK; - } - - return s->ret; + size_t out_start; + + /* + * Flush pending already filtered data to the output buffer. 
Return + * immediatelly if we couldn't flush everything, or if the next + * filter in the chain had already returned XZ_STREAM_END. + */ + if (s->temp.filtered > 0) + { + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + } + + /* + * If we have more output space than what is currently pending in + * temp, copy the unfiltered data from temp to the output buffer + * and try to fill the output buffer by decoding more data from the + * next filter in the chain. Apply the BCJ filter on the new data + * in the output buffer. If everything cannot be filtered, copy it + * to temp and rewind the output buffer position accordingly. + * + * This needs to be always run when temp.size == 0 to handle a special + * case where the output buffer is full and the next filter has no + * more output coming but hasn't returned XZ_STREAM_END yet. + */ + if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) + { + out_start = b->out_pos; + memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); + b->out_pos += s->temp.size; + + s->ret = xz_dec_lzma2_run(lzma2, b); + if (s->ret != XZ_STREAM_END && (s->ret != XZ_OK || s->single_call)) + return s->ret; + + bcj_apply(s, b->out, &out_start, b->out_pos); + + /* + * As an exception, if the next filter returned XZ_STREAM_END, + * we can do that too, since the last few bytes that remain + * unfiltered are meant to remain unfiltered. + */ + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + + s->temp.size = b->out_pos - out_start; + b->out_pos -= s->temp.size; + memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); + + /* + * If there wasn't enough input to the next filter to fill + * the output buffer with unfiltered data, there's no point + * to try decoding more data to temp. + */ + if (b->out_pos + s->temp.size < b->out_size) + return XZ_OK; + } + + /* + * We have unfiltered data in temp. If the output buffer isn't full + * yet, try to fill the temp buffer by decoding more data from the + * next filter. Apply the BCJ filter on temp. Then we hopefully can + * fill the actual output buffer by copying filtered data from temp. + * A mix of filtered and unfiltered data may be left in temp; it will + * be taken care on the next call to this function. + */ + if (b->out_pos < b->out_size) + { + /* Make b->out{,_pos,_size} temporarily point to s->temp. */ + s->out = b->out; + s->out_pos = b->out_pos; + s->out_size = b->out_size; + b->out = s->temp.buf; + b->out_pos = s->temp.size; + b->out_size = sizeof(s->temp.buf); + + s->ret = xz_dec_lzma2_run(lzma2, b); + + s->temp.size = b->out_pos; + b->out = s->out; + b->out_pos = s->out_pos; + b->out_size = s->out_size; + + if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) + return s->ret; + + bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); + + /* + * If the next filter returned XZ_STREAM_END, we mark that + * everything is filtered, since the last unfiltered bytes + * of the stream are meant to be left as is. 
+ */ + if (s->ret == XZ_STREAM_END) + s->temp.filtered = s->temp.size; + + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + } + + return s->ret; } XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call) { - struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL); - if (s != NULL) - s->single_call = single_call; + struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s != NULL) + s->single_call = single_call; - return s; + return s; } XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id) { - switch (id) - { + switch (id) + { #ifdef XZ_DEC_X86 - case BCJ_X86: + case BCJ_X86: #endif #ifdef XZ_DEC_POWERPC - case BCJ_POWERPC: + case BCJ_POWERPC: #endif #ifdef XZ_DEC_IA64 - case BCJ_IA64: + case BCJ_IA64: #endif #ifdef XZ_DEC_ARM - case BCJ_ARM: + case BCJ_ARM: #endif #ifdef XZ_DEC_ARMTHUMB - case BCJ_ARMTHUMB: + case BCJ_ARMTHUMB: #endif #ifdef XZ_DEC_SPARC - case BCJ_SPARC: + case BCJ_SPARC: #endif - break; + break; - default: - /* Unsupported Filter ID */ - return XZ_OPTIONS_ERROR; - } + default: + /* Unsupported Filter ID */ + return XZ_OPTIONS_ERROR; + } - s->type = id; - s->ret = XZ_OK; - s->pos = 0; - s->x86_prev_mask = 0; - s->temp.filtered = 0; - s->temp.size = 0; + s->type = id; + s->ret = XZ_OK; + s->pos = 0; + s->x86_prev_mask = 0; + s->temp.filtered = 0; + s->temp.size = 0; - return XZ_OK; + return XZ_OK; } #endif diff --git a/libraries/xz-embedded/src/xz_dec_lzma2.c b/libraries/xz-embedded/src/xz_dec_lzma2.c index 3d7b9a2e..365ace2b 100644 --- a/libraries/xz-embedded/src/xz_dec_lzma2.c +++ b/libraries/xz-embedded/src/xz_dec_lzma2.c @@ -43,244 +43,244 @@ */ struct dictionary { - /* Beginning of the history buffer */ - uint8_t *buf; - - /* Old position in buf (before decoding more data) */ - size_t start; - - /* Position in buf */ - size_t pos; - - /* - * How full dictionary is. This is used to detect corrupt input that - * would read beyond the beginning of the uncompressed stream. - */ - size_t full; - - /* Write limit; we don't write to buf[limit] or later bytes. */ - size_t limit; - - /* - * End of the dictionary buffer. In multi-call mode, this is - * the same as the dictionary size. In single-call mode, this - * indicates the size of the output buffer. - */ - size_t end; - - /* - * Size of the dictionary as specified in Block Header. This is used - * together with "full" to detect corrupt input that would make us - * read beyond the beginning of the uncompressed stream. - */ - uint32_t size; - - /* - * Maximum allowed dictionary size in multi-call mode. - * This is ignored in single-call mode. - */ - uint32_t size_max; - - /* - * Amount of memory currently allocated for the dictionary. - * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, - * size_max is always the same as the allocated size.) - */ - uint32_t allocated; - - /* Operation mode */ - enum xz_mode mode; + /* Beginning of the history buffer */ + uint8_t *buf; + + /* Old position in buf (before decoding more data) */ + size_t start; + + /* Position in buf */ + size_t pos; + + /* + * How full dictionary is. This is used to detect corrupt input that + * would read beyond the beginning of the uncompressed stream. + */ + size_t full; + + /* Write limit; we don't write to buf[limit] or later bytes. */ + size_t limit; + + /* + * End of the dictionary buffer. In multi-call mode, this is + * the same as the dictionary size. In single-call mode, this + * indicates the size of the output buffer. 
+ */ + size_t end; + + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + uint32_t size; + + /* + * Maximum allowed dictionary size in multi-call mode. + * This is ignored in single-call mode. + */ + uint32_t size_max; + + /* + * Amount of memory currently allocated for the dictionary. + * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, + * size_max is always the same as the allocated size.) + */ + uint32_t allocated; + + /* Operation mode */ + enum xz_mode mode; }; /* Range decoder */ struct rc_dec { - uint32_t range; - uint32_t code; - - /* - * Number of initializing bytes remaining to be read - * by rc_read_init(). - */ - uint32_t init_bytes_left; - - /* - * Buffer from which we read our input. It can be either - * temp.buf or the caller-provided input buffer. - */ - const uint8_t *in; - size_t in_pos; - size_t in_limit; + uint32_t range; + uint32_t code; + + /* + * Number of initializing bytes remaining to be read + * by rc_read_init(). + */ + uint32_t init_bytes_left; + + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + const uint8_t *in; + size_t in_pos; + size_t in_limit; }; /* Probabilities for a length decoder. */ struct lzma_len_dec { - /* Probability of match length being at least 10 */ - uint16_t choice; + /* Probability of match length being at least 10 */ + uint16_t choice; - /* Probability of match length being at least 18 */ - uint16_t choice2; + /* Probability of match length being at least 18 */ + uint16_t choice2; - /* Probabilities for match lengths 2-9 */ - uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; + /* Probabilities for match lengths 2-9 */ + uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; - /* Probabilities for match lengths 10-17 */ - uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; + /* Probabilities for match lengths 10-17 */ + uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; - /* Probabilities for match lengths 18-273 */ - uint16_t high[LEN_HIGH_SYMBOLS]; + /* Probabilities for match lengths 18-273 */ + uint16_t high[LEN_HIGH_SYMBOLS]; }; struct lzma_dec { - /* Distances of latest four matches */ - uint32_t rep0; - uint32_t rep1; - uint32_t rep2; - uint32_t rep3; - - /* Types of the most recently seen LZMA symbols */ - enum lzma_state state; - - /* - * Length of a match. This is updated so that dict_repeat can - * be called again to finish repeating the whole match. - */ - uint32_t len; - - /* - * LZMA properties or related bit masks (number of literal - * context bits, a mask dervied from the number of literal - * position bits, and a mask dervied from the number - * position bits) - */ - uint32_t lc; - uint32_t literal_pos_mask; /* (1 << lp) - 1 */ - uint32_t pos_mask; /* (1 << pb) - 1 */ - - /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ - uint16_t is_match[STATES][POS_STATES_MAX]; - - /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ - uint16_t is_rep[STATES]; - - /* - * If 0, distance of a repeated match is rep0. - * Otherwise check is_rep1. - */ - uint16_t is_rep0[STATES]; - - /* - * If 0, distance of a repeated match is rep1. - * Otherwise check is_rep2. - */ - uint16_t is_rep1[STATES]; - - /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ - uint16_t is_rep2[STATES]; - - /* - * If 1, the repeated match has length of one byte. 
Otherwise - * the length is decoded from rep_len_decoder. - */ - uint16_t is_rep0_long[STATES][POS_STATES_MAX]; - - /* - * Probability tree for the highest two bits of the match - * distance. There is a separate probability tree for match - * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. - */ - uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; - - /* - * Probility trees for additional bits for match distance - * when the distance is in the range [4, 127]. - */ - uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; - - /* - * Probability tree for the lowest four bits of a match - * distance that is equal to or greater than 128. - */ - uint16_t dist_align[ALIGN_SIZE]; - - /* Length of a normal match */ - struct lzma_len_dec match_len_dec; - - /* Length of a repeated match */ - struct lzma_len_dec rep_len_dec; - - /* Probabilities of literals */ - uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; + /* Distances of latest four matches */ + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + + /* Types of the most recently seen LZMA symbols */ + enum lzma_state state; + + /* + * Length of a match. This is updated so that dict_repeat can + * be called again to finish repeating the whole match. + */ + uint32_t len; + + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask dervied from the number of literal + * position bits, and a mask dervied from the number + * position bits) + */ + uint32_t lc; + uint32_t literal_pos_mask; /* (1 << lp) - 1 */ + uint32_t pos_mask; /* (1 << pb) - 1 */ + + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + uint16_t is_match[STATES][POS_STATES_MAX]; + + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ + uint16_t is_rep[STATES]; + + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. + */ + uint16_t is_rep0[STATES]; + + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + uint16_t is_rep1[STATES]; + + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + uint16_t is_rep2[STATES]; + + /* + * If 1, the repeated match has length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + uint16_t is_rep0_long[STATES][POS_STATES_MAX]; + + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. + */ + uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; + + /* + * Probility trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; + + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + uint16_t dist_align[ALIGN_SIZE]; + + /* Length of a normal match */ + struct lzma_len_dec match_len_dec; + + /* Length of a repeated match */ + struct lzma_len_dec rep_len_dec; + + /* Probabilities of literals */ + uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; }; struct lzma2_dec { - /* Position in xz_dec_lzma2_run(). */ - enum lzma2_seq - { - SEQ_CONTROL, - SEQ_UNCOMPRESSED_1, - SEQ_UNCOMPRESSED_2, - SEQ_COMPRESSED_0, - SEQ_COMPRESSED_1, - SEQ_PROPERTIES, - SEQ_LZMA_PREPARE, - SEQ_LZMA_RUN, - SEQ_COPY - } sequence; - - /* Next position after decoding the compressed size of the chunk. 
*/ - enum lzma2_seq next_sequence; - - /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ - uint32_t uncompressed; - - /* - * Compressed size of LZMA chunk or compressed/uncompressed - * size of uncompressed chunk (64 KiB at maximum) - */ - uint32_t compressed; - - /* - * True if dictionary reset is needed. This is false before - * the first chunk (LZMA or uncompressed). - */ - bool need_dict_reset; - - /* - * True if new LZMA properties are needed. This is false - * before the first LZMA chunk. - */ - bool need_props; + /* Position in xz_dec_lzma2_run(). */ + enum lzma2_seq + { + SEQ_CONTROL, + SEQ_UNCOMPRESSED_1, + SEQ_UNCOMPRESSED_2, + SEQ_COMPRESSED_0, + SEQ_COMPRESSED_1, + SEQ_PROPERTIES, + SEQ_LZMA_PREPARE, + SEQ_LZMA_RUN, + SEQ_COPY + } sequence; + + /* Next position after decoding the compressed size of the chunk. */ + enum lzma2_seq next_sequence; + + /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ + uint32_t uncompressed; + + /* + * Compressed size of LZMA chunk or compressed/uncompressed + * size of uncompressed chunk (64 KiB at maximum) + */ + uint32_t compressed; + + /* + * True if dictionary reset is needed. This is false before + * the first chunk (LZMA or uncompressed). + */ + bool need_dict_reset; + + /* + * True if new LZMA properties are needed. This is false + * before the first LZMA chunk. + */ + bool need_props; }; struct xz_dec_lzma2 { - /* - * The order below is important on x86 to reduce code size and - * it shouldn't hurt on other platforms. Everything up to and - * including lzma.pos_mask are in the first 128 bytes on x86-32, - * which allows using smaller instructions to access those - * variables. On x86-64, fewer variables fit into the first 128 - * bytes, but this is still the best order without sacrificing - * the readability by splitting the structures. - */ - struct rc_dec rc; - struct dictionary dict; - struct lzma2_dec lzma2; - struct lzma_dec lzma; - - /* - * Temporary buffer which holds small number of input bytes between - * decoder calls. See lzma2_lzma() for details. - */ - struct - { - uint32_t size; - uint8_t buf[3 * LZMA_IN_REQUIRED]; - } temp; + /* + * The order below is important on x86 to reduce code size and + * it shouldn't hurt on other platforms. Everything up to and + * including lzma.pos_mask are in the first 128 bytes on x86-32, + * which allows using smaller instructions to access those + * variables. On x86-64, fewer variables fit into the first 128 + * bytes, but this is still the best order without sacrificing + * the readability by splitting the structures. + */ + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + + /* + * Temporary buffer which holds small number of input bytes between + * decoder calls. See lzma2_lzma() for details. 
+ */ + struct + { + uint32_t size; + uint8_t buf[3 * LZMA_IN_REQUIRED]; + } temp; }; /************** @@ -293,31 +293,31 @@ struct xz_dec_lzma2 */ static void dict_reset(struct dictionary *dict, struct xz_buf *b) { - if (DEC_IS_SINGLE(dict->mode)) - { - dict->buf = b->out + b->out_pos; - dict->end = b->out_size - b->out_pos; - } - - dict->start = 0; - dict->pos = 0; - dict->limit = 0; - dict->full = 0; + if (DEC_IS_SINGLE(dict->mode)) + { + dict->buf = b->out + b->out_pos; + dict->end = b->out_size - b->out_pos; + } + + dict->start = 0; + dict->pos = 0; + dict->limit = 0; + dict->full = 0; } /* Set dictionary write limit */ static void dict_limit(struct dictionary *dict, size_t out_max) { - if (dict->end - dict->pos <= out_max) - dict->limit = dict->end; - else - dict->limit = dict->pos + out_max; + if (dict->end - dict->pos <= out_max) + dict->limit = dict->end; + else + dict->limit = dict->pos + out_max; } /* Return true if at least one byte can be written into the dictionary. */ static inline bool dict_has_space(const struct dictionary *dict) { - return dict->pos < dict->limit; + return dict->pos < dict->limit; } /* @@ -328,12 +328,12 @@ static inline bool dict_has_space(const struct dictionary *dict) */ static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist) { - size_t offset = dict->pos - dist - 1; + size_t offset = dict->pos - dist - 1; - if (dist >= dict->pos) - offset += dict->end; + if (dist >= dict->pos) + offset += dict->end; - return dict->full > 0 ? dict->buf[offset] : 0; + return dict->full > 0 ? dict->buf[offset] : 0; } /* @@ -341,10 +341,10 @@ static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist) */ static inline void dict_put(struct dictionary *dict, uint8_t byte) { - dict->buf[dict->pos++] = byte; + dict->buf[dict->pos++] = byte; - if (dict->full < dict->pos) - dict->full = dict->pos; + if (dict->full < dict->pos) + dict->full = dict->pos; } /* @@ -354,66 +354,66 @@ static inline void dict_put(struct dictionary *dict, uint8_t byte) */ static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist) { - size_t back; - uint32_t left; + size_t back; + uint32_t left; - if (dist >= dict->full || dist >= dict->size) - return false; + if (dist >= dict->full || dist >= dict->size) + return false; - left = min_t(size_t, dict->limit - dict->pos, *len); - *len -= left; + left = min_t(size_t, dict->limit - dict->pos, *len); + *len -= left; - back = dict->pos - dist - 1; - if (dist >= dict->pos) - back += dict->end; + back = dict->pos - dist - 1; + if (dist >= dict->pos) + back += dict->end; - do - { - dict->buf[dict->pos++] = dict->buf[back++]; - if (back == dict->end) - back = 0; - } while (--left > 0); + do + { + dict->buf[dict->pos++] = dict->buf[back++]; + if (back == dict->end) + back = 0; + } while (--left > 0); - if (dict->full < dict->pos) - dict->full = dict->pos; + if (dict->full < dict->pos) + dict->full = dict->pos; - return true; + return true; } /* Copy uncompressed data as is from input to dictionary and output buffers. 
*/ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, uint32_t *left) { - size_t copy_size; + size_t copy_size; - while (*left > 0 && b->in_pos < b->in_size && b->out_pos < b->out_size) - { - copy_size = min(b->in_size - b->in_pos, b->out_size - b->out_pos); - if (copy_size > dict->end - dict->pos) - copy_size = dict->end - dict->pos; - if (copy_size > *left) - copy_size = *left; + while (*left > 0 && b->in_pos < b->in_size && b->out_pos < b->out_size) + { + copy_size = min(b->in_size - b->in_pos, b->out_size - b->out_pos); + if (copy_size > dict->end - dict->pos) + copy_size = dict->end - dict->pos; + if (copy_size > *left) + copy_size = *left; - *left -= copy_size; + *left -= copy_size; - memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); - dict->pos += copy_size; + memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); + dict->pos += copy_size; - if (dict->full < dict->pos) - dict->full = dict->pos; + if (dict->full < dict->pos) + dict->full = dict->pos; - if (DEC_IS_MULTI(dict->mode)) - { - if (dict->pos == dict->end) - dict->pos = 0; + if (DEC_IS_MULTI(dict->mode)) + { + if (dict->pos == dict->end) + dict->pos = 0; - memcpy(b->out + b->out_pos, b->in + b->in_pos, copy_size); - } + memcpy(b->out + b->out_pos, b->in + b->in_pos, copy_size); + } - dict->start = dict->pos; + dict->start = dict->pos; - b->out_pos += copy_size; - b->in_pos += copy_size; - } + b->out_pos += copy_size; + b->in_pos += copy_size; + } } /* @@ -423,19 +423,19 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, uint32_ */ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b) { - size_t copy_size = dict->pos - dict->start; + size_t copy_size = dict->pos - dict->start; - if (DEC_IS_MULTI(dict->mode)) - { - if (dict->pos == dict->end) - dict->pos = 0; + if (DEC_IS_MULTI(dict->mode)) + { + if (dict->pos == dict->end) + dict->pos = 0; - memcpy(b->out + b->out_pos, dict->buf + dict->start, copy_size); - } + memcpy(b->out + b->out_pos, dict->buf + dict->start, copy_size); + } - dict->start = dict->pos; - b->out_pos += copy_size; - return copy_size; + dict->start = dict->pos; + b->out_pos += copy_size; + return copy_size; } /***************** @@ -445,9 +445,9 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b) /* Reset the range decoder. */ static void rc_reset(struct rc_dec *rc) { - rc->range = (uint32_t) - 1; - rc->code = 0; - rc->init_bytes_left = RC_INIT_BYTES; + rc->range = (uint32_t) - 1; + rc->code = 0; + rc->init_bytes_left = RC_INIT_BYTES; } /* @@ -456,22 +456,22 @@ static void rc_reset(struct rc_dec *rc) */ static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b) { - while (rc->init_bytes_left > 0) - { - if (b->in_pos == b->in_size) - return false; + while (rc->init_bytes_left > 0) + { + if (b->in_pos == b->in_size) + return false; - rc->code = (rc->code << 8) + b->in[b->in_pos++]; - --rc->init_bytes_left; - } + rc->code = (rc->code << 8) + b->in[b->in_pos++]; + --rc->init_bytes_left; + } - return true; + return true; } /* Return true if there may not be enough input for the next decoding loop. */ static inline bool rc_limit_exceeded(const struct rc_dec *rc) { - return rc->in_pos > rc->in_limit; + return rc->in_pos > rc->in_limit; } /* @@ -480,17 +480,17 @@ static inline bool rc_limit_exceeded(const struct rc_dec *rc) */ static inline bool rc_is_finished(const struct rc_dec *rc) { - return rc->code == 0; + return rc->code == 0; } /* Read the next input byte if needed. 
*/ static __always_inline void rc_normalize(struct rc_dec *rc) { - if (rc->range < RC_TOP_VALUE) - { - rc->range <<= RC_SHIFT_BITS; - rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; - } + if (rc->range < RC_TOP_VALUE) + { + rc->range <<= RC_SHIFT_BITS; + rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; + } } /* @@ -506,79 +506,79 @@ static __always_inline void rc_normalize(struct rc_dec *rc) */ static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob) { - uint32_t bound; - int bit; - - rc_normalize(rc); - bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; - if (rc->code < bound) - { - rc->range = bound; - *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; - bit = 0; - } - else - { - rc->range -= bound; - rc->code -= bound; - *prob -= *prob >> RC_MOVE_BITS; - bit = 1; - } - - return bit; + uint32_t bound; + int bit; + + rc_normalize(rc); + bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; + if (rc->code < bound) + { + rc->range = bound; + *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; + bit = 0; + } + else + { + rc->range -= bound; + rc->code -= bound; + *prob -= *prob >> RC_MOVE_BITS; + bit = 1; + } + + return bit; } /* Decode a bittree starting from the most significant bit. */ static __always_inline uint32_t rc_bittree(struct rc_dec *rc, uint16_t *probs, uint32_t limit) { - uint32_t symbol = 1; + uint32_t symbol = 1; - do - { - if (rc_bit(rc, &probs[symbol])) - symbol = (symbol << 1) + 1; - else - symbol <<= 1; - } while (symbol < limit); + do + { + if (rc_bit(rc, &probs[symbol])) + symbol = (symbol << 1) + 1; + else + symbol <<= 1; + } while (symbol < limit); - return symbol; + return symbol; } /* Decode a bittree starting from the least significant bit. */ static __always_inline void rc_bittree_reverse(struct rc_dec *rc, uint16_t *probs, - uint32_t *dest, uint32_t limit) + uint32_t *dest, uint32_t limit) { - uint32_t symbol = 1; - uint32_t i = 0; - - do - { - if (rc_bit(rc, &probs[symbol])) - { - symbol = (symbol << 1) + 1; - *dest += 1 << i; - } - else - { - symbol <<= 1; - } - } while (++i < limit); + uint32_t symbol = 1; + uint32_t i = 0; + + do + { + if (rc_bit(rc, &probs[symbol])) + { + symbol = (symbol << 1) + 1; + *dest += 1 << i; + } + else + { + symbol <<= 1; + } + } while (++i < limit); } /* Decode direct bits (fixed fifty-fifty probability) */ static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit) { - uint32_t mask; - - do - { - rc_normalize(rc); - rc->range >>= 1; - rc->code -= rc->range; - mask = (uint32_t)0 - (rc->code >> 31); - rc->code += rc->range & mask; - *dest = (*dest << 1) + (mask + 1); - } while (--limit > 0); + uint32_t mask; + + do + { + rc_normalize(rc); + rc->range >>= 1; + rc->code -= rc->range; + mask = (uint32_t)0 - (rc->code >> 31); + rc->code += rc->range & mask; + *dest = (*dest << 1) + (mask + 1); + } while (--limit > 0); } /******** @@ -588,128 +588,128 @@ static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit) /* Get pointer to literal coder probability array. 
*/ static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s) { - uint32_t prev_byte = dict_get(&s->dict, 0); - uint32_t low = prev_byte >> (8 - s->lzma.lc); - uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; - return s->lzma.literal[low + high]; + uint32_t prev_byte = dict_get(&s->dict, 0); + uint32_t low = prev_byte >> (8 - s->lzma.lc); + uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; + return s->lzma.literal[low + high]; } /* Decode a literal (one 8-bit byte) */ static void lzma_literal(struct xz_dec_lzma2 *s) { - uint16_t *probs; - uint32_t symbol; - uint32_t match_byte; - uint32_t match_bit; - uint32_t offset; - uint32_t i; - - probs = lzma_literal_probs(s); - - if (lzma_state_is_literal(s->lzma.state)) - { - symbol = rc_bittree(&s->rc, probs, 0x100); - } - else - { - symbol = 1; - match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; - offset = 0x100; - - do - { - match_bit = match_byte & offset; - match_byte <<= 1; - i = offset + match_bit + symbol; - - if (rc_bit(&s->rc, &probs[i])) - { - symbol = (symbol << 1) + 1; - offset &= match_bit; - } - else - { - symbol <<= 1; - offset &= ~match_bit; - } - } while (symbol < 0x100); - } - - dict_put(&s->dict, (uint8_t)symbol); - lzma_state_literal(&s->lzma.state); + uint16_t *probs; + uint32_t symbol; + uint32_t match_byte; + uint32_t match_bit; + uint32_t offset; + uint32_t i; + + probs = lzma_literal_probs(s); + + if (lzma_state_is_literal(s->lzma.state)) + { + symbol = rc_bittree(&s->rc, probs, 0x100); + } + else + { + symbol = 1; + match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; + offset = 0x100; + + do + { + match_bit = match_byte & offset; + match_byte <<= 1; + i = offset + match_bit + symbol; + + if (rc_bit(&s->rc, &probs[i])) + { + symbol = (symbol << 1) + 1; + offset &= match_bit; + } + else + { + symbol <<= 1; + offset &= ~match_bit; + } + } while (symbol < 0x100); + } + + dict_put(&s->dict, (uint8_t)symbol); + lzma_state_literal(&s->lzma.state); } /* Decode the length of the match into s->lzma.len. */ static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, uint32_t pos_state) { - uint16_t *probs; - uint32_t limit; - - if (!rc_bit(&s->rc, &l->choice)) - { - probs = l->low[pos_state]; - limit = LEN_LOW_SYMBOLS; - s->lzma.len = MATCH_LEN_MIN; - } - else - { - if (!rc_bit(&s->rc, &l->choice2)) - { - probs = l->mid[pos_state]; - limit = LEN_MID_SYMBOLS; - s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; - } - else - { - probs = l->high; - limit = LEN_HIGH_SYMBOLS; - s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; - } - } - - s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; + uint16_t *probs; + uint32_t limit; + + if (!rc_bit(&s->rc, &l->choice)) + { + probs = l->low[pos_state]; + limit = LEN_LOW_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN; + } + else + { + if (!rc_bit(&s->rc, &l->choice2)) + { + probs = l->mid[pos_state]; + limit = LEN_MID_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; + } + else + { + probs = l->high; + limit = LEN_HIGH_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; + } + } + + s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; } /* Decode a match. The distance will be stored in s->lzma.rep0. 
*/ static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) { - uint16_t *probs; - uint32_t dist_slot; - uint32_t limit; - - lzma_state_match(&s->lzma.state); - - s->lzma.rep3 = s->lzma.rep2; - s->lzma.rep2 = s->lzma.rep1; - s->lzma.rep1 = s->lzma.rep0; - - lzma_len(s, &s->lzma.match_len_dec, pos_state); - - probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; - dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; - - if (dist_slot < DIST_MODEL_START) - { - s->lzma.rep0 = dist_slot; - } - else - { - limit = (dist_slot >> 1) - 1; - s->lzma.rep0 = 2 + (dist_slot & 1); - - if (dist_slot < DIST_MODEL_END) - { - s->lzma.rep0 <<= limit; - probs = s->lzma.dist_special + s->lzma.rep0 - dist_slot - 1; - rc_bittree_reverse(&s->rc, probs, &s->lzma.rep0, limit); - } - else - { - rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); - s->lzma.rep0 <<= ALIGN_BITS; - rc_bittree_reverse(&s->rc, s->lzma.dist_align, &s->lzma.rep0, ALIGN_BITS); - } - } + uint16_t *probs; + uint32_t dist_slot; + uint32_t limit; + + lzma_state_match(&s->lzma.state); + + s->lzma.rep3 = s->lzma.rep2; + s->lzma.rep2 = s->lzma.rep1; + s->lzma.rep1 = s->lzma.rep0; + + lzma_len(s, &s->lzma.match_len_dec, pos_state); + + probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; + dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; + + if (dist_slot < DIST_MODEL_START) + { + s->lzma.rep0 = dist_slot; + } + else + { + limit = (dist_slot >> 1) - 1; + s->lzma.rep0 = 2 + (dist_slot & 1); + + if (dist_slot < DIST_MODEL_END) + { + s->lzma.rep0 <<= limit; + probs = s->lzma.dist_special + s->lzma.rep0 - dist_slot - 1; + rc_bittree_reverse(&s->rc, probs, &s->lzma.rep0, limit); + } + else + { + rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); + s->lzma.rep0 <<= ALIGN_BITS; + rc_bittree_reverse(&s->rc, s->lzma.dist_align, &s->lzma.rep0, ALIGN_BITS); + } + } } /* @@ -718,89 +718,89 @@ static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) */ static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) { - uint32_t tmp; - - if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) - { - if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[s->lzma.state][pos_state])) - { - lzma_state_short_rep(&s->lzma.state); - s->lzma.len = 1; - return; - } - } - else - { - if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) - { - tmp = s->lzma.rep1; - } - else - { - if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) - { - tmp = s->lzma.rep2; - } - else - { - tmp = s->lzma.rep3; - s->lzma.rep3 = s->lzma.rep2; - } - - s->lzma.rep2 = s->lzma.rep1; - } - - s->lzma.rep1 = s->lzma.rep0; - s->lzma.rep0 = tmp; - } - - lzma_state_long_rep(&s->lzma.state); - lzma_len(s, &s->lzma.rep_len_dec, pos_state); + uint32_t tmp; + + if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) + { + if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[s->lzma.state][pos_state])) + { + lzma_state_short_rep(&s->lzma.state); + s->lzma.len = 1; + return; + } + } + else + { + if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) + { + tmp = s->lzma.rep1; + } + else + { + if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) + { + tmp = s->lzma.rep2; + } + else + { + tmp = s->lzma.rep3; + s->lzma.rep3 = s->lzma.rep2; + } + + s->lzma.rep2 = s->lzma.rep1; + } + + s->lzma.rep1 = s->lzma.rep0; + s->lzma.rep0 = tmp; + } + + lzma_state_long_rep(&s->lzma.state); + lzma_len(s, &s->lzma.rep_len_dec, pos_state); } /* LZMA decoder core */ static bool lzma_main(struct xz_dec_lzma2 *s) { - uint32_t pos_state; - - /* - * If the dictionary was reached during 
the previous call, try to - * finish the possibly pending repeat in the dictionary. - */ - if (dict_has_space(&s->dict) && s->lzma.len > 0) - dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); - - /* - * Decode more LZMA symbols. One iteration may consume up to - * LZMA_IN_REQUIRED - 1 bytes. - */ - while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) - { - pos_state = s->dict.pos & s->lzma.pos_mask; - - if (!rc_bit(&s->rc, &s->lzma.is_match[s->lzma.state][pos_state])) - { - lzma_literal(s); - } - else - { - if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) - lzma_rep_match(s, pos_state); - else - lzma_match(s, pos_state); - - if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) - return false; - } - } - - /* - * Having the range decoder always normalized when we are outside - * this function makes it easier to correctly handle end of the chunk. - */ - rc_normalize(&s->rc); - - return true; + uint32_t pos_state; + + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if (dict_has_space(&s->dict) && s->lzma.len > 0) + dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); + + /* + * Decode more LZMA symbols. One iteration may consume up to + * LZMA_IN_REQUIRED - 1 bytes. + */ + while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) + { + pos_state = s->dict.pos & s->lzma.pos_mask; + + if (!rc_bit(&s->rc, &s->lzma.is_match[s->lzma.state][pos_state])) + { + lzma_literal(s); + } + else + { + if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) + lzma_rep_match(s, pos_state); + else + lzma_match(s, pos_state); + + if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) + return false; + } + } + + /* + * Having the range decoder always normalized when we are outside + * this function makes it easier to correctly handle end of the chunk. + */ + rc_normalize(&s->rc); + + return true; } /* @@ -809,29 +809,29 @@ static bool lzma_main(struct xz_dec_lzma2 *s) */ static void lzma_reset(struct xz_dec_lzma2 *s) { - uint16_t *probs; - size_t i; - - s->lzma.state = STATE_LIT_LIT; - s->lzma.rep0 = 0; - s->lzma.rep1 = 0; - s->lzma.rep2 = 0; - s->lzma.rep3 = 0; - - /* - * All probabilities are initialized to the same value. This hack - * makes the code smaller by avoiding a separate loop for each - * probability array. - * - * This could be optimized so that only that part of literal - * probabilities that are actually required. In the common case - * we would write 12 KiB less. - */ - probs = s->lzma.is_match[0]; - for (i = 0; i < PROBS_TOTAL; ++i) - probs[i] = RC_BIT_MODEL_TOTAL / 2; - - rc_reset(&s->rc); + uint16_t *probs; + size_t i; + + s->lzma.state = STATE_LIT_LIT; + s->lzma.rep0 = 0; + s->lzma.rep1 = 0; + s->lzma.rep2 = 0; + s->lzma.rep3 = 0; + + /* + * All probabilities are initialized to the same value. This hack + * makes the code smaller by avoiding a separate loop for each + * probability array. + * + * This could be optimized so that only that part of literal + * probabilities that are actually required. In the common case + * we would write 12 KiB less. 
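A sketch of that narrower initialization (the helper name is invented;
RC_BIT_MODEL_TOTAL is the same constant used below):

    static void lzma_reset_literal_probs(uint16_t *literal,
                                         uint32_t lc, uint32_t lp)
    {
        /* Only 0x300 << (lc + lp) entries are reachable. With the
         * common lc = 3, lp = 0 this writes 6144 entries (12 KiB)
         * instead of the full 12288-entry (24 KiB) table. */
        uint32_t used = (uint32_t)0x300 << (lc + lp);
        uint32_t i;

        for (i = 0; i < used; ++i)
            literal[i] = RC_BIT_MODEL_TOTAL / 2;
    }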
+ */ + probs = s->lzma.is_match[0]; + for (i = 0; i < PROBS_TOTAL; ++i) + probs[i] = RC_BIT_MODEL_TOTAL / 2; + + rc_reset(&s->rc); } /* @@ -841,35 +841,35 @@ static void lzma_reset(struct xz_dec_lzma2 *s) */ static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props) { - if (props > (4 * 5 + 4) * 9 + 8) - return false; + if (props > (4 * 5 + 4) * 9 + 8) + return false; - s->lzma.pos_mask = 0; - while (props >= 9 * 5) - { - props -= 9 * 5; - ++s->lzma.pos_mask; - } + s->lzma.pos_mask = 0; + while (props >= 9 * 5) + { + props -= 9 * 5; + ++s->lzma.pos_mask; + } - s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; + s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; - s->lzma.literal_pos_mask = 0; - while (props >= 9) - { - props -= 9; - ++s->lzma.literal_pos_mask; - } + s->lzma.literal_pos_mask = 0; + while (props >= 9) + { + props -= 9; + ++s->lzma.literal_pos_mask; + } - s->lzma.lc = props; + s->lzma.lc = props; - if (s->lzma.lc + s->lzma.literal_pos_mask > 4) - return false; + if (s->lzma.lc + s->lzma.literal_pos_mask > 4) + return false; - s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; + s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; - lzma_reset(s); + lzma_reset(s); - return true; + return true; } /********* @@ -890,89 +890,89 @@ static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props) */ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) { - size_t in_avail; - uint32_t tmp; - - in_avail = b->in_size - b->in_pos; - if (s->temp.size > 0 || s->lzma2.compressed == 0) - { - tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; - if (tmp > s->lzma2.compressed - s->temp.size) - tmp = s->lzma2.compressed - s->temp.size; - if (tmp > in_avail) - tmp = in_avail; - - memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); - - if (s->temp.size + tmp == s->lzma2.compressed) - { - memzero(s->temp.buf + s->temp.size + tmp, sizeof(s->temp.buf) - s->temp.size - tmp); - s->rc.in_limit = s->temp.size + tmp; - } - else if (s->temp.size + tmp < LZMA_IN_REQUIRED) - { - s->temp.size += tmp; - b->in_pos += tmp; - return true; - } - else - { - s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; - } - - s->rc.in = s->temp.buf; - s->rc.in_pos = 0; - - if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) - return false; - - s->lzma2.compressed -= s->rc.in_pos; - - if (s->rc.in_pos < s->temp.size) - { - s->temp.size -= s->rc.in_pos; - memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, s->temp.size); - return true; - } - - b->in_pos += s->rc.in_pos - s->temp.size; - s->temp.size = 0; - } - - in_avail = b->in_size - b->in_pos; - if (in_avail >= LZMA_IN_REQUIRED) - { - s->rc.in = b->in; - s->rc.in_pos = b->in_pos; - - if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) - s->rc.in_limit = b->in_pos + s->lzma2.compressed; - else - s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; - - if (!lzma_main(s)) - return false; - - in_avail = s->rc.in_pos - b->in_pos; - if (in_avail > s->lzma2.compressed) - return false; - - s->lzma2.compressed -= in_avail; - b->in_pos = s->rc.in_pos; - } - - in_avail = b->in_size - b->in_pos; - if (in_avail < LZMA_IN_REQUIRED) - { - if (in_avail > s->lzma2.compressed) - in_avail = s->lzma2.compressed; - - memcpy(s->temp.buf, b->in + b->in_pos, in_avail); - s->temp.size = in_avail; - b->in_pos += in_avail; - } - - return true; + size_t in_avail; + uint32_t tmp; + + in_avail = b->in_size - b->in_pos; + if (s->temp.size > 0 || s->lzma2.compressed == 0) + { + tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; + if (tmp > s->lzma2.compressed - s->temp.size) + 
tmp = s->lzma2.compressed - s->temp.size; + if (tmp > in_avail) + tmp = in_avail; + + memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); + + if (s->temp.size + tmp == s->lzma2.compressed) + { + memzero(s->temp.buf + s->temp.size + tmp, sizeof(s->temp.buf) - s->temp.size - tmp); + s->rc.in_limit = s->temp.size + tmp; + } + else if (s->temp.size + tmp < LZMA_IN_REQUIRED) + { + s->temp.size += tmp; + b->in_pos += tmp; + return true; + } + else + { + s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; + } + + s->rc.in = s->temp.buf; + s->rc.in_pos = 0; + + if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) + return false; + + s->lzma2.compressed -= s->rc.in_pos; + + if (s->rc.in_pos < s->temp.size) + { + s->temp.size -= s->rc.in_pos; + memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, s->temp.size); + return true; + } + + b->in_pos += s->rc.in_pos - s->temp.size; + s->temp.size = 0; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail >= LZMA_IN_REQUIRED) + { + s->rc.in = b->in; + s->rc.in_pos = b->in_pos; + + if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) + s->rc.in_limit = b->in_pos + s->lzma2.compressed; + else + s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; + + if (!lzma_main(s)) + return false; + + in_avail = s->rc.in_pos - b->in_pos; + if (in_avail > s->lzma2.compressed) + return false; + + s->lzma2.compressed -= in_avail; + b->in_pos = s->rc.in_pos; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail < LZMA_IN_REQUIRED) + { + if (in_avail > s->lzma2.compressed) + in_avail = s->lzma2.compressed; + + memcpy(s->temp.buf, b->in + b->in_pos, in_avail); + s->temp.size = in_avail; + b->in_pos += in_avail; + } + + return true; } /* @@ -981,251 +981,251 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) */ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b) { - uint32_t tmp; - - while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) - { - switch (s->lzma2.sequence) - { - case SEQ_CONTROL: - /* - * LZMA2 control byte - * - * Exact values: - * 0x00 End marker - * 0x01 Dictionary reset followed by - * an uncompressed chunk - * 0x02 Uncompressed chunk (no dictionary reset) - * - * Highest three bits (s->control & 0xE0): - * 0xE0 Dictionary reset, new properties and state - * reset, followed by LZMA compressed chunk - * 0xC0 New properties and state reset, followed - * by LZMA compressed chunk (no dictionary - * reset) - * 0xA0 State reset using old properties, - * followed by LZMA compressed chunk (no - * dictionary reset) - * 0x80 LZMA chunk (no dictionary or state reset) - * - * For LZMA compressed chunks, the lowest five bits - * (s->control & 1F) are the highest bits of the - * uncompressed size (bits 16-20). - * - * A new LZMA2 stream must begin with a dictionary - * reset. The first LZMA chunk must set new - * properties and reset the LZMA state. - * - * Values that don't match anything described above - * are invalid and we return XZ_DATA_ERROR. - */ - tmp = b->in[b->in_pos++]; - - if (tmp == 0x00) - return XZ_STREAM_END; - - if (tmp >= 0xE0 || tmp == 0x01) - { - s->lzma2.need_props = true; - s->lzma2.need_dict_reset = false; - dict_reset(&s->dict, b); - } - else if (s->lzma2.need_dict_reset) - { - return XZ_DATA_ERROR; - } - - if (tmp >= 0x80) - { - s->lzma2.uncompressed = (tmp & 0x1F) << 16; - s->lzma2.sequence = SEQ_UNCOMPRESSED_1; - - if (tmp >= 0xC0) - { - /* - * When there are new properties, - * state reset is done at - * SEQ_PROPERTIES. 
- */ - s->lzma2.need_props = false; - s->lzma2.next_sequence = SEQ_PROPERTIES; - } - else if (s->lzma2.need_props) - { - return XZ_DATA_ERROR; - } - else - { - s->lzma2.next_sequence = SEQ_LZMA_PREPARE; - if (tmp >= 0xA0) - lzma_reset(s); - } - } - else - { - if (tmp > 0x02) - return XZ_DATA_ERROR; - - s->lzma2.sequence = SEQ_COMPRESSED_0; - s->lzma2.next_sequence = SEQ_COPY; - } - - break; - - case SEQ_UNCOMPRESSED_1: - s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] << 8; - s->lzma2.sequence = SEQ_UNCOMPRESSED_2; - break; - - case SEQ_UNCOMPRESSED_2: - s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] + 1; - s->lzma2.sequence = SEQ_COMPRESSED_0; - break; - - case SEQ_COMPRESSED_0: - s->lzma2.compressed = (uint32_t)b->in[b->in_pos++] << 8; - s->lzma2.sequence = SEQ_COMPRESSED_1; - break; - - case SEQ_COMPRESSED_1: - s->lzma2.compressed += (uint32_t)b->in[b->in_pos++] + 1; - s->lzma2.sequence = s->lzma2.next_sequence; - break; - - case SEQ_PROPERTIES: - if (!lzma_props(s, b->in[b->in_pos++])) - return XZ_DATA_ERROR; - - s->lzma2.sequence = SEQ_LZMA_PREPARE; - - case SEQ_LZMA_PREPARE: - if (s->lzma2.compressed < RC_INIT_BYTES) - return XZ_DATA_ERROR; - - if (!rc_read_init(&s->rc, b)) - return XZ_OK; - - s->lzma2.compressed -= RC_INIT_BYTES; - s->lzma2.sequence = SEQ_LZMA_RUN; - - case SEQ_LZMA_RUN: - /* - * Set dictionary limit to indicate how much we want - * to be encoded at maximum. Decode new data into the - * dictionary. Flush the new data from dictionary to - * b->out. Check if we finished decoding this chunk. - * In case the dictionary got full but we didn't fill - * the output buffer yet, we may run this loop - * multiple times without changing s->lzma2.sequence. - */ - dict_limit(&s->dict, - min_t(size_t, b->out_size - b->out_pos, s->lzma2.uncompressed)); - if (!lzma2_lzma(s, b)) - return XZ_DATA_ERROR; - - s->lzma2.uncompressed -= dict_flush(&s->dict, b); - - if (s->lzma2.uncompressed == 0) - { - if (s->lzma2.compressed > 0 || s->lzma.len > 0 || !rc_is_finished(&s->rc)) - return XZ_DATA_ERROR; - - rc_reset(&s->rc); - s->lzma2.sequence = SEQ_CONTROL; - } - else if (b->out_pos == b->out_size || - (b->in_pos == b->in_size && s->temp.size < s->lzma2.compressed)) - { - return XZ_OK; - } - - break; - - case SEQ_COPY: - dict_uncompressed(&s->dict, b, &s->lzma2.compressed); - if (s->lzma2.compressed > 0) - return XZ_OK; - - s->lzma2.sequence = SEQ_CONTROL; - break; - } - } - - return XZ_OK; + uint32_t tmp; + + while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) + { + switch (s->lzma2.sequence) + { + case SEQ_CONTROL: + /* + * LZMA2 control byte + * + * Exact values: + * 0x00 End marker + * 0x01 Dictionary reset followed by + * an uncompressed chunk + * 0x02 Uncompressed chunk (no dictionary reset) + * + * Highest three bits (s->control & 0xE0): + * 0xE0 Dictionary reset, new properties and state + * reset, followed by LZMA compressed chunk + * 0xC0 New properties and state reset, followed + * by LZMA compressed chunk (no dictionary + * reset) + * 0xA0 State reset using old properties, + * followed by LZMA compressed chunk (no + * dictionary reset) + * 0x80 LZMA chunk (no dictionary or state reset) + * + * For LZMA compressed chunks, the lowest five bits + * (s->control & 1F) are the highest bits of the + * uncompressed size (bits 16-20). + * + * A new LZMA2 stream must begin with a dictionary + * reset. The first LZMA chunk must set new + * properties and reset the LZMA state. 
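Condensed into code, the ranges above classify like this (sketch only;
the helper does not exist in the source). Note that both size fields
are stored minus one, so a single chunk holds at most 64 KiB of
compressed and 2 MiB of uncompressed data:

    static int lzma2_control_class(uint8_t c)
    {
        if (c == 0x00)
            return 0;   /* end marker */
        if (c <= 0x02)
            return 1;   /* uncompressed chunk; 0x01 also resets dict */
        if (c >= 0x80)
            return 2;   /* LZMA chunk; c & 0x1F = size bits 16-20 */
        return -1;      /* 0x03 .. 0x7F are invalid */
    }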
+ * + * Values that don't match anything described above + * are invalid and we return XZ_DATA_ERROR. + */ + tmp = b->in[b->in_pos++]; + + if (tmp == 0x00) + return XZ_STREAM_END; + + if (tmp >= 0xE0 || tmp == 0x01) + { + s->lzma2.need_props = true; + s->lzma2.need_dict_reset = false; + dict_reset(&s->dict, b); + } + else if (s->lzma2.need_dict_reset) + { + return XZ_DATA_ERROR; + } + + if (tmp >= 0x80) + { + s->lzma2.uncompressed = (tmp & 0x1F) << 16; + s->lzma2.sequence = SEQ_UNCOMPRESSED_1; + + if (tmp >= 0xC0) + { + /* + * When there are new properties, + * state reset is done at + * SEQ_PROPERTIES. + */ + s->lzma2.need_props = false; + s->lzma2.next_sequence = SEQ_PROPERTIES; + } + else if (s->lzma2.need_props) + { + return XZ_DATA_ERROR; + } + else + { + s->lzma2.next_sequence = SEQ_LZMA_PREPARE; + if (tmp >= 0xA0) + lzma_reset(s); + } + } + else + { + if (tmp > 0x02) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_COMPRESSED_0; + s->lzma2.next_sequence = SEQ_COPY; + } + + break; + + case SEQ_UNCOMPRESSED_1: + s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_UNCOMPRESSED_2; + break; + + case SEQ_UNCOMPRESSED_2: + s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = SEQ_COMPRESSED_0; + break; + + case SEQ_COMPRESSED_0: + s->lzma2.compressed = (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_COMPRESSED_1; + break; + + case SEQ_COMPRESSED_1: + s->lzma2.compressed += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = s->lzma2.next_sequence; + break; + + case SEQ_PROPERTIES: + if (!lzma_props(s, b->in[b->in_pos++])) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + + case SEQ_LZMA_PREPARE: + if (s->lzma2.compressed < RC_INIT_BYTES) + return XZ_DATA_ERROR; + + if (!rc_read_init(&s->rc, b)) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + case SEQ_LZMA_RUN: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b->out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s->lzma2.sequence. 
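(Each pass can emit at most min(remaining output space, bytes left in
this chunk); the dict_limit() call below sets exactly that bound.)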
+ */ + dict_limit(&s->dict, + min_t(size_t, b->out_size - b->out_pos, s->lzma2.uncompressed)); + if (!lzma2_lzma(s, b)) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if (s->lzma2.uncompressed == 0) + { + if (s->lzma2.compressed > 0 || s->lzma.len > 0 || !rc_is_finished(&s->rc)) + return XZ_DATA_ERROR; + + rc_reset(&s->rc); + s->lzma2.sequence = SEQ_CONTROL; + } + else if (b->out_pos == b->out_size || + (b->in_pos == b->in_size && s->temp.size < s->lzma2.compressed)) + { + return XZ_OK; + } + + break; + + case SEQ_COPY: + dict_uncompressed(&s->dict, b, &s->lzma2.compressed); + if (s->lzma2.compressed > 0) + return XZ_OK; + + s->lzma2.sequence = SEQ_CONTROL; + break; + } + } + + return XZ_OK; } XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max) { - struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); - if (s == NULL) - return NULL; - - s->dict.mode = mode; - s->dict.size_max = dict_max; - - if (DEC_IS_PREALLOC(mode)) - { - s->dict.buf = vmalloc(dict_max); - if (s->dict.buf == NULL) - { - kfree(s); - return NULL; - } - } - else if (DEC_IS_DYNALLOC(mode)) - { - s->dict.buf = NULL; - s->dict.allocated = 0; - } - - return s; + struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->dict.mode = mode; + s->dict.size_max = dict_max; + + if (DEC_IS_PREALLOC(mode)) + { + s->dict.buf = vmalloc(dict_max); + if (s->dict.buf == NULL) + { + kfree(s); + return NULL; + } + } + else if (DEC_IS_DYNALLOC(mode)) + { + s->dict.buf = NULL; + s->dict.allocated = 0; + } + + return s; } XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props) { - /* This limits dictionary size to 3 GiB to keep parsing simpler. */ - if (props > 39) - return XZ_OPTIONS_ERROR; - - s->dict.size = 2 + (props & 1); - s->dict.size <<= (props >> 1) + 11; - - if (DEC_IS_MULTI(s->dict.mode)) - { - if (s->dict.size > s->dict.size_max) - return XZ_MEMLIMIT_ERROR; - - s->dict.end = s->dict.size; - - if (DEC_IS_DYNALLOC(s->dict.mode)) - { - if (s->dict.allocated < s->dict.size) - { - vfree(s->dict.buf); - s->dict.buf = vmalloc(s->dict.size); - if (s->dict.buf == NULL) - { - s->dict.allocated = 0; - return XZ_MEM_ERROR; - } - } - } - } - - s->lzma.len = 0; - - s->lzma2.sequence = SEQ_CONTROL; - s->lzma2.need_dict_reset = true; - - s->temp.size = 0; - - return XZ_OK; + /* This limits dictionary size to 3 GiB to keep parsing simpler. 
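Written out as a standalone sketch (function name assumed), the mapping
computed just below is:

    static uint32_t lzma2_dict_size(uint8_t props)
    {
        /* props = 0 -> 4 KiB, 1 -> 6 KiB, 2 -> 8 KiB, ...,
         * 38 -> 2 GiB, 39 -> 3 GiB (the largest accepted here). */
        return (uint32_t)(2 + (props & 1)) << ((props >> 1) + 11);
    }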
*/ + if (props > 39) + return XZ_OPTIONS_ERROR; + + s->dict.size = 2 + (props & 1); + s->dict.size <<= (props >> 1) + 11; + + if (DEC_IS_MULTI(s->dict.mode)) + { + if (s->dict.size > s->dict.size_max) + return XZ_MEMLIMIT_ERROR; + + s->dict.end = s->dict.size; + + if (DEC_IS_DYNALLOC(s->dict.mode)) + { + if (s->dict.allocated < s->dict.size) + { + vfree(s->dict.buf); + s->dict.buf = vmalloc(s->dict.size); + if (s->dict.buf == NULL) + { + s->dict.allocated = 0; + return XZ_MEM_ERROR; + } + } + } + } + + s->lzma.len = 0; + + s->lzma2.sequence = SEQ_CONTROL; + s->lzma2.need_dict_reset = true; + + s->temp.size = 0; + + return XZ_OK; } XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s) { - if (DEC_IS_MULTI(s->dict.mode)) - vfree(s->dict.buf); + if (DEC_IS_MULTI(s->dict.mode)) + vfree(s->dict.buf); - kfree(s); + kfree(s); } diff --git a/libraries/xz-embedded/src/xz_dec_stream.c b/libraries/xz-embedded/src/xz_dec_stream.c index 6e935ded..f8d7be05 100644 --- a/libraries/xz-embedded/src/xz_dec_stream.c +++ b/libraries/xz-embedded/src/xz_dec_stream.c @@ -19,146 +19,146 @@ /* Hash used to validate the Index field */ struct xz_dec_hash { - vli_type unpadded; - vli_type uncompressed; - uint32_t crc32; + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; }; struct xz_dec { - /* Position in dec_main() */ - enum - { - SEQ_STREAM_HEADER, - SEQ_BLOCK_START, - SEQ_BLOCK_HEADER, - SEQ_BLOCK_UNCOMPRESS, - SEQ_BLOCK_PADDING, - SEQ_BLOCK_CHECK, - SEQ_INDEX, - SEQ_INDEX_PADDING, - SEQ_INDEX_CRC32, - SEQ_STREAM_FOOTER - } sequence; - - /* Position in variable-length integers and Check fields */ - uint32_t pos; - - /* Variable-length integer decoded by dec_vli() */ - vli_type vli; - - /* Saved in_pos and out_pos */ - size_t in_start; - size_t out_start; + /* Position in dec_main() */ + enum + { + SEQ_STREAM_HEADER, + SEQ_BLOCK_START, + SEQ_BLOCK_HEADER, + SEQ_BLOCK_UNCOMPRESS, + SEQ_BLOCK_PADDING, + SEQ_BLOCK_CHECK, + SEQ_INDEX, + SEQ_INDEX_PADDING, + SEQ_INDEX_CRC32, + SEQ_STREAM_FOOTER + } sequence; + + /* Position in variable-length integers and Check fields */ + uint32_t pos; + + /* Variable-length integer decoded by dec_vli() */ + vli_type vli; + + /* Saved in_pos and out_pos */ + size_t in_start; + size_t out_start; #ifdef XZ_USE_CRC64 - /* CRC32 or CRC64 value in Block or CRC32 value in Index */ - uint64_t crc; + /* CRC32 or CRC64 value in Block or CRC32 value in Index */ + uint64_t crc; #else - /* CRC32 value in Block or Index */ - uint32_t crc; + /* CRC32 value in Block or Index */ + uint32_t crc; #endif - /* Type of the integrity check calculated from uncompressed data */ - enum xz_check check_type; - - /* Operation mode */ - enum xz_mode mode; - - /* - * True if the next call to xz_dec_run() is allowed to return - * XZ_BUF_ERROR. - */ - bool allow_buf_error; - - /* Information stored in Block Header */ - struct - { - /* - * Value stored in the Compressed Size field, or - * VLI_UNKNOWN if Compressed Size is not present. - */ - vli_type compressed; - - /* - * Value stored in the Uncompressed Size field, or - * VLI_UNKNOWN if Uncompressed Size is not present. - */ - vli_type uncompressed; - - /* Size of the Block Header field */ - uint32_t size; - } block_header; - - /* Information collected when decoding Blocks */ - struct - { - /* Observed compressed size of the current Block */ - vli_type compressed; - - /* Observed uncompressed size of the current Block */ - vli_type uncompressed; - - /* Number of Blocks decoded so far */ - vli_type count; - - /* - * Hash calculated from the Block sizes. 
This is used to - * validate the Index field. - */ - struct xz_dec_hash hash; - } block; - - /* Variables needed when verifying the Index field */ - struct - { - /* Position in dec_index() */ - enum - { - SEQ_INDEX_COUNT, - SEQ_INDEX_UNPADDED, - SEQ_INDEX_UNCOMPRESSED - } sequence; - - /* Size of the Index in bytes */ - vli_type size; - - /* Number of Records (matches block.count in valid files) */ - vli_type count; - - /* - * Hash calculated from the Records (matches block.hash in - * valid files). - */ - struct xz_dec_hash hash; - } index; - - /* - * Temporary buffer needed to hold Stream Header, Block Header, - * and Stream Footer. The Block Header is the biggest (1 KiB) - * so we reserve space according to that. buf[] has to be aligned - * to a multiple of four bytes; the size_t variables before it - * should guarantee this. - */ - struct - { - size_t pos; - size_t size; - uint8_t buf[1024]; - } temp; - - struct xz_dec_lzma2 *lzma2; + /* Type of the integrity check calculated from uncompressed data */ + enum xz_check check_type; + + /* Operation mode */ + enum xz_mode mode; + + /* + * True if the next call to xz_dec_run() is allowed to return + * XZ_BUF_ERROR. + */ + bool allow_buf_error; + + /* Information stored in Block Header */ + struct + { + /* + * Value stored in the Compressed Size field, or + * VLI_UNKNOWN if Compressed Size is not present. + */ + vli_type compressed; + + /* + * Value stored in the Uncompressed Size field, or + * VLI_UNKNOWN if Uncompressed Size is not present. + */ + vli_type uncompressed; + + /* Size of the Block Header field */ + uint32_t size; + } block_header; + + /* Information collected when decoding Blocks */ + struct + { + /* Observed compressed size of the current Block */ + vli_type compressed; + + /* Observed uncompressed size of the current Block */ + vli_type uncompressed; + + /* Number of Blocks decoded so far */ + vli_type count; + + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + struct xz_dec_hash hash; + } block; + + /* Variables needed when verifying the Index field */ + struct + { + /* Position in dec_index() */ + enum + { + SEQ_INDEX_COUNT, + SEQ_INDEX_UNPADDED, + SEQ_INDEX_UNCOMPRESSED + } sequence; + + /* Size of the Index in bytes */ + vli_type size; + + /* Number of Records (matches block.count in valid files) */ + vli_type count; + + /* + * Hash calculated from the Records (matches block.hash in + * valid files). + */ + struct xz_dec_hash hash; + } index; + + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. buf[] has to be aligned + * to a multiple of four bytes; the size_t variables before it + * should guarantee this. 
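That assumption could be pinned down at build time with a C11 assertion
(sketch; not present in the source):

    #include <stddef.h>

    _Static_assert(offsetof(struct xz_dec, temp.buf) % 4 == 0,
                   "temp.buf must be aligned to a multiple of four");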
+ */ + struct + { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + + struct xz_dec_lzma2 *lzma2; #ifdef XZ_DEC_BCJ - struct xz_dec_bcj *bcj; - bool bcj_active; + struct xz_dec_bcj *bcj; + bool bcj_active; #endif }; #ifdef XZ_DEC_ANY_CHECK /* Sizes of the Check field with different Check IDs */ static const uint8_t check_sizes[16] = {0, 4, 4, 4, 8, 8, 8, 16, - 16, 16, 32, 32, 32, 64, 64, 64}; + 16, 16, 32, 32, 32, 64, 64, 64}; #endif /* @@ -169,52 +169,52 @@ static const uint8_t check_sizes[16] = {0, 4, 4, 4, 8, 8, 8, 16, */ static bool fill_temp(struct xz_dec *s, struct xz_buf *b) { - size_t copy_size = min_t(size_t, b->in_size - b->in_pos, s->temp.size - s->temp.pos); + size_t copy_size = min_t(size_t, b->in_size - b->in_pos, s->temp.size - s->temp.pos); - memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); - b->in_pos += copy_size; - s->temp.pos += copy_size; + memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); + b->in_pos += copy_size; + s->temp.pos += copy_size; - if (s->temp.pos == s->temp.size) - { - s->temp.pos = 0; - return true; - } + if (s->temp.pos == s->temp.size) + { + s->temp.pos = 0; + return true; + } - return false; + return false; } /* Decode a variable-length integer (little-endian base-128 encoding) */ static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, size_t *in_pos, size_t in_size) { - uint8_t byte; + uint8_t byte; - if (s->pos == 0) - s->vli = 0; + if (s->pos == 0) + s->vli = 0; - while (*in_pos < in_size) - { - byte = in[*in_pos]; - ++*in_pos; + while (*in_pos < in_size) + { + byte = in[*in_pos]; + ++*in_pos; - s->vli |= (vli_type)(byte & 0x7F) << s->pos; + s->vli |= (vli_type)(byte & 0x7F) << s->pos; - if ((byte & 0x80) == 0) - { - /* Don't allow non-minimal encodings. */ - if (byte == 0 && s->pos != 0) - return XZ_DATA_ERROR; + if ((byte & 0x80) == 0) + { + /* Don't allow non-minimal encodings. */ + if (byte == 0 && s->pos != 0) + return XZ_DATA_ERROR; - s->pos = 0; - return XZ_STREAM_END; - } + s->pos = 0; + return XZ_STREAM_END; + } - s->pos += 7; - if (s->pos == 7 * VLI_BYTES_MAX) - return XZ_DATA_ERROR; - } + s->pos += 7; + if (s->pos == 7 * VLI_BYTES_MAX) + return XZ_DATA_ERROR; + } - return XZ_OK; + return XZ_OK; } /* @@ -231,73 +231,73 @@ static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, size_t *in_pos, */ static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b) { - enum xz_ret ret; + enum xz_ret ret; - s->in_start = b->in_pos; - s->out_start = b->out_pos; + s->in_start = b->in_pos; + s->out_start = b->out_pos; #ifdef XZ_DEC_BCJ - if (s->bcj_active) - ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); - else + if (s->bcj_active) + ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); + else #endif - ret = xz_dec_lzma2_run(s->lzma2, b); + ret = xz_dec_lzma2_run(s->lzma2, b); - s->block.compressed += b->in_pos - s->in_start; - s->block.uncompressed += b->out_pos - s->out_start; + s->block.compressed += b->in_pos - s->in_start; + s->block.uncompressed += b->out_pos - s->out_start; - /* - * There is no need to separately check for VLI_UNKNOWN, since - * the observed sizes are always smaller than VLI_UNKNOWN. - */ - if (s->block.compressed > s->block_header.compressed || - s->block.uncompressed > s->block_header.uncompressed) - return XZ_DATA_ERROR; + /* + * There is no need to separately check for VLI_UNKNOWN, since + * the observed sizes are always smaller than VLI_UNKNOWN. 
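(VLI_UNKNOWN is the all-ones vli_type value, so any size observed from
real input stays below it. For reference, the little-endian base-128
format parsed by dec_vli() above has this trivial encoder, sketched
with an invented name:

    static size_t vli_encode(vli_type v, uint8_t *out /* >= 9 bytes */)
    {
        size_t n = 0;

        do
        {
            out[n] = (uint8_t)(v & 0x7F);
            v >>= 7;
            if (v != 0)
                out[n] |= 0x80; /* continuation bit */
            ++n;
        } while (v != 0);

        return n;
    }
)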
+ */ + if (s->block.compressed > s->block_header.compressed || + s->block.uncompressed > s->block_header.uncompressed) + return XZ_DATA_ERROR; - if (s->check_type == XZ_CHECK_CRC32) - s->crc = xz_crc32(b->out + s->out_start, b->out_pos - s->out_start, s->crc); + if (s->check_type == XZ_CHECK_CRC32) + s->crc = xz_crc32(b->out + s->out_start, b->out_pos - s->out_start, s->crc); #ifdef XZ_USE_CRC64 - else if (s->check_type == XZ_CHECK_CRC64) - s->crc = xz_crc64(b->out + s->out_start, b->out_pos - s->out_start, s->crc); + else if (s->check_type == XZ_CHECK_CRC64) + s->crc = xz_crc64(b->out + s->out_start, b->out_pos - s->out_start, s->crc); #endif - if (ret == XZ_STREAM_END) - { - if (s->block_header.compressed != VLI_UNKNOWN && - s->block_header.compressed != s->block.compressed) - return XZ_DATA_ERROR; + if (ret == XZ_STREAM_END) + { + if (s->block_header.compressed != VLI_UNKNOWN && + s->block_header.compressed != s->block.compressed) + return XZ_DATA_ERROR; - if (s->block_header.uncompressed != VLI_UNKNOWN && - s->block_header.uncompressed != s->block.uncompressed) - return XZ_DATA_ERROR; + if (s->block_header.uncompressed != VLI_UNKNOWN && + s->block_header.uncompressed != s->block.uncompressed) + return XZ_DATA_ERROR; - s->block.hash.unpadded += s->block_header.size + s->block.compressed; + s->block.hash.unpadded += s->block_header.size + s->block.compressed; #ifdef XZ_DEC_ANY_CHECK - s->block.hash.unpadded += check_sizes[s->check_type]; + s->block.hash.unpadded += check_sizes[s->check_type]; #else - if (s->check_type == XZ_CHECK_CRC32) - s->block.hash.unpadded += 4; - else if (IS_CRC64(s->check_type)) - s->block.hash.unpadded += 8; + if (s->check_type == XZ_CHECK_CRC32) + s->block.hash.unpadded += 4; + else if (IS_CRC64(s->check_type)) + s->block.hash.unpadded += 8; #endif - s->block.hash.uncompressed += s->block.uncompressed; - s->block.hash.crc32 = xz_crc32((const uint8_t *)&s->block.hash, sizeof(s->block.hash), - s->block.hash.crc32); + s->block.hash.uncompressed += s->block.uncompressed; + s->block.hash.crc32 = xz_crc32((const uint8_t *)&s->block.hash, sizeof(s->block.hash), + s->block.hash.crc32); - ++s->block.count; - } + ++s->block.count; + } - return ret; + return ret; } /* Update the Index size and the CRC32 value. */ static void index_update(struct xz_dec *s, const struct xz_buf *b) { - size_t in_used = b->in_pos - s->in_start; - s->index.size += in_used; - s->crc = xz_crc32(b->in + s->in_start, in_used, s->crc); + size_t in_used = b->in_pos - s->in_start; + s->index.size += in_used; + s->crc = xz_crc32(b->in + s->in_start, in_used, s->crc); } /* @@ -310,49 +310,49 @@ static void index_update(struct xz_dec *s, const struct xz_buf *b) */ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) { - enum xz_ret ret; - - do - { - ret = dec_vli(s, b->in, &b->in_pos, b->in_size); - if (ret != XZ_STREAM_END) - { - index_update(s, b); - return ret; - } - - switch (s->index.sequence) - { - case SEQ_INDEX_COUNT: - s->index.count = s->vli; - - /* - * Validate that the Number of Records field - * indicates the same number of Records as - * there were Blocks in the Stream. 
- */ - if (s->index.count != s->block.count) - return XZ_DATA_ERROR; - - s->index.sequence = SEQ_INDEX_UNPADDED; - break; - - case SEQ_INDEX_UNPADDED: - s->index.hash.unpadded += s->vli; - s->index.sequence = SEQ_INDEX_UNCOMPRESSED; - break; - - case SEQ_INDEX_UNCOMPRESSED: - s->index.hash.uncompressed += s->vli; - s->index.hash.crc32 = xz_crc32((const uint8_t *)&s->index.hash, - sizeof(s->index.hash), s->index.hash.crc32); - --s->index.count; - s->index.sequence = SEQ_INDEX_UNPADDED; - break; - } - } while (s->index.count > 0); - - return XZ_STREAM_END; + enum xz_ret ret; + + do + { + ret = dec_vli(s, b->in, &b->in_pos, b->in_size); + if (ret != XZ_STREAM_END) + { + index_update(s, b); + return ret; + } + + switch (s->index.sequence) + { + case SEQ_INDEX_COUNT: + s->index.count = s->vli; + + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if (s->index.count != s->block.count) + return XZ_DATA_ERROR; + + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + + case SEQ_INDEX_UNPADDED: + s->index.hash.unpadded += s->vli; + s->index.sequence = SEQ_INDEX_UNCOMPRESSED; + break; + + case SEQ_INDEX_UNCOMPRESSED: + s->index.hash.uncompressed += s->vli; + s->index.hash.crc32 = xz_crc32((const uint8_t *)&s->index.hash, + sizeof(s->index.hash), s->index.hash.crc32); + --s->index.count; + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + } + } while (s->index.count > 0); + + return XZ_STREAM_END; } /* @@ -362,22 +362,22 @@ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) */ static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b, uint32_t bits) { - do - { - if (b->in_pos == b->in_size) - return XZ_OK; + do + { + if (b->in_pos == b->in_size) + return XZ_OK; - if (((s->crc >> s->pos) & 0xFF) != b->in[b->in_pos++]) - return XZ_DATA_ERROR; + if (((s->crc >> s->pos) & 0xFF) != b->in[b->in_pos++]) + return XZ_DATA_ERROR; - s->pos += 8; + s->pos += 8; - } while (s->pos < bits); + } while (s->pos < bits); - s->crc = 0; - s->pos = 0; + s->crc = 0; + s->pos = 0; - return XZ_STREAM_END; + return XZ_STREAM_END; } #ifdef XZ_DEC_ANY_CHECK @@ -387,358 +387,358 @@ static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b, uint32_t bit */ static bool check_skip(struct xz_dec *s, struct xz_buf *b) { - while (s->pos < check_sizes[s->check_type]) - { - if (b->in_pos == b->in_size) - return false; + while (s->pos < check_sizes[s->check_type]) + { + if (b->in_pos == b->in_size) + return false; - ++b->in_pos; - ++s->pos; - } + ++b->in_pos; + ++s->pos; + } - s->pos = 0; + s->pos = 0; - return true; + return true; } #endif /* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ static enum xz_ret dec_stream_header(struct xz_dec *s) { - if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) - return XZ_FORMAT_ERROR; + if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) + return XZ_FORMAT_ERROR; - if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) != - get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) - return XZ_DATA_ERROR; + if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) != + get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) + return XZ_DATA_ERROR; - if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) - return XZ_OPTIONS_ERROR; + if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) + return XZ_OPTIONS_ERROR; - /* - * Of integrity checks, we support none (Check ID = 0), - * CRC32 (Check ID = 1), and optionally CRC64 (Check ID = 4). 
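(For the full ID space, IDs 0-15 correspond to check sizes of 0, 4, 4,
4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64 and 64 bytes, matching the
check_sizes[] table earlier in this file.)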
- * However, if XZ_DEC_ANY_CHECK is defined, we will accept other - * check types too, but then the check won't be verified and - * a warning (XZ_UNSUPPORTED_CHECK) will be given. - */ - s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; + /* + * Of integrity checks, we support none (Check ID = 0), + * CRC32 (Check ID = 1), and optionally CRC64 (Check ID = 4). + * However, if XZ_DEC_ANY_CHECK is defined, we will accept other + * check types too, but then the check won't be verified and + * a warning (XZ_UNSUPPORTED_CHECK) will be given. + */ + s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; #ifdef XZ_DEC_ANY_CHECK - if (s->check_type > XZ_CHECK_MAX) - return XZ_OPTIONS_ERROR; + if (s->check_type > XZ_CHECK_MAX) + return XZ_OPTIONS_ERROR; - if (s->check_type > XZ_CHECK_CRC32 && !IS_CRC64(s->check_type)) - return XZ_UNSUPPORTED_CHECK; + if (s->check_type > XZ_CHECK_CRC32 && !IS_CRC64(s->check_type)) + return XZ_UNSUPPORTED_CHECK; #else - if (s->check_type > XZ_CHECK_CRC32 && !IS_CRC64(s->check_type)) - return XZ_OPTIONS_ERROR; + if (s->check_type > XZ_CHECK_CRC32 && !IS_CRC64(s->check_type)) + return XZ_OPTIONS_ERROR; #endif - return XZ_OK; + return XZ_OK; } /* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ static enum xz_ret dec_stream_footer(struct xz_dec *s) { - if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) - return XZ_DATA_ERROR; - - if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) - return XZ_DATA_ERROR; - - /* - * Validate Backward Size. Note that we never added the size of the - * Index CRC32 field to s->index.size, thus we use s->index.size / 4 - * instead of s->index.size / 4 - 1. - */ - if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) - return XZ_DATA_ERROR; - - if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) - return XZ_DATA_ERROR; - - /* - * Use XZ_STREAM_END instead of XZ_OK to be more convenient - * for the caller. - */ - return XZ_STREAM_END; + if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) + return XZ_DATA_ERROR; + + if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) + return XZ_DATA_ERROR; + + /* + * Validate Backward Size. Note that we never added the size of the + * Index CRC32 field to s->index.size, thus we use s->index.size / 4 + * instead of s->index.size / 4 - 1. + */ + if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) + return XZ_DATA_ERROR; + + if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) + return XZ_DATA_ERROR; + + /* + * Use XZ_STREAM_END instead of XZ_OK to be more convenient + * for the caller. + */ + return XZ_STREAM_END; } /* Decode the Block Header and initialize the filter chain. */ static enum xz_ret dec_block_header(struct xz_dec *s) { - enum xz_ret ret; + enum xz_ret ret; - /* - * Validate the CRC32. We know that the temp buffer is at least - * eight bytes so this is safe. - */ - s->temp.size -= 4; - if (xz_crc32(s->temp.buf, s->temp.size, 0) != get_le32(s->temp.buf + s->temp.size)) - return XZ_DATA_ERROR; + /* + * Validate the CRC32. We know that the temp buffer is at least + * eight bytes so this is safe. + */ + s->temp.size -= 4; + if (xz_crc32(s->temp.buf, s->temp.size, 0) != get_le32(s->temp.buf + s->temp.size)) + return XZ_DATA_ERROR; - s->temp.pos = 2; + s->temp.pos = 2; /* * Catch unsupported Block Flags. We support only one or two filters * in the chain, so we catch that with the same test. 
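Concretely, the Block Flags byte breaks down as: bits 0-1 hold the
number of filters minus one, bits 2-5 are reserved and must be zero,
bit 6 marks a present Compressed Size field, and bit 7 a present
Uncompressed Size field. Masking with 0x3E (or 0x3F when BCJ support
is compiled out) therefore rejects reserved bits and unsupported
filter counts in a single test.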
*/ #ifdef XZ_DEC_BCJ - if (s->temp.buf[1] & 0x3E) + if (s->temp.buf[1] & 0x3E) #else - if (s->temp.buf[1] & 0x3F) + if (s->temp.buf[1] & 0x3F) #endif - return XZ_OPTIONS_ERROR; - - /* Compressed Size */ - if (s->temp.buf[1] & 0x40) - { - if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) - return XZ_DATA_ERROR; - - s->block_header.compressed = s->vli; - } - else - { - s->block_header.compressed = VLI_UNKNOWN; - } - - /* Uncompressed Size */ - if (s->temp.buf[1] & 0x80) - { - if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) - return XZ_DATA_ERROR; - - s->block_header.uncompressed = s->vli; - } - else - { - s->block_header.uncompressed = VLI_UNKNOWN; - } + return XZ_OPTIONS_ERROR; + + /* Compressed Size */ + if (s->temp.buf[1] & 0x40) + { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.compressed = s->vli; + } + else + { + s->block_header.compressed = VLI_UNKNOWN; + } + + /* Uncompressed Size */ + if (s->temp.buf[1] & 0x80) + { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.uncompressed = s->vli; + } + else + { + s->block_header.uncompressed = VLI_UNKNOWN; + } #ifdef XZ_DEC_BCJ - /* If there are two filters, the first one must be a BCJ filter. */ - s->bcj_active = s->temp.buf[1] & 0x01; - if (s->bcj_active) - { - if (s->temp.size - s->temp.pos < 2) - return XZ_OPTIONS_ERROR; - - ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); - if (ret != XZ_OK) - return ret; - - /* - * We don't support custom start offset, - * so Size of Properties must be zero. - */ - if (s->temp.buf[s->temp.pos++] != 0x00) - return XZ_OPTIONS_ERROR; - } + /* If there are two filters, the first one must be a BCJ filter. */ + s->bcj_active = s->temp.buf[1] & 0x01; + if (s->bcj_active) + { + if (s->temp.size - s->temp.pos < 2) + return XZ_OPTIONS_ERROR; + + ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* + * We don't support custom start offset, + * so Size of Properties must be zero. + */ + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + } #endif - /* Valid Filter Flags always take at least two bytes. */ - if (s->temp.size - s->temp.pos < 2) - return XZ_DATA_ERROR; + /* Valid Filter Flags always take at least two bytes. */ + if (s->temp.size - s->temp.pos < 2) + return XZ_DATA_ERROR; - /* Filter ID = LZMA2 */ - if (s->temp.buf[s->temp.pos++] != 0x21) - return XZ_OPTIONS_ERROR; + /* Filter ID = LZMA2 */ + if (s->temp.buf[s->temp.pos++] != 0x21) + return XZ_OPTIONS_ERROR; - /* Size of Properties = 1-byte Filter Properties */ - if (s->temp.buf[s->temp.pos++] != 0x01) - return XZ_OPTIONS_ERROR; + /* Size of Properties = 1-byte Filter Properties */ + if (s->temp.buf[s->temp.pos++] != 0x01) + return XZ_OPTIONS_ERROR; - /* Filter Properties contains LZMA2 dictionary size. */ - if (s->temp.size - s->temp.pos < 1) - return XZ_DATA_ERROR; + /* Filter Properties contains LZMA2 dictionary size. */ + if (s->temp.size - s->temp.pos < 1) + return XZ_DATA_ERROR; - ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); - if (ret != XZ_OK) - return ret; + ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; - /* The rest must be Header Padding. */ - while (s->temp.pos < s->temp.size) - if (s->temp.buf[s->temp.pos++] != 0x00) - return XZ_OPTIONS_ERROR; + /* The rest must be Header Padding. 
*/ + while (s->temp.pos < s->temp.size) + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; - s->temp.pos = 0; - s->block.compressed = 0; - s->block.uncompressed = 0; + s->temp.pos = 0; + s->block.compressed = 0; + s->block.uncompressed = 0; - return XZ_OK; + return XZ_OK; } static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) { - enum xz_ret ret; - - /* - * Store the start position for the case when we are in the middle - * of the Index field. - */ - s->in_start = b->in_pos; - - while (true) - { - switch (s->sequence) - { - case SEQ_STREAM_HEADER: - /* - * Stream Header is copied to s->temp, and then - * decoded from there. This way if the caller - * gives us only little input at a time, we can - * still keep the Stream Header decoding code - * simple. Similar approach is used in many places - * in this file. - */ - if (!fill_temp(s, b)) - return XZ_OK; - - /* - * If dec_stream_header() returns - * XZ_UNSUPPORTED_CHECK, it is still possible - * to continue decoding if working in multi-call - * mode. Thus, update s->sequence before calling - * dec_stream_header(). - */ - s->sequence = SEQ_BLOCK_START; - - ret = dec_stream_header(s); - if (ret != XZ_OK) - return ret; - - case SEQ_BLOCK_START: - /* We need one byte of input to continue. */ - if (b->in_pos == b->in_size) - return XZ_OK; - - /* See if this is the beginning of the Index field. */ - if (b->in[b->in_pos] == 0) - { - s->in_start = b->in_pos++; - s->sequence = SEQ_INDEX; - break; - } - - /* - * Calculate the size of the Block Header and - * prepare to decode it. - */ - s->block_header.size = ((uint32_t)b->in[b->in_pos] + 1) * 4; - - s->temp.size = s->block_header.size; - s->temp.pos = 0; - s->sequence = SEQ_BLOCK_HEADER; - - case SEQ_BLOCK_HEADER: - if (!fill_temp(s, b)) - return XZ_OK; - - ret = dec_block_header(s); - if (ret != XZ_OK) - return ret; - - s->sequence = SEQ_BLOCK_UNCOMPRESS; - - case SEQ_BLOCK_UNCOMPRESS: - ret = dec_block(s, b); - if (ret != XZ_STREAM_END) - return ret; - - s->sequence = SEQ_BLOCK_PADDING; - - case SEQ_BLOCK_PADDING: - /* - * Size of Compressed Data + Block Padding - * must be a multiple of four. We don't need - * s->block.compressed for anything else - * anymore, so we use it here to test the size - * of the Block Padding field. - */ - while (s->block.compressed & 3) - { - if (b->in_pos == b->in_size) - return XZ_OK; - - if (b->in[b->in_pos++] != 0) - return XZ_DATA_ERROR; - - ++s->block.compressed; - } - - s->sequence = SEQ_BLOCK_CHECK; - - case SEQ_BLOCK_CHECK: - if (s->check_type == XZ_CHECK_CRC32) - { - ret = crc_validate(s, b, 32); - if (ret != XZ_STREAM_END) - return ret; - } - else if (IS_CRC64(s->check_type)) - { - ret = crc_validate(s, b, 64); - if (ret != XZ_STREAM_END) - return ret; - } + enum xz_ret ret; + + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s->in_start = b->in_pos; + + while (true) + { + switch (s->sequence) + { + case SEQ_STREAM_HEADER: + /* + * Stream Header is copied to s->temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if (!fill_temp(s, b)) + return XZ_OK; + + /* + * If dec_stream_header() returns + * XZ_UNSUPPORTED_CHECK, it is still possible + * to continue decoding if working in multi-call + * mode. Thus, update s->sequence before calling + * dec_stream_header(). 
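(XZ_UNSUPPORTED_CHECK is a warning rather than an error: in the
multi-call modes the caller can simply invoke xz_dec_run() again and
decoding continues, only without integrity verification.)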
+ */ + s->sequence = SEQ_BLOCK_START; + + ret = dec_stream_header(s); + if (ret != XZ_OK) + return ret; + + case SEQ_BLOCK_START: + /* We need one byte of input to continue. */ + if (b->in_pos == b->in_size) + return XZ_OK; + + /* See if this is the beginning of the Index field. */ + if (b->in[b->in_pos] == 0) + { + s->in_start = b->in_pos++; + s->sequence = SEQ_INDEX; + break; + } + + /* + * Calculate the size of the Block Header and + * prepare to decode it. + */ + s->block_header.size = ((uint32_t)b->in[b->in_pos] + 1) * 4; + + s->temp.size = s->block_header.size; + s->temp.pos = 0; + s->sequence = SEQ_BLOCK_HEADER; + + case SEQ_BLOCK_HEADER: + if (!fill_temp(s, b)) + return XZ_OK; + + ret = dec_block_header(s); + if (ret != XZ_OK) + return ret; + + s->sequence = SEQ_BLOCK_UNCOMPRESS; + + case SEQ_BLOCK_UNCOMPRESS: + ret = dec_block(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_BLOCK_PADDING; + + case SEQ_BLOCK_PADDING: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + while (s->block.compressed & 3) + { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + + ++s->block.compressed; + } + + s->sequence = SEQ_BLOCK_CHECK; + + case SEQ_BLOCK_CHECK: + if (s->check_type == XZ_CHECK_CRC32) + { + ret = crc_validate(s, b, 32); + if (ret != XZ_STREAM_END) + return ret; + } + else if (IS_CRC64(s->check_type)) + { + ret = crc_validate(s, b, 64); + if (ret != XZ_STREAM_END) + return ret; + } #ifdef XZ_DEC_ANY_CHECK - else if (!check_skip(s, b)) - { - return XZ_OK; - } + else if (!check_skip(s, b)) + { + return XZ_OK; + } #endif - s->sequence = SEQ_BLOCK_START; - break; + s->sequence = SEQ_BLOCK_START; + break; - case SEQ_INDEX: - ret = dec_index(s, b); - if (ret != XZ_STREAM_END) - return ret; + case SEQ_INDEX: + ret = dec_index(s, b); + if (ret != XZ_STREAM_END) + return ret; - s->sequence = SEQ_INDEX_PADDING; + s->sequence = SEQ_INDEX_PADDING; - case SEQ_INDEX_PADDING: - while ((s->index.size + (b->in_pos - s->in_start)) & 3) - { - if (b->in_pos == b->in_size) - { - index_update(s, b); - return XZ_OK; - } + case SEQ_INDEX_PADDING: + while ((s->index.size + (b->in_pos - s->in_start)) & 3) + { + if (b->in_pos == b->in_size) + { + index_update(s, b); + return XZ_OK; + } - if (b->in[b->in_pos++] != 0) - return XZ_DATA_ERROR; - } + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + } - /* Finish the CRC32 value and Index size. */ - index_update(s, b); + /* Finish the CRC32 value and Index size. */ + index_update(s, b); - /* Compare the hashes to validate the Index field. */ - if (!memeq(&s->block.hash, &s->index.hash, sizeof(s->block.hash))) - return XZ_DATA_ERROR; + /* Compare the hashes to validate the Index field. 
*/ + if (!memeq(&s->block.hash, &s->index.hash, sizeof(s->block.hash))) + return XZ_DATA_ERROR; - s->sequence = SEQ_INDEX_CRC32; + s->sequence = SEQ_INDEX_CRC32; - case SEQ_INDEX_CRC32: - ret = crc_validate(s, b, 32); - if (ret != XZ_STREAM_END) - return ret; + case SEQ_INDEX_CRC32: + ret = crc_validate(s, b, 32); + if (ret != XZ_STREAM_END) + return ret; - s->temp.size = STREAM_HEADER_SIZE; - s->sequence = SEQ_STREAM_FOOTER; + s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_FOOTER; - case SEQ_STREAM_FOOTER: - if (!fill_temp(s, b)) - return XZ_OK; + case SEQ_STREAM_FOOTER: + if (!fill_temp(s, b)) + return XZ_OK; - return dec_stream_footer(s); - } - } + return dec_stream_footer(s); + } + } - /* Never reached */ + /* Never reached */ } /* @@ -768,93 +768,93 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) */ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b) { - size_t in_start; - size_t out_start; - enum xz_ret ret; - - if (DEC_IS_SINGLE(s->mode)) - xz_dec_reset(s); - - in_start = b->in_pos; - out_start = b->out_pos; - ret = dec_main(s, b); - - if (DEC_IS_SINGLE(s->mode)) - { - if (ret == XZ_OK) - ret = b->in_pos == b->in_size ? XZ_DATA_ERROR : XZ_BUF_ERROR; - - if (ret != XZ_STREAM_END) - { - b->in_pos = in_start; - b->out_pos = out_start; - } - } - else if (ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos) - { - if (s->allow_buf_error) - ret = XZ_BUF_ERROR; - - s->allow_buf_error = true; - } - else - { - s->allow_buf_error = false; - } - - return ret; + size_t in_start; + size_t out_start; + enum xz_ret ret; + + if (DEC_IS_SINGLE(s->mode)) + xz_dec_reset(s); + + in_start = b->in_pos; + out_start = b->out_pos; + ret = dec_main(s, b); + + if (DEC_IS_SINGLE(s->mode)) + { + if (ret == XZ_OK) + ret = b->in_pos == b->in_size ? 
XZ_DATA_ERROR : XZ_BUF_ERROR; + + if (ret != XZ_STREAM_END) + { + b->in_pos = in_start; + b->out_pos = out_start; + } + } + else if (ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos) + { + if (s->allow_buf_error) + ret = XZ_BUF_ERROR; + + s->allow_buf_error = true; + } + else + { + s->allow_buf_error = false; + } + + return ret; } XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max) { - struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); - if (s == NULL) - return NULL; + struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; - s->mode = mode; + s->mode = mode; #ifdef XZ_DEC_BCJ - s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); - if (s->bcj == NULL) - goto error_bcj; + s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); + if (s->bcj == NULL) + goto error_bcj; #endif - s->lzma2 = xz_dec_lzma2_create(mode, dict_max); - if (s->lzma2 == NULL) - goto error_lzma2; + s->lzma2 = xz_dec_lzma2_create(mode, dict_max); + if (s->lzma2 == NULL) + goto error_lzma2; - xz_dec_reset(s); - return s; + xz_dec_reset(s); + return s; error_lzma2: #ifdef XZ_DEC_BCJ - xz_dec_bcj_end(s->bcj); + xz_dec_bcj_end(s->bcj); error_bcj: #endif - kfree(s); - return NULL; + kfree(s); + return NULL; } XZ_EXTERN void xz_dec_reset(struct xz_dec *s) { - s->sequence = SEQ_STREAM_HEADER; - s->allow_buf_error = false; - s->pos = 0; - s->crc = 0; - memzero(&s->block, sizeof(s->block)); - memzero(&s->index, sizeof(s->index)); - s->temp.pos = 0; - s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_HEADER; + s->allow_buf_error = false; + s->pos = 0; + s->crc = 0; + memzero(&s->block, sizeof(s->block)); + memzero(&s->index, sizeof(s->index)); + s->temp.pos = 0; + s->temp.size = STREAM_HEADER_SIZE; } XZ_EXTERN void xz_dec_end(struct xz_dec *s) { - if (s != NULL) - { - xz_dec_lzma2_end(s->lzma2); + if (s != NULL) + { + xz_dec_lzma2_end(s->lzma2); #ifdef XZ_DEC_BCJ - xz_dec_bcj_end(s->bcj); + xz_dec_bcj_end(s->bcj); #endif - kfree(s); - } + kfree(s); + } } diff --git a/libraries/xz-embedded/src/xz_lzma2.h b/libraries/xz-embedded/src/xz_lzma2.h index 3976033a..82a425f2 100644 --- a/libraries/xz-embedded/src/xz_lzma2.h +++ b/libraries/xz-embedded/src/xz_lzma2.h @@ -41,18 +41,18 @@ */ enum lzma_state { - STATE_LIT_LIT, - STATE_MATCH_LIT_LIT, - STATE_REP_LIT_LIT, - STATE_SHORTREP_LIT_LIT, - STATE_MATCH_LIT, - STATE_REP_LIT, - STATE_SHORTREP_LIT, - STATE_LIT_MATCH, - STATE_LIT_LONGREP, - STATE_LIT_SHORTREP, - STATE_NONLIT_MATCH, - STATE_NONLIT_REP + STATE_LIT_LIT, + STATE_MATCH_LIT_LIT, + STATE_REP_LIT_LIT, + STATE_SHORTREP_LIT_LIT, + STATE_MATCH_LIT, + STATE_REP_LIT, + STATE_SHORTREP_LIT, + STATE_LIT_MATCH, + STATE_LIT_LONGREP, + STATE_LIT_SHORTREP, + STATE_NONLIT_MATCH, + STATE_NONLIT_REP }; /* Total number of states */ @@ -64,36 +64,36 @@ enum lzma_state /* Indicate that the latest symbol was a literal. */ static inline void lzma_state_literal(enum lzma_state *state) { - if (*state <= STATE_SHORTREP_LIT_LIT) - *state = STATE_LIT_LIT; - else if (*state <= STATE_LIT_SHORTREP) - *state -= 3; - else - *state -= 6; + if (*state <= STATE_SHORTREP_LIT_LIT) + *state = STATE_LIT_LIT; + else if (*state <= STATE_LIT_SHORTREP) + *state -= 3; + else + *state -= 6; } /* Indicate that the latest symbol was a match. */ static inline void lzma_state_match(enum lzma_state *state) { - *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH; + *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH; } /* Indicate that the latest state was a long repeated match. 
*/ static inline void lzma_state_long_rep(enum lzma_state *state) { - *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP; + *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP; } /* Indicate that the latest symbol was a short match. */ static inline void lzma_state_short_rep(enum lzma_state *state) { - *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP; + *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP; } /* Test if the previous symbol was a literal. */ static inline bool lzma_state_is_literal(enum lzma_state state) { - return state < LIT_STATES; + return state < LIT_STATES; } /* Each literal coder is divided in three sections: @@ -147,7 +147,7 @@ static inline bool lzma_state_is_literal(enum lzma_state state) */ static inline uint32_t lzma_get_dist_state(uint32_t len) { - return len < DIST_STATES + MATCH_LEN_MIN ? len - MATCH_LEN_MIN : DIST_STATES - 1; + return len < DIST_STATES + MATCH_LEN_MIN ? len - MATCH_LEN_MIN : DIST_STATES - 1; } /* diff --git a/libraries/xz-embedded/src/xz_private.h b/libraries/xz-embedded/src/xz_private.h index 55a3af1c..1b616430 100644 --- a/libraries/xz-embedded/src/xz_private.h +++ b/libraries/xz-embedded/src/xz_private.h @@ -94,8 +94,8 @@ */ #ifndef XZ_DEC_BCJ #if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) || defined(XZ_DEC_IA64) || \ - defined(XZ_DEC_ARM) || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) || \ - defined(XZ_DEC_SPARC) + defined(XZ_DEC_ARM) || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) || \ + defined(XZ_DEC_SPARC) #define XZ_DEC_BCJ #endif #endif @@ -141,7 +141,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id); * must be called directly. */ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2, - struct xz_buf *b); + struct xz_buf *b); /* Free the memory allocated for the BCJ filters. */ #define xz_dec_bcj_end(s) kfree(s) diff --git a/libraries/xz-embedded/src/xz_stream.h b/libraries/xz-embedded/src/xz_stream.h index c0e191e6..b3d2c9fd 100644 --- a/libraries/xz-embedded/src/xz_stream.h +++ b/libraries/xz-embedded/src/xz_stream.h @@ -50,10 +50,10 @@ typedef uint64_t vli_type; /* Integrity Check types */ enum xz_check { - XZ_CHECK_NONE = 0, - XZ_CHECK_CRC32 = 1, - XZ_CHECK_CRC64 = 4, - XZ_CHECK_SHA256 = 10 + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10 }; /* Maximum possible Check ID */ diff --git a/libraries/xz-embedded/xzminidec.c b/libraries/xz-embedded/xzminidec.c index bb62c3ac..44f60602 100644 --- a/libraries/xz-embedded/xzminidec.c +++ b/libraries/xz-embedded/xzminidec.c @@ -24,121 +24,121 @@ static uint8_t out[BUFSIZ]; int main(int argc, char **argv) { - struct xz_buf b; - struct xz_dec *s; - enum xz_ret ret; - const char *msg; - - if (argc >= 2 && strcmp(argv[1], "--help") == 0) - { - fputs("Uncompress a .xz file from stdin to stdout.\n" - "Arguments other than `--help' are ignored.\n", - stdout); - return 0; - } - - xz_crc32_init(); + struct xz_buf b; + struct xz_dec *s; + enum xz_ret ret; + const char *msg; + + if (argc >= 2 && strcmp(argv[1], "--help") == 0) + { + fputs("Uncompress a .xz file from stdin to stdout.\n" + "Arguments other than `--help' are ignored.\n", + stdout); + return 0; + } + + xz_crc32_init(); #ifdef XZ_USE_CRC64 - xz_crc64_init(); + xz_crc64_init(); #endif - /* - * Support up to 64 MiB dictionary. The actually needed memory - * is allocated once the headers have been parsed. 
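(With XZ_PREALLOC the whole limit would instead be allocated up front;
a caller with a hard memory budget might use
xz_dec_init(XZ_PREALLOC, 1 << 20), where the 1 MiB value is purely
illustrative.)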
- */ - s = xz_dec_init(XZ_DYNALLOC, 1 << 26); - if (s == NULL) - { - msg = "Memory allocation failed\n"; - goto error; - } - - b.in = in; - b.in_pos = 0; - b.in_size = 0; - b.out = out; - b.out_pos = 0; - b.out_size = BUFSIZ; - - while (true) - { - if (b.in_pos == b.in_size) - { - b.in_size = fread(in, 1, sizeof(in), stdin); - b.in_pos = 0; - } - - ret = xz_dec_run(s, &b); - - if (b.out_pos == sizeof(out)) - { - if (fwrite(out, 1, b.out_pos, stdout) != b.out_pos) - { - msg = "Write error\n"; - goto error; - } - - b.out_pos = 0; - } - - if (ret == XZ_OK) - continue; + /* + * Support up to 64 MiB dictionary. The actually needed memory + * is allocated once the headers have been parsed. + */ + s = xz_dec_init(XZ_DYNALLOC, 1 << 26); + if (s == NULL) + { + msg = "Memory allocation failed\n"; + goto error; + } + + b.in = in; + b.in_pos = 0; + b.in_size = 0; + b.out = out; + b.out_pos = 0; + b.out_size = BUFSIZ; + + while (true) + { + if (b.in_pos == b.in_size) + { + b.in_size = fread(in, 1, sizeof(in), stdin); + b.in_pos = 0; + } + + ret = xz_dec_run(s, &b); + + if (b.out_pos == sizeof(out)) + { + if (fwrite(out, 1, b.out_pos, stdout) != b.out_pos) + { + msg = "Write error\n"; + goto error; + } + + b.out_pos = 0; + } + + if (ret == XZ_OK) + continue; #ifdef XZ_DEC_ANY_CHECK - if (ret == XZ_UNSUPPORTED_CHECK) - { - fputs(argv[0], stderr); - fputs(": ", stderr); - fputs("Unsupported check; not verifying " - "file integrity\n", - stderr); - continue; - } + if (ret == XZ_UNSUPPORTED_CHECK) + { + fputs(argv[0], stderr); + fputs(": ", stderr); + fputs("Unsupported check; not verifying " + "file integrity\n", + stderr); + continue; + } #endif - if (fwrite(out, 1, b.out_pos, stdout) != b.out_pos || fclose(stdout)) - { - msg = "Write error\n"; - goto error; - } - - switch (ret) - { - case XZ_STREAM_END: - xz_dec_end(s); - return 0; - - case XZ_MEM_ERROR: - msg = "Memory allocation failed\n"; - goto error; - - case XZ_MEMLIMIT_ERROR: - msg = "Memory usage limit reached\n"; - goto error; - - case XZ_FORMAT_ERROR: - msg = "Not a .xz file\n"; - goto error; - - case XZ_OPTIONS_ERROR: - msg = "Unsupported options in the .xz headers\n"; - goto error; - - case XZ_DATA_ERROR: - case XZ_BUF_ERROR: - msg = "File is corrupt\n"; - goto error; - - default: - msg = "Bug!\n"; - goto error; - } - } + if (fwrite(out, 1, b.out_pos, stdout) != b.out_pos || fclose(stdout)) + { + msg = "Write error\n"; + goto error; + } + + switch (ret) + { + case XZ_STREAM_END: + xz_dec_end(s); + return 0; + + case XZ_MEM_ERROR: + msg = "Memory allocation failed\n"; + goto error; + + case XZ_MEMLIMIT_ERROR: + msg = "Memory usage limit reached\n"; + goto error; + + case XZ_FORMAT_ERROR: + msg = "Not a .xz file\n"; + goto error; + + case XZ_OPTIONS_ERROR: + msg = "Unsupported options in the .xz headers\n"; + goto error; + + case XZ_DATA_ERROR: + case XZ_BUF_ERROR: + msg = "File is corrupt\n"; + goto error; + + default: + msg = "Bug!\n"; + goto error; + } + } error: - xz_dec_end(s); - fputs(argv[0], stderr); - fputs(": ", stderr); - fputs(msg, stderr); - return 1; + xz_dec_end(s); + fputs(argv[0], stderr); + fputs(": ", stderr); + fputs(msg, stderr); + return 1; } |
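For comparison with the multi-call loop in xzminidec above, the
single-call mode decodes a whole in-memory buffer with one call to
xz_dec_run(). A minimal sketch, assuming the complete input and a
sufficiently large output buffer are already available (the function
name and buffer handling are invented for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include "xz.h"

    static enum xz_ret decompress_once(const uint8_t *in, size_t in_size,
                                       uint8_t *out, size_t out_size,
                                       size_t *out_used)
    {
        struct xz_buf b = { in, 0, in_size, out, 0, out_size };
        struct xz_dec *s;
        enum xz_ret ret;

        /* Plus xz_crc64_init() if built with XZ_USE_CRC64. */
        xz_crc32_init();

        /* In XZ_SINGLE mode the output buffer doubles as the
         * dictionary, so the dict_max argument is ignored. */
        s = xz_dec_init(XZ_SINGLE, 0);
        if (s == NULL)
            return XZ_MEM_ERROR;

        ret = xz_dec_run(s, &b); /* XZ_STREAM_END on success */
        *out_used = b.out_pos;

        xz_dec_end(s);
        return ret;
    }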