Diffstat (limited to 'depends/xz-embedded/src')
-rw-r--r--  depends/xz-embedded/src/xz_config.h     |  35
-rw-r--r--  depends/xz-embedded/src/xz_crc32.c      |   8
-rw-r--r--  depends/xz-embedded/src/xz_crc64.c      |   8
-rw-r--r--  depends/xz-embedded/src/xz_dec_bcj.c    | 148
-rw-r--r--  depends/xz-embedded/src/xz_dec_lzma2.c  | 324
-rw-r--r--  depends/xz-embedded/src/xz_dec_stream.c | 207
-rw-r--r--  depends/xz-embedded/src/xz_lzma2.h      |   8
-rw-r--r--  depends/xz-embedded/src/xz_private.h    | 124
-rw-r--r--  depends/xz-embedded/src/xz_stream.h     |  14
9 files changed, 478 insertions(+), 398 deletions(-)
diff --git a/depends/xz-embedded/src/xz_config.h b/depends/xz-embedded/src/xz_config.h
index eb9dac1a..40805b75 100644
--- a/depends/xz-embedded/src/xz_config.h
+++ b/depends/xz-embedded/src/xz_config.h
@@ -27,11 +27,11 @@
  */
 #ifdef _MSC_VER
 typedef unsigned char bool;
-# define true 1
-# define false 0
-# define inline __inline
+#define true 1
+#define false 0
+#define inline __inline
 #else
-# include <stdbool.h>
+#include <stdbool.h>
 #endif
 
 #include <stdlib.h>
@@ -48,7 +48,7 @@ typedef unsigned char bool;
 #define memzero(buf, size) memset(buf, 0, size)
 
 #ifndef min
-# define min(x, y) ((x) < (y) ? (x) : (y))
+#define min(x, y) ((x) < (y) ? (x) : (y))
 #endif
 #define min_t(type, x, y) min(x, y)
 
@@ -63,32 +63,27 @@ typedef unsigned char bool;
  * so if you want to change it, you need to #undef it first.
  */
 #ifndef __always_inline
-# ifdef __GNUC__
-#  define __always_inline \
-   inline __attribute__((__always_inline__))
-# else
-#  define __always_inline inline
-# endif
+#ifdef __GNUC__
+#define __always_inline inline __attribute__((__always_inline__))
+#else
+#define __always_inline inline
+#endif
 #endif
 
 /* Inline functions to access unaligned unsigned 32-bit integers */
 #ifndef get_unaligned_le32
 static inline uint32_t get_unaligned_le32(const uint8_t *buf)
 {
-    return (uint32_t)buf[0]
-            | ((uint32_t)buf[1] << 8)
-            | ((uint32_t)buf[2] << 16)
-            | ((uint32_t)buf[3] << 24);
+    return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16) |
+           ((uint32_t)buf[3] << 24);
 }
 #endif
 
 #ifndef get_unaligned_be32
 static inline uint32_t get_unaligned_be32(const uint8_t *buf)
 {
-    return (uint32_t)(buf[0] << 24)
-            | ((uint32_t)buf[1] << 16)
-            | ((uint32_t)buf[2] << 8)
-            | (uint32_t)buf[3];
+    return (uint32_t)(buf[0] << 24) | ((uint32_t)buf[1] << 16) | ((uint32_t)buf[2] << 8) |
+           (uint32_t)buf[3];
 }
 #endif
 
@@ -118,7 +113,7 @@ static inline void put_unaligned_be32(uint32_t val, uint8_t *buf)
  * could save a few bytes in code size.
 */
 #ifndef get_le32
-# define get_le32 get_unaligned_le32
+#define get_le32 get_unaligned_le32
 #endif
 
 #endif
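The two getters above read 32-bit values one byte at a time precisely so that unaligned input buffers remain safe on strict-alignment targets. A minimal standalone sketch of the same access pattern (hypothetical demo names, not part of the patch):

/* Demo: byte-wise unaligned little-endian 32-bit access (hypothetical). */
#include <assert.h>
#include <stdint.h>

static uint32_t demo_get_le32(const uint8_t *buf)
{
    return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16) |
           ((uint32_t)buf[3] << 24);
}

static void demo_put_le32(uint32_t val, uint8_t *buf)
{
    buf[0] = (uint8_t)val;
    buf[1] = (uint8_t)(val >> 8);
    buf[2] = (uint8_t)(val >> 16);
    buf[3] = (uint8_t)(val >> 24);
}

int main(void)
{
    uint8_t buf[5] = {0};
    demo_put_le32(0x12345678, buf + 1); /* buf + 1 is deliberately misaligned */
    assert(demo_get_le32(buf + 1) == 0x12345678);
    return 0;
}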
diff --git a/depends/xz-embedded/src/xz_crc32.c b/depends/xz-embedded/src/xz_crc32.c
index 34532d14..c412662b 100644
--- a/depends/xz-embedded/src/xz_crc32.c
+++ b/depends/xz-embedded/src/xz_crc32.c
@@ -22,7 +22,7 @@
  * See <linux/decompress/mm.h> for details.
  */
 #ifndef STATIC_RW_DATA
-# define STATIC_RW_DATA static
+#define STATIC_RW_DATA static
 #endif
 
 STATIC_RW_DATA uint32_t xz_crc32_table[256];
@@ -35,7 +35,8 @@ XZ_EXTERN void xz_crc32_init(void)
     uint32_t j;
     uint32_t r;
 
-    for (i = 0; i < 256; ++i) {
+    for (i = 0; i < 256; ++i)
+    {
         r = i;
         for (j = 0; j < 8; ++j)
             r = (r >> 1) ^ (poly & ~((r & 1) - 1));
@@ -50,7 +51,8 @@ XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 {
     crc = ~crc;
 
-    while (size != 0) {
+    while (size != 0)
+    {
         crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
         --size;
     }
diff --git a/depends/xz-embedded/src/xz_crc64.c b/depends/xz-embedded/src/xz_crc64.c
index ca1caee8..4794b9d3 100644
--- a/depends/xz-embedded/src/xz_crc64.c
+++ b/depends/xz-embedded/src/xz_crc64.c
@@ -13,7 +13,7 @@
 #include "xz_private.h"
 
 #ifndef STATIC_RW_DATA
-# define STATIC_RW_DATA static
+#define STATIC_RW_DATA static
 #endif
 
 STATIC_RW_DATA uint64_t xz_crc64_table[256];
@@ -26,7 +26,8 @@ XZ_EXTERN void xz_crc64_init(void)
     uint32_t j;
     uint64_t r;
 
-    for (i = 0; i < 256; ++i) {
+    for (i = 0; i < 256; ++i)
+    {
         r = i;
         for (j = 0; j < 8; ++j)
             r = (r >> 1) ^ (poly & ~((r & 1) - 1));
@@ -41,7 +42,8 @@ XZ_EXTERN uint64_t xz_crc64(const uint8_t *buf, size_t size, uint64_t crc)
 {
     crc = ~crc;
 
-    while (size != 0) {
+    while (size != 0)
+    {
         crc = xz_crc64_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
         --size;
     }
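Both CRC files follow the same scheme: an init function fills a 256-entry table from the bit-reversed polynomial, and the per-byte loop folds one input byte into the running CRC. A self-contained sketch of that table-driven approach (hypothetical demo names; 0xCBF43926 is the standard CRC-32 check value for "123456789"):

/* Demo: table-driven CRC32 with the reversed polynomial 0xEDB88320. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t demo_crc32_table[256];

static void demo_crc32_init(void)
{
    const uint32_t poly = 0xEDB88320;
    for (uint32_t i = 0; i < 256; ++i)
    {
        uint32_t r = i;
        for (uint32_t j = 0; j < 8; ++j)
            r = (r >> 1) ^ (poly & ~((r & 1) - 1)); /* branchless conditional XOR */
        demo_crc32_table[i] = r;
    }
}

static uint32_t demo_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
    crc = ~crc;
    while (size-- != 0)
        crc = demo_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
    return ~crc;
}

int main(void)
{
    demo_crc32_init();
    assert(demo_crc32((const uint8_t *)"123456789", 9, 0) == 0xCBF43926);
    return 0;
}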
diff --git a/depends/xz-embedded/src/xz_dec_bcj.c b/depends/xz-embedded/src/xz_dec_bcj.c
index a768e6d2..9ffda3bd 100644
--- a/depends/xz-embedded/src/xz_dec_bcj.c
+++ b/depends/xz-embedded/src/xz_dec_bcj.c
@@ -16,15 +16,17 @@
 */
 #ifdef XZ_DEC_BCJ
-struct xz_dec_bcj {
+struct xz_dec_bcj
+{
     /* Type of the BCJ filter being used */
-    enum {
-        BCJ_X86 = 4,      /* x86 or x86-64 */
-        BCJ_POWERPC = 5,  /* Big endian only */
-        BCJ_IA64 = 6,     /* Big or little endian */
-        BCJ_ARM = 7,      /* Little endian only */
-        BCJ_ARMTHUMB = 8, /* Little endian only */
-        BCJ_SPARC = 9     /* Big or little endian */
+    enum
+    {
+        BCJ_X86 = 4,      /* x86 or x86-64 */
+        BCJ_POWERPC = 5,  /* Big endian only */
+        BCJ_IA64 = 6,     /* Big or little endian */
+        BCJ_ARM = 7,      /* Little endian only */
+        BCJ_ARMTHUMB = 8, /* Little endian only */
+        BCJ_SPARC = 9     /* Big or little endian */
     } type;
 
     /*
@@ -52,7 +54,8 @@ struct xz_dec_bcj {
     size_t out_pos;
     size_t out_size;
 
-    struct {
+    struct
+    {
         /* Amount of already filtered data in the beginning of buf */
         size_t filtered;
 
@@ -87,13 +90,13 @@ static inline int bcj_x86_test_msbyte(uint8_t b)
 
 static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
 {
-    static const bool mask_to_allowed_status[8]
-        = { true, true, true, false, true, false, false, false };
+    static const bool mask_to_allowed_status[8] = {true, true, true, false,
+                                                   true, false, false, false};
 
-    static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
+    static const uint8_t mask_to_bit_num[8] = {0, 1, 2, 2, 3, 3, 3, 3};
 
     size_t i;
-    size_t prev_pos = (size_t)-1;
+    size_t prev_pos = (size_t) - 1;
     uint32_t prev_mask = s->x86_prev_mask;
     uint32_t src;
     uint32_t dest;
@@ -104,19 +107,24 @@ static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
         return 0;
 
     size -= 4;
-    for (i = 0; i < size; ++i) {
+    for (i = 0; i < size; ++i)
+    {
         if ((buf[i] & 0xFE) != 0xE8)
             continue;
 
         prev_pos = i - prev_pos;
-        if (prev_pos > 3) {
+        if (prev_pos > 3)
+        {
             prev_mask = 0;
-        } else {
+        }
+        else
+        {
             prev_mask = (prev_mask << (prev_pos - 1)) & 7;
-            if (prev_mask != 0) {
+            if (prev_mask != 0)
+            {
                 b = buf[i + 4 - mask_to_bit_num[prev_mask]];
-                if (!mask_to_allowed_status[prev_mask]
-                        || bcj_x86_test_msbyte(b)) {
+                if (!mask_to_allowed_status[prev_mask] || bcj_x86_test_msbyte(b))
+                {
                     prev_pos = i;
                     prev_mask = (prev_mask << 1) | 1;
                     continue;
@@ -126,9 +134,11 @@ static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
 
         prev_pos = i;
 
-        if (bcj_x86_test_msbyte(buf[i + 4])) {
+        if (bcj_x86_test_msbyte(buf[i + 4]))
+        {
             src = get_unaligned_le32(buf + i + 1);
-            while (true) {
+            while (true)
+            {
                 dest = src - (s->pos + (uint32_t)i + 5);
                 if (prev_mask == 0)
                     break;
@@ -145,7 +155,9 @@ static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
             dest |= (uint32_t)0 - (dest & 0x01000000);
             put_unaligned_le32(dest, buf + i + 1);
             i += 4;
-        } else {
+        }
+        else
+        {
             prev_mask = (prev_mask << 1) | 1;
         }
     }
@@ -162,9 +174,11 @@ static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
     size_t i;
     uint32_t instr;
 
-    for (i = 0; i + 4 <= size; i += 4) {
+    for (i = 0; i + 4 <= size; i += 4)
+    {
         instr = get_unaligned_be32(buf + i);
-        if ((instr & 0xFC000003) == 0x48000001) {
+        if ((instr & 0xFC000003) == 0x48000001)
+        {
             instr &= 0x03FFFFFC;
             instr -= s->pos + (uint32_t)i;
             instr &= 0x03FFFFFC;
@@ -180,12 +194,8 @@ static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
 #ifdef XZ_DEC_IA64
 static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
 {
-    static const uint8_t branch_table[32] = {
-        0, 0, 0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0, 0, 0,
-        4, 4, 6, 6, 0, 0, 7, 7,
-        4, 4, 0, 0, 4, 4, 0, 0
-    };
+    static const uint8_t branch_table[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                             4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0};
 
     /*
      * The local variables take a little bit stack space, but it's less
@@ -219,9 +229,11 @@ static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
     /* Instruction normalized with bit_res for easier manipulation */
     uint64_t norm;
 
-    for (i = 0; i + 16 <= size; i += 16) {
+    for (i = 0; i + 16 <= size; i += 16)
+    {
         mask = branch_table[buf[i] & 0x1F];
-        for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
+        for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41)
+        {
             if (((mask >> slot) & 1) == 0)
                 continue;
 
@@ -229,13 +241,12 @@ static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
             bit_res = bit_pos & 7;
             instr = 0;
             for (j = 0; j < 6; ++j)
-                instr |= (uint64_t)(buf[i + j + byte_pos])
-                        << (8 * j);
+                instr |= (uint64_t)(buf[i + j + byte_pos]) << (8 * j);
 
             norm = instr >> bit_res;
 
-            if (((norm >> 37) & 0x0F) == 0x05
-                    && ((norm >> 9) & 0x07) == 0) {
+            if (((norm >> 37) & 0x0F) == 0x05 && ((norm >> 9) & 0x07) == 0)
+            {
                 addr = (norm >> 13) & 0x0FFFFF;
                 addr |= ((uint32_t)(norm >> 36) & 1) << 20;
                 addr <<= 4;
@@ -244,15 +255,13 @@ static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
                 norm &= ~((uint64_t)0x8FFFFF << 13);
                 norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
-                norm |= (uint64_t)(addr & 0x100000)
-                        << (36 - 20);
+                norm |= (uint64_t)(addr & 0x100000) << (36 - 20);
 
                 instr &= (1 << bit_res) - 1;
                 instr |= norm << bit_res;
 
                 for (j = 0; j < 6; j++)
-                    buf[i + j + byte_pos]
-                        = (uint8_t)(instr >> (8 * j));
+                    buf[i + j + byte_pos] = (uint8_t)(instr >> (8 * j));
             }
         }
     }
@@ -267,10 +276,12 @@ static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
     size_t i;
     uint32_t addr;
 
-    for (i = 0; i + 4 <= size; i += 4) {
-        if (buf[i + 3] == 0xEB) {
-            addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
-                    | ((uint32_t)buf[i + 2] << 16);
+    for (i = 0; i + 4 <= size; i += 4)
+    {
+        if (buf[i + 3] == 0xEB)
+        {
+            addr =
+                (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) | ((uint32_t)buf[i + 2] << 16);
             addr <<= 2;
             addr -= s->pos + (uint32_t)i + 8;
             addr >>= 2;
@@ -290,13 +301,12 @@ static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
     size_t i;
     uint32_t addr;
 
-    for (i = 0; i + 4 <= size; i += 2) {
-        if ((buf[i + 1] & 0xF8) == 0xF0
-                && (buf[i + 3] & 0xF8) == 0xF8) {
-            addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
-                    | ((uint32_t)buf[i] << 11)
-                    | (((uint32_t)buf[i + 3] & 0x07) << 8)
-                    | (uint32_t)buf[i + 2];
+    for (i = 0; i + 4 <= size; i += 2)
+    {
+        if ((buf[i + 1] & 0xF8) == 0xF0 && (buf[i + 3] & 0xF8) == 0xF8)
+        {
+            addr = (((uint32_t)buf[i + 1] & 0x07) << 19) | ((uint32_t)buf[i] << 11) |
+                   (((uint32_t)buf[i + 3] & 0x07) << 8) | (uint32_t)buf[i + 2];
             addr <<= 1;
             addr -= s->pos + (uint32_t)i + 4;
             addr >>= 1;
@@ -318,14 +328,16 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
     size_t i;
     uint32_t instr;
 
-    for (i = 0; i + 4 <= size; i += 4) {
+    for (i = 0; i + 4 <= size; i += 4)
+    {
         instr = get_unaligned_be32(buf + i);
-        if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
+        if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF)
+        {
             instr <<= 2;
             instr -= s->pos + (uint32_t)i;
             instr >>= 2;
-            instr = ((uint32_t)0x40000000 - (instr & 0x400000))
-                    | 0x40000000 | (instr & 0x3FFFFF);
+            instr =
+                ((uint32_t)0x40000000 - (instr & 0x400000)) | 0x40000000 | (instr & 0x3FFFFF);
             put_unaligned_be32(instr, buf + i);
         }
     }
@@ -342,15 +354,15 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
 * pointers, which could be problematic in the kernel boot code, which must
 * avoid pointers to static data (at least on x86).
 */
-static void bcj_apply(struct xz_dec_bcj *s,
-                      uint8_t *buf, size_t *pos, size_t size)
+static void bcj_apply(struct xz_dec_bcj *s, uint8_t *buf, size_t *pos, size_t size)
 {
     size_t filtered;
 
     buf += *pos;
     size -= *pos;
 
-    switch (s->type) {
+    switch (s->type)
+    {
 #ifdef XZ_DEC_X86
     case BCJ_X86:
         filtered = bcj_x86(s, buf, size);
@@ -414,9 +426,8 @@ static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
 * data in chunks of 1-16 bytes. To hide this issue, this function does
 * some buffering.
 */
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
-                                     struct xz_dec_lzma2 *lzma2,
-                                     struct xz_buf *b)
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+                                     struct xz_buf *b)
 {
     size_t out_start;
 
@@ -425,7 +436,8 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
     * immediatelly if we couldn't flush everything, or if the next
     * filter in the chain had already returned XZ_STREAM_END.
     */
-    if (s->temp.filtered > 0) {
+    if (s->temp.filtered > 0)
+    {
         bcj_flush(s, b);
         if (s->temp.filtered > 0)
             return XZ_OK;
@@ -446,14 +458,14 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
     * case where the output buffer is full and the next filter has no
     * more output coming but hasn't returned XZ_STREAM_END yet.
     */
-    if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
+    if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0)
+    {
         out_start = b->out_pos;
         memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
         b->out_pos += s->temp.size;
 
         s->ret = xz_dec_lzma2_run(lzma2, b);
-        if (s->ret != XZ_STREAM_END
-                && (s->ret != XZ_OK || s->single_call))
+        if (s->ret != XZ_STREAM_END && (s->ret != XZ_OK || s->single_call))
             return s->ret;
 
         bcj_apply(s, b->out, &out_start, b->out_pos);
@@ -487,7 +499,8 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
     * A mix of filtered and unfiltered data may be left in temp; it will
     * be taken care on the next call to this function.
     */
-    if (b->out_pos < b->out_size) {
+    if (b->out_pos < b->out_size)
+    {
         /* Make b->out{,_pos,_size} temporarily point to s->temp. */
         s->out = b->out;
         s->out_pos = b->out_pos;
@@ -535,7 +548,8 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
 
 XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
 {
-    switch (id) {
+    switch (id)
+    {
 #ifdef XZ_DEC_X86
     case BCJ_X86:
 #endif
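Every branch filter above follows one pattern: find a branch opcode, extract its displacement, and convert between a position-relative and an absolute target so that repeated calls to the same address become identical byte patterns for LZMA2 to compress. A toy round-trip of the ARM BL arithmetic used by bcj_arm (hypothetical demo; the real filter scans the byte stream and detects the 0xEB opcode itself):

/* Demo: ARM BL 24-bit displacement <-> absolute-address conversion. */
#include <assert.h>
#include <stdint.h>

/* Encoder direction: make the word-aligned displacement absolute. */
static uint32_t demo_arm_encode(uint32_t field, uint32_t pos)
{
    return (((field << 2) + pos + 8) >> 2) & 0x00FFFFFF;
}

/* Decoder direction, mirroring bcj_arm: make it relative again. */
static uint32_t demo_arm_decode(uint32_t field, uint32_t pos)
{
    return (((field << 2) - (pos + 8)) >> 2) & 0x00FFFFFF;
}

int main(void)
{
    uint32_t rel = 0x000123; /* sample 24-bit displacement */
    uint32_t pos = 0x8000;   /* byte position of the instruction (word aligned) */
    uint32_t abs = demo_arm_encode(rel, pos);
    assert(demo_arm_decode(abs, pos) == rel);
    return 0;
}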
diff --git a/depends/xz-embedded/src/xz_dec_lzma2.c b/depends/xz-embedded/src/xz_dec_lzma2.c
index a6cdc969..3d7b9a2e 100644
--- a/depends/xz-embedded/src/xz_dec_lzma2.c
+++ b/depends/xz-embedded/src/xz_dec_lzma2.c
@@ -41,7 +41,8 @@
 * in which the dictionary variables address the actual output
 * buffer directly.
 */
-struct dictionary {
+struct dictionary
+{
     /* Beginning of the history buffer */
     uint8_t *buf;
 
@@ -92,7 +93,8 @@ struct dictionary {
 };
 
 /* Range decoder */
-struct rc_dec {
+struct rc_dec
+{
     uint32_t range;
     uint32_t code;
 
@@ -112,7 +114,8 @@ struct rc_dec {
 };
 
 /* Probabilities for a length decoder. */
-struct lzma_len_dec {
+struct lzma_len_dec
+{
     /* Probability of match length being at least 10 */
     uint16_t choice;
 
@@ -129,7 +132,8 @@ struct lzma_len_dec {
     uint16_t high[LEN_HIGH_SYMBOLS];
 };
 
-struct lzma_dec {
+struct lzma_dec
+{
     /* Distances of latest four matches */
     uint32_t rep0;
     uint32_t rep1;
@@ -153,7 +157,7 @@ struct lzma_dec {
     */
     uint32_t lc;
     uint32_t literal_pos_mask; /* (1 << lp) - 1 */
-    uint32_t pos_mask;         /* (1 << pb) - 1 */
+    uint32_t pos_mask; /* (1 << pb) - 1 */
 
     /* If 1, it's a match. Otherwise it's a single 8-bit literal. */
     uint16_t is_match[STATES][POS_STATES_MAX];
 
@@ -211,9 +215,11 @@ struct lzma_dec {
     uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
 };
 
-struct lzma2_dec {
+struct lzma2_dec
+{
     /* Position in xz_dec_lzma2_run(). */
-    enum lzma2_seq {
+    enum lzma2_seq
+    {
         SEQ_CONTROL,
         SEQ_UNCOMPRESSED_1,
         SEQ_UNCOMPRESSED_2,
@@ -250,7 +256,8 @@ struct lzma2_dec {
     bool need_props;
 };
 
-struct xz_dec_lzma2 {
+struct xz_dec_lzma2
+{
     /*
     * The order below is important on x86 to reduce code size and
    * it shouldn't hurt on other platforms. Everything up to and
@@ -269,7 +276,8 @@ struct xz_dec_lzma2 {
    * Temporary buffer which holds small number of input bytes between
    * decoder calls. See lzma2_lzma() for details.
    */
-    struct {
+    struct
+    {
         uint32_t size;
         uint8_t buf[3 * LZMA_IN_REQUIRED];
     } temp;
@@ -285,7 +293,8 @@ struct xz_dec_lzma2 {
 */
 static void dict_reset(struct dictionary *dict, struct xz_buf *b)
 {
-    if (DEC_IS_SINGLE(dict->mode)) {
+    if (DEC_IS_SINGLE(dict->mode))
+    {
         dict->buf = b->out + b->out_pos;
         dict->end = b->out_size - b->out_pos;
     }
@@ -358,7 +367,8 @@ static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
     if (dist >= dict->pos)
         back += dict->end;
 
-    do {
+    do
+    {
         dict->buf[dict->pos++] = dict->buf[back++];
         if (back == dict->end)
             back = 0;
@@ -371,15 +381,13 @@ static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
 }
 
 /* Copy uncompressed data as is from input to dictionary and output buffers. */
-static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
-                              uint32_t *left)
+static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, uint32_t *left)
 {
     size_t copy_size;
 
-    while (*left > 0 && b->in_pos < b->in_size
-            && b->out_pos < b->out_size) {
-        copy_size = min(b->in_size - b->in_pos,
-                b->out_size - b->out_pos);
+    while (*left > 0 && b->in_pos < b->in_size && b->out_pos < b->out_size)
+    {
+        copy_size = min(b->in_size - b->in_pos, b->out_size - b->out_pos);
         if (copy_size > dict->end - dict->pos)
             copy_size = dict->end - dict->pos;
         if (copy_size > *left)
@@ -393,12 +401,12 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
         if (dict->full < dict->pos)
             dict->full = dict->pos;
 
-        if (DEC_IS_MULTI(dict->mode)) {
+        if (DEC_IS_MULTI(dict->mode))
+        {
             if (dict->pos == dict->end)
                 dict->pos = 0;
 
-            memcpy(b->out + b->out_pos, b->in + b->in_pos,
-                    copy_size);
+            memcpy(b->out + b->out_pos, b->in + b->in_pos, copy_size);
         }
 
         dict->start = dict->pos;
@@ -417,12 +425,12 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
 {
     size_t copy_size = dict->pos - dict->start;
 
-    if (DEC_IS_MULTI(dict->mode)) {
+    if (DEC_IS_MULTI(dict->mode))
+    {
         if (dict->pos == dict->end)
             dict->pos = 0;
 
-        memcpy(b->out + b->out_pos, dict->buf + dict->start,
-                copy_size);
+        memcpy(b->out + b->out_pos, dict->buf + dict->start, copy_size);
     }
 
     dict->start = dict->pos;
@@ -437,7 +445,7 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
 /* Reset the range decoder. */
 static void rc_reset(struct rc_dec *rc)
 {
-    rc->range = (uint32_t)-1;
+    rc->range = (uint32_t) - 1;
     rc->code = 0;
     rc->init_bytes_left = RC_INIT_BYTES;
 }
@@ -448,7 +456,8 @@ static void rc_reset(struct rc_dec *rc)
 */
 static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
 {
-    while (rc->init_bytes_left > 0) {
+    while (rc->init_bytes_left > 0)
+    {
         if (b->in_pos == b->in_size)
             return false;
 
@@ -477,7 +486,8 @@ static inline bool rc_is_finished(const struct rc_dec *rc)
 /* Read the next input byte if needed. */
 static __always_inline void rc_normalize(struct rc_dec *rc)
 {
-    if (rc->range < RC_TOP_VALUE) {
+    if (rc->range < RC_TOP_VALUE)
+    {
         rc->range <<= RC_SHIFT_BITS;
         rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
     }
@@ -501,11 +511,14 @@ static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
     rc_normalize(rc);
     bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
-    if (rc->code < bound) {
+    if (rc->code < bound)
+    {
         rc->range = bound;
         *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
         bit = 0;
-    } else {
+    }
+    else
+    {
         rc->range -= bound;
         rc->code -= bound;
         *prob -= *prob >> RC_MOVE_BITS;
@@ -516,12 +529,12 @@ static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
 }
 
 /* Decode a bittree starting from the most significant bit. */
-static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
-                                           uint16_t *probs, uint32_t limit)
+static __always_inline uint32_t rc_bittree(struct rc_dec *rc, uint16_t *probs, uint32_t limit)
 {
     uint32_t symbol = 1;
 
-    do {
+    do
+    {
         if (rc_bit(rc, &probs[symbol]))
             symbol = (symbol << 1) + 1;
         else
@@ -532,18 +545,21 @@ static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
 }
 
 /* Decode a bittree starting from the least significant bit. */
-static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
-                                               uint16_t *probs,
-                                               uint32_t *dest, uint32_t limit)
+static __always_inline void rc_bittree_reverse(struct rc_dec *rc, uint16_t *probs,
+                                               uint32_t *dest, uint32_t limit)
 {
     uint32_t symbol = 1;
     uint32_t i = 0;
 
-    do {
-        if (rc_bit(rc, &probs[symbol])) {
+    do
+    {
+        if (rc_bit(rc, &probs[symbol]))
+        {
             symbol = (symbol << 1) + 1;
             *dest += 1 << i;
-        } else {
+        }
+        else
+        {
             symbol <<= 1;
         }
     } while (++i < limit);
@@ -554,7 +570,8 @@ static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
 {
     uint32_t mask;
 
-    do {
+    do
+    {
         rc_normalize(rc);
         rc->range >>= 1;
         rc->code -= rc->range;
@@ -589,22 +606,29 @@ static void lzma_literal(struct xz_dec_lzma2 *s)
 
     probs = lzma_literal_probs(s);
 
-    if (lzma_state_is_literal(s->lzma.state)) {
+    if (lzma_state_is_literal(s->lzma.state))
+    {
         symbol = rc_bittree(&s->rc, probs, 0x100);
-    } else {
+    }
+    else
+    {
         symbol = 1;
         match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
         offset = 0x100;
 
-        do {
+        do
+        {
             match_bit = match_byte & offset;
             match_byte <<= 1;
             i = offset + match_bit + symbol;
 
-            if (rc_bit(&s->rc, &probs[i])) {
+            if (rc_bit(&s->rc, &probs[i]))
+            {
                 symbol = (symbol << 1) + 1;
                 offset &= match_bit;
-            } else {
+            }
+            else
+            {
                 symbol <<= 1;
                 offset &= ~match_bit;
             }
@@ -616,26 +640,30 @@ static void lzma_literal(struct xz_dec_lzma2 *s)
 }
 
 /* Decode the length of the match into s->lzma.len. */
-static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
-                     uint32_t pos_state)
+static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, uint32_t pos_state)
 {
     uint16_t *probs;
     uint32_t limit;
 
-    if (!rc_bit(&s->rc, &l->choice)) {
+    if (!rc_bit(&s->rc, &l->choice))
+    {
         probs = l->low[pos_state];
         limit = LEN_LOW_SYMBOLS;
         s->lzma.len = MATCH_LEN_MIN;
-    } else {
-        if (!rc_bit(&s->rc, &l->choice2)) {
+    }
+    else
+    {
+        if (!rc_bit(&s->rc, &l->choice2))
+        {
             probs = l->mid[pos_state];
             limit = LEN_MID_SYMBOLS;
             s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
-        } else {
+        }
+        else
+        {
             probs = l->high;
             limit = LEN_HIGH_SYMBOLS;
-            s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
-                    + LEN_MID_SYMBOLS;
+            s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS;
         }
     }
 
@@ -660,23 +688,26 @@ static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
     probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
     dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
 
-    if (dist_slot < DIST_MODEL_START) {
+    if (dist_slot < DIST_MODEL_START)
+    {
         s->lzma.rep0 = dist_slot;
-    } else {
+    }
+    else
+    {
         limit = (dist_slot >> 1) - 1;
         s->lzma.rep0 = 2 + (dist_slot & 1);
 
-        if (dist_slot < DIST_MODEL_END) {
+        if (dist_slot < DIST_MODEL_END)
+        {
             s->lzma.rep0 <<= limit;
-            probs = s->lzma.dist_special + s->lzma.rep0
-                    - dist_slot - 1;
-            rc_bittree_reverse(&s->rc, probs,
-                    &s->lzma.rep0, limit);
-        } else {
+            probs = s->lzma.dist_special + s->lzma.rep0 - dist_slot - 1;
+            rc_bittree_reverse(&s->rc, probs, &s->lzma.rep0, limit);
+        }
+        else
+        {
             rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
             s->lzma.rep0 <<= ALIGN_BITS;
-            rc_bittree_reverse(&s->rc, s->lzma.dist_align,
-                    &s->lzma.rep0, ALIGN_BITS);
+            rc_bittree_reverse(&s->rc, s->lzma.dist_align, &s->lzma.rep0, ALIGN_BITS);
         }
     }
 }
@@ -689,20 +720,29 @@ static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
 {
     uint32_t tmp;
 
-    if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
-        if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
-                s->lzma.state][pos_state])) {
+    if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state]))
+    {
+        if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[s->lzma.state][pos_state]))
+        {
             lzma_state_short_rep(&s->lzma.state);
             s->lzma.len = 1;
             return;
         }
-    } else {
-        if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
+    }
+    else
+    {
+        if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state]))
+        {
             tmp = s->lzma.rep1;
-        } else {
-            if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
+        }
+        else
+        {
+            if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state]))
+            {
                 tmp = s->lzma.rep2;
-            } else {
+            }
+            else
+            {
                 tmp = s->lzma.rep3;
                 s->lzma.rep3 = s->lzma.rep2;
             }
@@ -734,13 +774,16 @@ static bool lzma_main(struct xz_dec_lzma2 *s)
    * Decode more LZMA symbols. One iteration may consume up to
    * LZMA_IN_REQUIRED - 1 bytes.
    */
-    while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
+    while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc))
+    {
         pos_state = s->dict.pos & s->lzma.pos_mask;
 
-        if (!rc_bit(&s->rc, &s->lzma.is_match[
-                s->lzma.state][pos_state])) {
+        if (!rc_bit(&s->rc, &s->lzma.is_match[s->lzma.state][pos_state]))
+        {
             lzma_literal(s);
-        } else {
+        }
+        else
+        {
             if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
                 lzma_rep_match(s, pos_state);
             else
@@ -802,7 +845,8 @@ static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
         return false;
 
     s->lzma.pos_mask = 0;
-    while (props >= 9 * 5) {
+    while (props >= 9 * 5)
+    {
         props -= 9 * 5;
         ++s->lzma.pos_mask;
     }
@@ -810,7 +854,8 @@ static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
     s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
 
     s->lzma.literal_pos_mask = 0;
-    while (props >= 9) {
+    while (props >= 9)
+    {
         props -= 9;
         ++s->lzma.literal_pos_mask;
     }
@@ -849,7 +894,8 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
     uint32_t tmp;
 
     in_avail = b->in_size - b->in_pos;
-    if (s->temp.size > 0 || s->lzma2.compressed == 0) {
+    if (s->temp.size > 0 || s->lzma2.compressed == 0)
+    {
         tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
         if (tmp > s->lzma2.compressed - s->temp.size)
             tmp = s->lzma2.compressed - s->temp.size;
@@ -858,16 +904,19 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
 
         memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
 
-        if (s->temp.size + tmp == s->lzma2.compressed) {
-            memzero(s->temp.buf + s->temp.size + tmp,
-                    sizeof(s->temp.buf)
-                        - s->temp.size - tmp);
+        if (s->temp.size + tmp == s->lzma2.compressed)
+        {
+            memzero(s->temp.buf + s->temp.size + tmp, sizeof(s->temp.buf) - s->temp.size - tmp);
             s->rc.in_limit = s->temp.size + tmp;
-        } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
+        }
+        else if (s->temp.size + tmp < LZMA_IN_REQUIRED)
+        {
             s->temp.size += tmp;
             b->in_pos += tmp;
             return true;
-        } else {
+        }
+        else
+        {
             s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
         }
 
@@ -879,10 +928,10 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
 
         s->lzma2.compressed -= s->rc.in_pos;
 
-        if (s->rc.in_pos < s->temp.size) {
+        if (s->rc.in_pos < s->temp.size)
+        {
             s->temp.size -= s->rc.in_pos;
-            memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
-                    s->temp.size);
+            memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, s->temp.size);
             return true;
         }
 
@@ -891,7 +940,8 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
     }
 
     in_avail = b->in_size - b->in_pos;
-    if (in_avail >= LZMA_IN_REQUIRED) {
+    if (in_avail >= LZMA_IN_REQUIRED)
+    {
         s->rc.in = b->in;
         s->rc.in_pos = b->in_pos;
 
@@ -912,7 +962,8 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
     }
 
     in_avail = b->in_size - b->in_pos;
-    if (in_avail < LZMA_IN_REQUIRED) {
+    if (in_avail < LZMA_IN_REQUIRED)
+    {
         if (in_avail > s->lzma2.compressed)
             in_avail = s->lzma2.compressed;
 
@@ -928,13 +979,14 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
 * decoding or copying of uncompressed chunks to other functions.
 */
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
-                                       struct xz_buf *b)
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b)
 {
     uint32_t tmp;
 
-    while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
-        switch (s->lzma2.sequence) {
+    while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN)
+    {
+        switch (s->lzma2.sequence)
+        {
         case SEQ_CONTROL:
             /*
             * LZMA2 control byte
@@ -972,38 +1024,45 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
             if (tmp == 0x00)
                 return XZ_STREAM_END;
 
-            if (tmp >= 0xE0 || tmp == 0x01) {
+            if (tmp >= 0xE0 || tmp == 0x01)
+            {
                 s->lzma2.need_props = true;
                 s->lzma2.need_dict_reset = false;
                 dict_reset(&s->dict, b);
-            } else if (s->lzma2.need_dict_reset) {
+            }
+            else if (s->lzma2.need_dict_reset)
+            {
                 return XZ_DATA_ERROR;
             }
 
-            if (tmp >= 0x80) {
+            if (tmp >= 0x80)
+            {
                 s->lzma2.uncompressed = (tmp & 0x1F) << 16;
                 s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
 
-                if (tmp >= 0xC0) {
+                if (tmp >= 0xC0)
+                {
                     /*
                     * When there are new properties,
                    * state reset is done at
                    * SEQ_PROPERTIES.
                    */
                     s->lzma2.need_props = false;
-                    s->lzma2.next_sequence
-                            = SEQ_PROPERTIES;
-
-                } else if (s->lzma2.need_props) {
+                    s->lzma2.next_sequence = SEQ_PROPERTIES;
+                }
+                else if (s->lzma2.need_props)
+                {
                     return XZ_DATA_ERROR;
-
-                } else {
-                    s->lzma2.next_sequence
-                            = SEQ_LZMA_PREPARE;
+                }
+                else
+                {
+                    s->lzma2.next_sequence = SEQ_LZMA_PREPARE;
                     if (tmp >= 0xA0)
                         lzma_reset(s);
                 }
-            } else {
+            }
+            else
+            {
                 if (tmp > 0x02)
                     return XZ_DATA_ERROR;
 
@@ -1014,26 +1073,22 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
             break;
 
         case SEQ_UNCOMPRESSED_1:
-            s->lzma2.uncompressed
-                    += (uint32_t)b->in[b->in_pos++] << 8;
+            s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] << 8;
             s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
             break;
 
         case SEQ_UNCOMPRESSED_2:
-            s->lzma2.uncompressed
-                    += (uint32_t)b->in[b->in_pos++] + 1;
+            s->lzma2.uncompressed += (uint32_t)b->in[b->in_pos++] + 1;
             s->lzma2.sequence = SEQ_COMPRESSED_0;
             break;
 
         case SEQ_COMPRESSED_0:
-            s->lzma2.compressed
-                    = (uint32_t)b->in[b->in_pos++] << 8;
+            s->lzma2.compressed = (uint32_t)b->in[b->in_pos++] << 8;
             s->lzma2.sequence = SEQ_COMPRESSED_1;
             break;
 
         case SEQ_COMPRESSED_1:
-            s->lzma2.compressed
-                    += (uint32_t)b->in[b->in_pos++] + 1;
+            s->lzma2.compressed += (uint32_t)b->in[b->in_pos++] + 1;
             s->lzma2.sequence = s->lzma2.next_sequence;
             break;
 
@@ -1063,26 +1118,24 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
            * the output buffer yet, we may run this loop
            * multiple times without changing s->lzma2.sequence.
            */
-            dict_limit(&s->dict, min_t(size_t,
-                    b->out_size - b->out_pos,
-                    s->lzma2.uncompressed));
+            dict_limit(&s->dict,
+                       min_t(size_t, b->out_size - b->out_pos, s->lzma2.uncompressed));
             if (!lzma2_lzma(s, b))
                 return XZ_DATA_ERROR;
 
             s->lzma2.uncompressed -= dict_flush(&s->dict, b);
 
-            if (s->lzma2.uncompressed == 0) {
-                if (s->lzma2.compressed > 0 || s->lzma.len > 0
-                        || !rc_is_finished(&s->rc))
+            if (s->lzma2.uncompressed == 0)
+            {
+                if (s->lzma2.compressed > 0 || s->lzma.len > 0 || !rc_is_finished(&s->rc))
                     return XZ_DATA_ERROR;
 
                 rc_reset(&s->rc);
                 s->lzma2.sequence = SEQ_CONTROL;
-
-            } else if (b->out_pos == b->out_size
-                    || (b->in_pos == b->in_size
-                        && s->temp.size
-                        < s->lzma2.compressed)) {
+            }
+            else if (b->out_pos == b->out_size ||
+                     (b->in_pos == b->in_size && s->temp.size < s->lzma2.compressed))
+            {
                 return XZ_OK;
             }
 
@@ -1101,8 +1154,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
     return XZ_OK;
 }
 
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
-                                                   uint32_t dict_max)
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max)
 {
     struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
     if (s == NULL)
@@ -1111,13 +1163,17 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
     s->dict.mode = mode;
     s->dict.size_max = dict_max;
 
-    if (DEC_IS_PREALLOC(mode)) {
+    if (DEC_IS_PREALLOC(mode))
+    {
         s->dict.buf = vmalloc(dict_max);
-        if (s->dict.buf == NULL) {
+        if (s->dict.buf == NULL)
+        {
             kfree(s);
             return NULL;
         }
-    } else if (DEC_IS_DYNALLOC(mode)) {
+    }
+    else if (DEC_IS_DYNALLOC(mode))
+    {
         s->dict.buf = NULL;
         s->dict.allocated = 0;
     }
@@ -1134,17 +1190,21 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
     s->dict.size = 2 + (props & 1);
     s->dict.size <<= (props >> 1) + 11;
 
-    if (DEC_IS_MULTI(s->dict.mode)) {
+    if (DEC_IS_MULTI(s->dict.mode))
+    {
         if (s->dict.size > s->dict.size_max)
             return XZ_MEMLIMIT_ERROR;
 
         s->dict.end = s->dict.size;
 
-        if (DEC_IS_DYNALLOC(s->dict.mode)) {
-            if (s->dict.allocated < s->dict.size) {
+        if (DEC_IS_DYNALLOC(s->dict.mode))
+        {
+            if (s->dict.allocated < s->dict.size)
+            {
                 vfree(s->dict.buf);
                 s->dict.buf = vmalloc(s->dict.size);
-                if (s->dict.buf == NULL) {
+                if (s->dict.buf == NULL)
+                {
                     s->dict.allocated = 0;
                     return XZ_MEM_ERROR;
                 }
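The SEQ_CONTROL case above dispatches on a single LZMA2 control byte: 0x00 ends the data, 0x01 and 0x02 introduce uncompressed chunks (with or without a dictionary reset), and values of 0x80 and above start an LZMA chunk whose low five bits carry bits 16-20 of the uncompressed size, with larger values also requesting state, property, or dictionary resets. A sketch of that dispatch as a standalone classifier (hypothetical helper mirroring the tests in xz_dec_lzma2_run()):

/* Demo: classify an LZMA2 control byte the way SEQ_CONTROL tests it. */
#include <stdint.h>
#include <stdio.h>

static void demo_classify_lzma2_control(uint8_t c)
{
    if (c == 0x00)
    {
        puts("end of LZMA2 data");
    }
    else if (c == 0x01 || c == 0x02)
    {
        printf("uncompressed chunk%s\n", c == 0x01 ? " with dictionary reset" : "");
    }
    else if (c >= 0x80)
    {
        printf("LZMA chunk, size bits 16-20 = 0x%02X", c & 0x1F);
        if (c >= 0xE0)
            printf(", dict + state + props reset");
        else if (c >= 0xC0)
            printf(", state + props reset");
        else if (c >= 0xA0)
            printf(", state reset");
        printf("\n");
    }
    else
    {
        puts("invalid control byte (0x03-0x7F: XZ_DATA_ERROR)");
    }
}

int main(void)
{
    demo_classify_lzma2_control(0xE5);
    demo_classify_lzma2_control(0x01);
    return 0;
}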
diff --git a/depends/xz-embedded/src/xz_dec_stream.c b/depends/xz-embedded/src/xz_dec_stream.c
index d6525506..6e935ded 100644
--- a/depends/xz-embedded/src/xz_dec_stream.c
+++ b/depends/xz-embedded/src/xz_dec_stream.c
@@ -11,21 +11,24 @@
 #include "xz_stream.h"
 
 #ifdef XZ_USE_CRC64
-# define IS_CRC64(check_type) ((check_type) == XZ_CHECK_CRC64)
+#define IS_CRC64(check_type) ((check_type) == XZ_CHECK_CRC64)
 #else
-# define IS_CRC64(check_type) false
+#define IS_CRC64(check_type) false
 #endif
 
 /* Hash used to validate the Index field */
-struct xz_dec_hash {
+struct xz_dec_hash
+{
     vli_type unpadded;
     vli_type uncompressed;
     uint32_t crc32;
 };
 
-struct xz_dec {
+struct xz_dec
+{
     /* Position in dec_main() */
-    enum {
+    enum
+    {
         SEQ_STREAM_HEADER,
         SEQ_BLOCK_START,
         SEQ_BLOCK_HEADER,
@@ -69,7 +72,8 @@ struct xz_dec {
     bool allow_buf_error;
 
     /* Information stored in Block Header */
-    struct {
+    struct
+    {
         /*
        * Value stored in the Compressed Size field, or
        * VLI_UNKNOWN if Compressed Size is not present.
@@ -87,7 +91,8 @@ struct xz_dec {
     } block_header;
 
     /* Information collected when decoding Blocks */
-    struct {
+    struct
+    {
         /* Observed compressed size of the current Block */
         vli_type compressed;
 
@@ -105,9 +110,11 @@ struct xz_dec {
     } block;
 
     /* Variables needed when verifying the Index field */
-    struct {
+    struct
+    {
         /* Position in dec_index() */
-        enum {
+        enum
+        {
             SEQ_INDEX_COUNT,
             SEQ_INDEX_UNPADDED,
             SEQ_INDEX_UNCOMPRESSED
@@ -133,7 +140,8 @@ struct xz_dec {
    * to a multiple of four bytes; the size_t variables before it
    * should guarantee this.
    */
-    struct {
+    struct
+    {
         size_t pos;
         size_t size;
         uint8_t buf[1024];
@@ -149,14 +157,8 @@ struct xz_dec {
 
 #ifdef XZ_DEC_ANY_CHECK
 /* Sizes of the Check field with different Check IDs */
-static const uint8_t check_sizes[16] = {
-    0,
-    4, 4, 4,
-    8, 8, 8,
-    16, 16, 16,
-    32, 32, 32,
-    64, 64, 64
-};
+static const uint8_t check_sizes[16] = {0,  4,  4,  4,  8,  8,  8,  16,
+                                        16, 16, 32, 32, 32, 64, 64, 64};
 #endif
 
 /*
@@ -167,14 +169,14 @@ static const uint8_t check_sizes[16] = {
 */
 static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
 {
-    size_t copy_size = min_t(size_t,
-            b->in_size - b->in_pos, s->temp.size - s->temp.pos);
+    size_t copy_size = min_t(size_t, b->in_size - b->in_pos, s->temp.size - s->temp.pos);
 
     memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
     b->in_pos += copy_size;
     s->temp.pos += copy_size;
 
-    if (s->temp.pos == s->temp.size) {
+    if (s->temp.pos == s->temp.size)
+    {
         s->temp.pos = 0;
         return true;
     }
@@ -183,21 +185,22 @@ static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
 }
 
 /* Decode a variable-length integer (little-endian base-128 encoding) */
-static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
-                           size_t *in_pos, size_t in_size)
+static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, size_t *in_pos, size_t in_size)
 {
     uint8_t byte;
 
     if (s->pos == 0)
         s->vli = 0;
 
-    while (*in_pos < in_size) {
+    while (*in_pos < in_size)
+    {
         byte = in[*in_pos];
         ++*in_pos;
 
         s->vli |= (vli_type)(byte & 0x7F) << s->pos;
 
-        if ((byte & 0x80) == 0) {
+        if ((byte & 0x80) == 0)
+        {
             /* Don't allow non-minimal encodings. */
             if (byte == 0 && s->pos != 0)
                 return XZ_DATA_ERROR;
@@ -247,33 +250,28 @@ static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
    * There is no need to separately check for VLI_UNKNOWN, since
    * the observed sizes are always smaller than VLI_UNKNOWN.
    */
-    if (s->block.compressed > s->block_header.compressed
-            || s->block.uncompressed
-                > s->block_header.uncompressed)
+    if (s->block.compressed > s->block_header.compressed ||
+        s->block.uncompressed > s->block_header.uncompressed)
         return XZ_DATA_ERROR;
 
     if (s->check_type == XZ_CHECK_CRC32)
-        s->crc = xz_crc32(b->out + s->out_start,
-                b->out_pos - s->out_start, s->crc);
+        s->crc = xz_crc32(b->out + s->out_start, b->out_pos - s->out_start, s->crc);
 #ifdef XZ_USE_CRC64
     else if (s->check_type == XZ_CHECK_CRC64)
-        s->crc = xz_crc64(b->out + s->out_start,
-                b->out_pos - s->out_start, s->crc);
+        s->crc = xz_crc64(b->out + s->out_start, b->out_pos - s->out_start, s->crc);
 #endif
 
-    if (ret == XZ_STREAM_END) {
-        if (s->block_header.compressed != VLI_UNKNOWN
-                && s->block_header.compressed
-                    != s->block.compressed)
+    if (ret == XZ_STREAM_END)
+    {
+        if (s->block_header.compressed != VLI_UNKNOWN &&
+            s->block_header.compressed != s->block.compressed)
             return XZ_DATA_ERROR;
 
-        if (s->block_header.uncompressed != VLI_UNKNOWN
-                && s->block_header.uncompressed
-                    != s->block.uncompressed)
+        if (s->block_header.uncompressed != VLI_UNKNOWN &&
+            s->block_header.uncompressed != s->block.uncompressed)
             return XZ_DATA_ERROR;
 
-        s->block.hash.unpadded += s->block_header.size
-                + s->block.compressed;
+        s->block.hash.unpadded += s->block_header.size + s->block.compressed;
 
 #ifdef XZ_DEC_ANY_CHECK
         s->block.hash.unpadded += check_sizes[s->check_type];
@@ -285,9 +283,8 @@ static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
 #endif
 
         s->block.hash.uncompressed += s->block.uncompressed;
-        s->block.hash.crc32 = xz_crc32(
-                (const uint8_t *)&s->block.hash,
-                sizeof(s->block.hash), s->block.hash.crc32);
+        s->block.hash.crc32 = xz_crc32((const uint8_t *)&s->block.hash, sizeof(s->block.hash),
+                                       s->block.hash.crc32);
 
         ++s->block.count;
     }
@@ -315,14 +312,17 @@ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
 {
     enum xz_ret ret;
 
-    do {
+    do
+    {
         ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
-        if (ret != XZ_STREAM_END) {
+        if (ret != XZ_STREAM_END)
+        {
             index_update(s, b);
             return ret;
         }
 
-        switch (s->index.sequence) {
+        switch (s->index.sequence)
+        {
         case SEQ_INDEX_COUNT:
             s->index.count = s->vli;
 
@@ -344,10 +344,8 @@ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
         case SEQ_INDEX_UNCOMPRESSED:
             s->index.hash.uncompressed += s->vli;
-            s->index.hash.crc32 = xz_crc32(
-                    (const uint8_t *)&s->index.hash,
-                    sizeof(s->index.hash),
-                    s->index.hash.crc32);
+            s->index.hash.crc32 = xz_crc32((const uint8_t *)&s->index.hash,
+                                           sizeof(s->index.hash), s->index.hash.crc32);
             --s->index.count;
             s->index.sequence = SEQ_INDEX_UNPADDED;
             break;
@@ -362,10 +360,10 @@ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
 * of s->crc. s->pos must be zero when starting to validate the first byte.
 * The "bits" argument allows using the same code for both CRC32 and CRC64.
 */
-static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b,
-                                uint32_t bits)
+static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b, uint32_t bits)
 {
-    do {
+    do
+    {
         if (b->in_pos == b->in_size)
             return XZ_OK;
 
@@ -389,7 +387,8 @@ static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b,
 */
 static bool check_skip(struct xz_dec *s, struct xz_buf *b)
 {
-    while (s->pos < check_sizes[s->check_type]) {
+    while (s->pos < check_sizes[s->check_type])
+    {
         if (b->in_pos == b->in_size)
             return false;
 
@@ -409,8 +408,8 @@ static enum xz_ret dec_stream_header(struct xz_dec *s)
     if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
         return XZ_FORMAT_ERROR;
 
-    if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
-            != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
+    if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) !=
+        get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
         return XZ_DATA_ERROR;
 
     if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
@@ -476,49 +475,53 @@ static enum xz_ret dec_block_header(struct xz_dec *s)
    * eight bytes so this is safe.
    */
     s->temp.size -= 4;
-    if (xz_crc32(s->temp.buf, s->temp.size, 0)
-            != get_le32(s->temp.buf + s->temp.size))
+    if (xz_crc32(s->temp.buf, s->temp.size, 0) != get_le32(s->temp.buf + s->temp.size))
         return XZ_DATA_ERROR;
 
     s->temp.pos = 2;
 
-    /*
-     * Catch unsupported Block Flags. We support only one or two filters
-     * in the chain, so we catch that with the same test.
-     */
+/*
+ * Catch unsupported Block Flags. We support only one or two filters
+ * in the chain, so we catch that with the same test.
+ */
 #ifdef XZ_DEC_BCJ
     if (s->temp.buf[1] & 0x3E)
 #else
-    if (s->temp.buf[1] & 0x3F)
+    if (s->temp.buf[1] & 0x3F)
 #endif
-        return XZ_OPTIONS_ERROR;
+        return XZ_OPTIONS_ERROR;
 
     /* Compressed Size */
-    if (s->temp.buf[1] & 0x40) {
-        if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
-                    != XZ_STREAM_END)
+    if (s->temp.buf[1] & 0x40)
+    {
+        if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END)
             return XZ_DATA_ERROR;
 
         s->block_header.compressed = s->vli;
-    } else {
+    }
+    else
+    {
        s->block_header.compressed = VLI_UNKNOWN;
    }
 
     /* Uncompressed Size */
-    if (s->temp.buf[1] & 0x80) {
-        if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
-                    != XZ_STREAM_END)
+    if (s->temp.buf[1] & 0x80)
+    {
+        if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END)
            return XZ_DATA_ERROR;
 
        s->block_header.uncompressed = s->vli;
-    } else {
+    }
+    else
+    {
        s->block_header.uncompressed = VLI_UNKNOWN;
    }
 
 #ifdef XZ_DEC_BCJ
     /* If there are two filters, the first one must be a BCJ filter. */
     s->bcj_active = s->temp.buf[1] & 0x01;
-    if (s->bcj_active) {
+    if (s->bcj_active)
+    {
         if (s->temp.size - s->temp.pos < 2)
             return XZ_OPTIONS_ERROR;
 
@@ -577,8 +580,10 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
    */
     s->in_start = b->in_pos;
 
-    while (true) {
-        switch (s->sequence) {
+    while (true)
+    {
+        switch (s->sequence)
+        {
         case SEQ_STREAM_HEADER:
             /*
            * Stream Header is copied to s->temp, and then
@@ -610,7 +615,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
                 return XZ_OK;
 
             /* See if this is the beginning of the Index field. */
-            if (b->in[b->in_pos] == 0) {
+            if (b->in[b->in_pos] == 0)
+            {
                 s->in_start = b->in_pos++;
                 s->sequence = SEQ_INDEX;
                 break;
@@ -620,8 +626,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
            * Calculate the size of the Block Header and
            * prepare to decode it.
            */
-            s->block_header.size
-                = ((uint32_t)b->in[b->in_pos] + 1) * 4;
+            s->block_header.size = ((uint32_t)b->in[b->in_pos] + 1) * 4;
 
             s->temp.size = s->block_header.size;
             s->temp.pos = 0;
@@ -652,7 +657,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
            * anymore, so we use it here to test the size
            * of the Block Padding field.
            */
-            while (s->block.compressed & 3) {
+            while (s->block.compressed & 3)
+            {
                 if (b->in_pos == b->in_size)
                     return XZ_OK;
 
@@ -665,18 +671,21 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
             s->sequence = SEQ_BLOCK_CHECK;
 
         case SEQ_BLOCK_CHECK:
-            if (s->check_type == XZ_CHECK_CRC32) {
+            if (s->check_type == XZ_CHECK_CRC32)
+            {
                 ret = crc_validate(s, b, 32);
                 if (ret != XZ_STREAM_END)
                     return ret;
             }
-            else if (IS_CRC64(s->check_type)) {
+            else if (IS_CRC64(s->check_type))
+            {
                 ret = crc_validate(s, b, 64);
                 if (ret != XZ_STREAM_END)
                     return ret;
             }
 #ifdef XZ_DEC_ANY_CHECK
-            else if (!check_skip(s, b)) {
+            else if (!check_skip(s, b))
+            {
                 return XZ_OK;
             }
 #endif
@@ -692,9 +701,10 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
             s->sequence = SEQ_INDEX_PADDING;
 
         case SEQ_INDEX_PADDING:
-            while ((s->index.size + (b->in_pos - s->in_start))
-                    & 3) {
-                if (b->in_pos == b->in_size) {
+            while ((s->index.size + (b->in_pos - s->in_start)) & 3)
+            {
+                if (b->in_pos == b->in_size)
+                {
                     index_update(s, b);
                     return XZ_OK;
                 }
@@ -707,8 +717,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
             index_update(s, b);
 
             /* Compare the hashes to validate the Index field. */
-            if (!memeq(&s->block.hash, &s->index.hash,
-                    sizeof(s->block.hash)))
+            if (!memeq(&s->block.hash, &s->index.hash, sizeof(s->block.hash)))
                 return XZ_DATA_ERROR;
 
             s->sequence = SEQ_INDEX_CRC32;
@@ -770,23 +779,26 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
     out_start = b->out_pos;
     ret = dec_main(s, b);
 
-    if (DEC_IS_SINGLE(s->mode)) {
+    if (DEC_IS_SINGLE(s->mode))
+    {
         if (ret == XZ_OK)
-            ret = b->in_pos == b->in_size
-                    ? XZ_DATA_ERROR : XZ_BUF_ERROR;
+            ret = b->in_pos == b->in_size ? XZ_DATA_ERROR : XZ_BUF_ERROR;
 
-        if (ret != XZ_STREAM_END) {
+        if (ret != XZ_STREAM_END)
+        {
             b->in_pos = in_start;
             b->out_pos = out_start;
         }
-
-    } else if (ret == XZ_OK && in_start == b->in_pos
-            && out_start == b->out_pos) {
+    }
+    else if (ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos)
+    {
         if (s->allow_buf_error)
             ret = XZ_BUF_ERROR;
 
         s->allow_buf_error = true;
-    } else {
+    }
+    else
+    {
         s->allow_buf_error = false;
     }
 
@@ -837,7 +849,8 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
 
 XZ_EXTERN void xz_dec_end(struct xz_dec *s)
 {
-    if (s != NULL) {
+    if (s != NULL)
+    {
         xz_dec_lzma2_end(s->lzma2);
 #ifdef XZ_DEC_BCJ
         xz_dec_bcj_end(s->bcj);
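dec_vli() above accumulates seven payload bits per byte, least-significant group first, and stops at the first byte with the top bit clear. A standalone sketch of the same little-endian base-128 decoding (hypothetical demo without the incremental restart bookkeeping the streaming decoder needs):

/* Demo: decode one little-endian base-128 (VLI) integer from a buffer. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static int demo_dec_vli(const uint8_t *in, size_t in_size, uint64_t *out)
{
    uint64_t vli = 0;
    for (size_t i = 0; i < in_size && i < 9; ++i) /* VLI_BYTES_MAX is 9 */
    {
        vli |= (uint64_t)(in[i] & 0x7F) << (7 * i);
        if ((in[i] & 0x80) == 0)
        {
            /* Reject non-minimal encodings, as dec_vli() does. */
            if (in[i] == 0 && i != 0)
                return -1;
            *out = vli;
            return (int)(i + 1); /* bytes consumed */
        }
    }
    return -1; /* truncated or over-long encoding */
}

int main(void)
{
    const uint8_t enc[] = {0xAC, 0x02}; /* encodes 300 (0x12C) */
    uint64_t v;
    assert(demo_dec_vli(enc, sizeof(enc), &v) == 2 && v == 300);
    return 0;
}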
diff --git a/depends/xz-embedded/src/xz_lzma2.h b/depends/xz-embedded/src/xz_lzma2.h
index 071d67be..3976033a 100644
--- a/depends/xz-embedded/src/xz_lzma2.h
+++ b/depends/xz-embedded/src/xz_lzma2.h
@@ -39,7 +39,8 @@
 * The symbol names are in from STATE_oldest_older_previous. REP means
 * either short or long repeated match, and NONLIT means any non-literal.
 */
-enum lzma_state {
+enum lzma_state
+{
     STATE_LIT_LIT,
     STATE_MATCH_LIT_LIT,
     STATE_REP_LIT_LIT,
@@ -146,8 +147,7 @@ static inline bool lzma_state_is_literal(enum lzma_state state)
 */
 static inline uint32_t lzma_get_dist_state(uint32_t len)
 {
-    return len < DIST_STATES + MATCH_LEN_MIN
-            ? len - MATCH_LEN_MIN : DIST_STATES - 1;
+    return len < DIST_STATES + MATCH_LEN_MIN ? len - MATCH_LEN_MIN : DIST_STATES - 1;
 }
 
 /*
@@ -192,7 +192,7 @@ static inline uint32_t lzma_get_dist_state(uint32_t len)
 #define ALIGN_MASK (ALIGN_SIZE - 1)
 
 /* Total number of all probability variables */
-#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX *LITERAL_CODER_SIZE)
 
 /*
 * LZMA remembers the four most recent match distances. Reusing these
diff --git a/depends/xz-embedded/src/xz_private.h b/depends/xz-embedded/src/xz_private.h
index 482b90f3..55a3af1c 100644
--- a/depends/xz-embedded/src/xz_private.h
+++ b/depends/xz-embedded/src/xz_private.h
@@ -11,51 +11,50 @@
 #define XZ_PRIVATE_H
 
 #ifdef __KERNEL__
-# include <linux/xz.h>
-# include <linux/kernel.h>
-# include <asm/unaligned.h>
- /* XZ_PREBOOT may be defined only via decompress_unxz.c. */
-# ifndef XZ_PREBOOT
-#  include <linux/slab.h>
-#  include <linux/vmalloc.h>
-#  include <linux/string.h>
-#  ifdef CONFIG_XZ_DEC_X86
-#   define XZ_DEC_X86
-#  endif
-#  ifdef CONFIG_XZ_DEC_POWERPC
-#   define XZ_DEC_POWERPC
-#  endif
-#  ifdef CONFIG_XZ_DEC_IA64
-#   define XZ_DEC_IA64
-#  endif
-#  ifdef CONFIG_XZ_DEC_ARM
-#   define XZ_DEC_ARM
-#  endif
-#  ifdef CONFIG_XZ_DEC_ARMTHUMB
-#   define XZ_DEC_ARMTHUMB
-#  endif
-#  ifdef CONFIG_XZ_DEC_SPARC
-#   define XZ_DEC_SPARC
-#  endif
-#  define memeq(a, b, size) (memcmp(a, b, size) == 0)
-#  define memzero(buf, size) memset(buf, 0, size)
-# endif
-# define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#include <linux/xz.h>
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+/* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+#ifndef XZ_PREBOOT
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#ifdef CONFIG_XZ_DEC_X86
+#define XZ_DEC_X86
+#endif
+#ifdef CONFIG_XZ_DEC_POWERPC
+#define XZ_DEC_POWERPC
+#endif
+#ifdef CONFIG_XZ_DEC_IA64
+#define XZ_DEC_IA64
+#endif
+#ifdef CONFIG_XZ_DEC_ARM
+#define XZ_DEC_ARM
+#endif
+#ifdef CONFIG_XZ_DEC_ARMTHUMB
+#define XZ_DEC_ARMTHUMB
+#endif
+#ifdef CONFIG_XZ_DEC_SPARC
+#define XZ_DEC_SPARC
+#endif
+#define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#define memzero(buf, size) memset(buf, 0, size)
+#endif
+#define get_le32(p) le32_to_cpup((const uint32_t *)(p))
 #else
-    /*
-     * For userspace builds, use a separate header to define the required
-     * macros and functions. This makes it easier to adapt the code into
-     * different environments and avoids clutter in the Linux kernel tree.
-     */
-# include "xz_config.h"
+/*
+ * For userspace builds, use a separate header to define the required
+ * macros and functions. This makes it easier to adapt the code into
+ * different environments and avoids clutter in the Linux kernel tree.
+ */
+#include "xz_config.h"
 #endif
 
 /* If no specific decoding mode is requested, enable support for all modes. */
-#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
-        && !defined(XZ_DEC_DYNALLOC)
-# define XZ_DEC_SINGLE
-# define XZ_DEC_PREALLOC
-# define XZ_DEC_DYNALLOC
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) && !defined(XZ_DEC_DYNALLOC)
+#define XZ_DEC_SINGLE
+#define XZ_DEC_PREALLOC
+#define XZ_DEC_DYNALLOC
 #endif
 
 /*
@@ -64,29 +63,29 @@
 * false at compile time and thus allow the compiler to omit unneeded code.
 */
 #ifdef XZ_DEC_SINGLE
-# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
 #else
-# define DEC_IS_SINGLE(mode) (false)
+#define DEC_IS_SINGLE(mode) (false)
 #endif
 
 #ifdef XZ_DEC_PREALLOC
-# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
 #else
-# define DEC_IS_PREALLOC(mode) (false)
+#define DEC_IS_PREALLOC(mode) (false)
 #endif
 
 #ifdef XZ_DEC_DYNALLOC
-# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
 #else
-# define DEC_IS_DYNALLOC(mode) (false)
+#define DEC_IS_DYNALLOC(mode) (false)
 #endif
 
 #if !defined(XZ_DEC_SINGLE)
-# define DEC_IS_MULTI(mode) (true)
+#define DEC_IS_MULTI(mode) (true)
 #elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
-# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
 #else
-# define DEC_IS_MULTI(mode) (false)
+#define DEC_IS_MULTI(mode) (false)
 #endif
 
 /*
@@ -94,20 +93,18 @@
 * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
 */
 #ifndef XZ_DEC_BCJ
-# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
-        || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
-        || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
-        || defined(XZ_DEC_SPARC)
-#  define XZ_DEC_BCJ
-# endif
+#if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) || defined(XZ_DEC_IA64) || \
+    defined(XZ_DEC_ARM) || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) || \
+    defined(XZ_DEC_SPARC)
+#define XZ_DEC_BCJ
+#endif
 #endif
 
 /*
 * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
 * before calling xz_dec_lzma2_run().
 */
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
-                                                   uint32_t dict_max);
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max);
 
 /*
 * Decode the LZMA2 properties (one byte) and reset the decoder. Return
@@ -115,12 +112,10 @@
 * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
 * decoder doesn't support.
 */
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
-                                         uint8_t props);
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props);
 
 /* Decode raw LZMA2 stream from b->in to b->out. */
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
-                                       struct xz_buf *b);
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b);
 
 /* Free the memory allocated for the LZMA2 decoder. */
 XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
@@ -145,9 +140,8 @@
 * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
 * must be called directly.
 */
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
-                                     struct xz_dec_lzma2 *lzma2,
-                                     struct xz_buf *b);
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+                                     struct xz_buf *b);
 
 /* Free the memory allocated for the BCJ filters. */
 #define xz_dec_bcj_end(s) kfree(s)
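These declarations are the internal layer; callers normally use the public interface declared in xz.h instead. A hedged usage sketch of that public API, modeled on the upstream xz-embedded examples (the XZ_DYNALLOC mode and the 64 MiB dictionary cap are arbitrary choices here, and a CRC64-checked stream would additionally need xz_crc64_init()):

/* Demo: single-shot decompression loop over the public xz.h API. */
#include <stddef.h>
#include <stdint.h>
#include <xz.h>

int demo_decompress(const uint8_t *in, size_t in_size, uint8_t *out, size_t out_size)
{
    struct xz_buf b;
    struct xz_dec *s;
    enum xz_ret ret;

    xz_crc32_init(); /* must run once before any decoding */

    s = xz_dec_init(XZ_DYNALLOC, 1 << 26); /* dictionary limit: 64 MiB */
    if (s == NULL)
        return -1;

    b.in = in;
    b.in_pos = 0;
    b.in_size = in_size;
    b.out = out;
    b.out_pos = 0;
    b.out_size = out_size;

    do
    {
        ret = xz_dec_run(s, &b);
    } while (ret == XZ_OK);

    xz_dec_end(s);
    return ret == XZ_STREAM_END ? (int)b.out_pos : -1;
}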
diff --git a/depends/xz-embedded/src/xz_stream.h b/depends/xz-embedded/src/xz_stream.h
index 66cb5a70..c0e191e6 100644
--- a/depends/xz-embedded/src/xz_stream.h
+++ b/depends/xz-embedded/src/xz_stream.h
@@ -11,10 +11,9 @@
 #define XZ_STREAM_H
 
 #if defined(__KERNEL__) && !XZ_INTERNAL_CRC32
-# include <linux/crc32.h>
-# undef crc32
-# define xz_crc32(buf, size, crc) \
-     (~crc32_le(~(uint32_t)(crc), buf, size))
+#include <linux/crc32.h>
+#undef crc32
+#define xz_crc32(buf, size, crc) (~crc32_le(~(uint32_t)(crc), buf, size))
 #endif
 
 /*
@@ -42,14 +41,15 @@
 */
 typedef uint64_t vli_type;
 
-#define VLI_MAX ((vli_type)-1 / 2)
-#define VLI_UNKNOWN ((vli_type)-1)
+#define VLI_MAX ((vli_type) - 1 / 2)
+#define VLI_UNKNOWN ((vli_type) - 1)
 
 /* Maximum encoded size of a VLI */
 #define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
 
 /* Integrity Check types */
-enum xz_check {
+enum xz_check
+{
     XZ_CHECK_NONE = 0,
     XZ_CHECK_CRC32 = 1,
     XZ_CHECK_CRC64 = 4,