4 * Authors: Lasse Collin <lasse.collin@tukaani.org>
5 * Igor Pavlov <http://7-zip.org/>
7 * This file has been put into the public domain.
8 * You can do whatever you want with this file.
11 #include "xz_private.h"
15 * Range decoder initialization eats the first five bytes of each LZMA chunk.
17 #define RC_INIT_BYTES 5
20 * Minimum number of usable input bytes needed to safely decode one LZMA symbol.
21 * The worst case is that we decode 22 bits using probabilities and 26
22 * direct bits. This may consume a maximum of 20 bytes of input. However,
23 * lzma_main() does an extra normalization before returning, thus we
24 * need to put 21 here.
26 #define LZMA_IN_REQUIRED 21
29 * Dictionary (history buffer)
31 * These are always true:
32 * start <= pos <= full <= end
35 * In multi-call mode, also these are true:
40 * Most of these variables are size_t to support single-call mode,
41 * in which the dictionary variables address the actual output buffer directly.
45 /* Beginning of the history buffer */
48 /* Old position in buf (before decoding more data) */
55 * How full the dictionary is. This is used to detect corrupt input that
56 * would read beyond the beginning of the uncompressed stream.
60 /* Write limit; we don't write to buf[limit] or later bytes. */
64 * End of the dictionary buffer. In multi-call mode, this is
65 * the same as the dictionary size. In single-call mode, this
66 * indicates the size of the output buffer.
71 * Size of the dictionary as specified in Block Header. This is used
72 * together with "full" to detect corrupt input that would make us
73 * read beyond the beginning of the uncompressed stream.
78 * Maximum allowed dictionary size in multi-call mode.
79 * This is ignored in single-call mode.
84 * Amount of memory currently allocated for the dictionary.
85 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
86 * size_max is always the same as the allocated size.)
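/*
 * Illustrative sketch, not part of this file: a toy fixed-size dictionary
 * that maintains the invariant described above (start <= pos <= full <= end).
 * The struct and helper names are made up for this example.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct toy_dict {
	uint8_t buf[16];
	size_t start;	/* oldest byte not yet flushed to the caller */
	size_t pos;	/* next write position */
	size_t full;	/* how much of buf has ever contained data */
	size_t end;	/* one past the last usable byte of buf */
};

static void toy_put(struct toy_dict *d, uint8_t byte)
{
	d->buf[d->pos++] = byte;
	if (d->full < d->pos)
		d->full = d->pos;

	assert(d->start <= d->pos && d->pos <= d->full && d->full <= d->end);
}

int main(void)
{
	struct toy_dict d = { .end = sizeof(d.buf) };

	toy_put(&d, 'x');
	return 0;
}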
100 * Number of initializing bytes remaining to be read by rc_read_init().
103 uint32_t init_bytes_left;
106 * Buffer from which we read our input. It can be either
107 * temp.buf or the caller-provided input buffer.
114 /* Probabilities for a length decoder. */
115 struct lzma_len_dec {
116 /* Probability of match length being at least 10 */
119 /* Probability of match length being at least 18 */
122 /* Probabilities for match lengths 2-9 */
123 uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
125 /* Probabilities for match lengths 10-17 */
126 uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
128 /* Probabilities for match lengths 18-273 */
129 uint16_t high[LEN_HIGH_SYMBOLS];
133 /* Distances of latest four matches */
139 /* Types of the most recently seen LZMA symbols */
140 enum lzma_state state;
143 * Length of a match. This is updated so that dict_repeat can
144 * be called again to finish repeating the whole match.
149 * LZMA properties or related bit masks (number of literal
150 * context bits, a mask derived from the number of literal
151 * position bits, and a mask derived from the number of position bits).
155 uint32_t literal_pos_mask; /* (1 << lp) - 1 */
156 uint32_t pos_mask; /* (1 << pb) - 1 */
158 /* If 1, it's a match. Otherwise it's a single 8-bit literal. */
159 uint16_t is_match[STATES][POS_STATES_MAX];
161 /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
162 uint16_t is_rep[STATES];
165 * If 0, distance of a repeated match is rep0.
166 * Otherwise check is_rep1.
168 uint16_t is_rep0[STATES];
171 * If 0, distance of a repeated match is rep1.
172 * Otherwise check is_rep2.
174 uint16_t is_rep1[STATES];
176 /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
177 uint16_t is_rep2[STATES];
180 * If 1, the repeated match has length of one byte. Otherwise
181 * the length is decoded from rep_len_decoder.
183 uint16_t is_rep0_long[STATES][POS_STATES_MAX];
186 * Probability tree for the highest two bits of the match
187 * distance. There is a separate probability tree for match
188 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
190 uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
193 * Probability trees for additional bits for match distance
194 * when the distance is in the range [4, 127].
196 uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
199 * Probability tree for the lowest four bits of a match
200 * distance that is equal to or greater than 128.
202 uint16_t dist_align[ALIGN_SIZE];
204 /* Length of a normal match */
205 struct lzma_len_dec match_len_dec;
207 /* Length of a repeated match */
208 struct lzma_len_dec rep_len_dec;
210 /* Probabilities of literals */
211 uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
215 /* Position in xz_dec_lzma2_run(). */
228 /* Next position after decoding the compressed size of the chunk. */
229 enum lzma2_seq next_sequence;
231 /* Uncompressed size of LZMA chunk (2 MiB at maximum) */
232 uint32_t uncompressed;
235 * Compressed size of LZMA chunk or compressed/uncompressed
236 * size of uncompressed chunk (64 KiB at maximum)
241 * True if dictionary reset is needed. This is true before
242 * the first chunk (LZMA or uncompressed).
244 bool need_dict_reset;
247 * True if new LZMA properties are needed. This is true
248 * before the first LZMA chunk.
253 struct xz_dec_lzma2 {
255 * The order below is important on x86 to reduce code size and
256 * it shouldn't hurt on other platforms. Everything up to and
257 * including lzma.pos_mask are in the first 128 bytes on x86-32,
258 * which allows using smaller instructions to access those
259 * variables. On x86-64, fewer variables fit into the first 128
260 * bytes, but this is still the best order without sacrificing
261 * readability by splitting the structures.
264 struct dictionary dict;
265 struct lzma2_dec lzma2;
266 struct lzma_dec lzma;
269 * Temporary buffer which holds small number of input bytes between
270 * decoder calls. See lzma2_lzma() for details.
274 uint8_t buf[3 * LZMA_IN_REQUIRED];
283 * Reset the dictionary state. When in single-call mode, set up the beginning
284 * of the dictionary to point to the actual output buffer.
286 static void dict_reset(struct dictionary *dict, struct xz_buf *b)
288 if (DEC_IS_SINGLE(dict->mode)) {
289 dict->buf = b->out + b->out_pos;
290 dict->end = b->out_size - b->out_pos;
299 /* Set dictionary write limit */
300 static void dict_limit(struct dictionary *dict, size_t out_max)
302 if (dict->end - dict->pos <= out_max)
303 dict->limit = dict->end;
305 dict->limit = dict->pos + out_max;
308 /* Return true if at least one byte can be written into the dictionary. */
309 static inline bool dict_has_space(const struct dictionary *dict)
311 return dict->pos < dict->limit;
315 * Get a byte from the dictionary at the given distance. The distance is
316 * assumed to be valid, or as a special case, zero when the dictionary is
317 * still empty. This special case is needed for single-call decoding to
318 * avoid writing a '\0' to the end of the destination buffer.
320 static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
322 size_t offset = dict->pos - dist - 1;
324 if (dist >= dict->pos)
327 return dict->full > 0 ? dict->buf[offset] : 0;
331 * Put one byte into the dictionary. It is assumed that there is space for it.
333 static inline void dict_put(struct dictionary *dict, uint8_t byte)
335 dict->buf[dict->pos++] = byte;
337 if (dict->full < dict->pos)
338 dict->full = dict->pos;
342 * Repeat given number of bytes from the given distance. If the distance is
343 * invalid, false is returned. On success, true is returned and *len is
344 * updated to indicate how many bytes are left to be repeated.
346 static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
351 if (dist >= dict->full || dist >= dict->size)
354 left = min_t(size_t, dict->limit - dict->pos, *len);
357 back = dict->pos - dist - 1;
358 if (dist >= dict->pos)
362 dict->buf[dict->pos++] = dict->buf[back++];
363 if (back == dict->end)
365 } while (--left > 0);
367 if (dict->full < dict->pos)
368 dict->full = dict->pos;
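/*
 * Illustrative sketch, not part of this decoder: the byte-by-byte copy in
 * dict_repeat() also works when source and destination overlap, e.g. when
 * a match repeats the most recent byte many times (dist == 0 in the same
 * zero-based convention as above). memcpy() would not be safe for such
 * overlapping copies. Buffer and function names here are made up.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void lz_repeat(uint8_t *buf, size_t *pos, uint32_t dist, uint32_t len)
{
	size_t back = *pos - dist - 1;	/* dist == 0 means "the previous byte" */

	while (len-- > 0)
		buf[(*pos)++] = buf[back++];
}

int main(void)
{
	uint8_t buf[16] = "ab";
	size_t pos = 2;

	lz_repeat(buf, &pos, 0, 6);	/* repeat the last byte six times */
	printf("%.*s\n", (int)pos, (char *)buf);	/* prints "abbbbbbb" */
	return 0;
}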
373 /* Copy uncompressed data as is from input to dictionary and output buffers. */
374 static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
379 while (*left > 0 && b->in_pos < b->in_size
380 && b->out_pos < b->out_size) {
381 copy_size = min(b->in_size - b->in_pos,
382 b->out_size - b->out_pos);
383 if (copy_size > dict->end - dict->pos)
384 copy_size = dict->end - dict->pos;
385 if (copy_size > *left)
391 * If doing in-place decompression in single-call mode and the
392 * uncompressed size of the file is larger than the caller
393 * thought (i.e. it is invalid input!), the buffers below may
394 * overlap and cause undefined behavior with memcpy().
395 * With valid inputs memcpy() would be fine here.
397 memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
398 dict->pos += copy_size;
400 if (dict->full < dict->pos)
401 dict->full = dict->pos;
403 if (DEC_IS_MULTI(dict->mode)) {
404 if (dict->pos == dict->end)
408 * Like above but for multi-call mode: use memmove()
409 * to avoid undefined behavior with invalid input.
411 memmove(b->out + b->out_pos, b->in + b->in_pos,
415 dict->start = dict->pos;
417 b->out_pos += copy_size;
418 b->in_pos += copy_size;
423 * Flush pending data from dictionary to b->out. It is assumed that there is
424 * enough space in b->out. This is guaranteed because caller uses dict_limit()
425 * before decoding data into the dictionary.
427 static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
429 size_t copy_size = dict->pos - dict->start;
431 if (DEC_IS_MULTI(dict->mode)) {
432 if (dict->pos == dict->end)
436 * These buffers cannot overlap even if doing in-place
437 * decompression because in multi-call mode dict->buf
438 * has been allocated by us in this file; it's not
439 * provided by the caller like in single-call mode.
441 memcpy(b->out + b->out_pos, dict->buf + dict->start,
445 dict->start = dict->pos;
446 b->out_pos += copy_size;
454 /* Reset the range decoder. */
455 static void rc_reset(struct rc_dec *rc)
457 rc->range = (uint32_t)-1;
459 rc->init_bytes_left = RC_INIT_BYTES;
463 * Read the first five initial bytes into rc->code if they haven't been
464 * read already. (Yes, the first byte gets completely ignored.)
466 static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
468 while (rc->init_bytes_left > 0) {
469 if (b->in_pos == b->in_size)
472 rc->code = (rc->code << 8) + b->in[b->in_pos++];
473 --rc->init_bytes_left;
479 /* Return true if there may not be enough input for the next decoding loop. */
480 static inline bool rc_limit_exceeded(const struct rc_dec *rc)
482 return rc->in_pos > rc->in_limit;
486 * Return true if it is possible (from the point of view of the range
487 * decoder) that we have reached the end of the LZMA chunk.
489 static inline bool rc_is_finished(const struct rc_dec *rc)
491 return rc->code == 0;
494 /* Read the next input byte if needed. */
495 static __always_inline void rc_normalize(struct rc_dec *rc)
497 if (rc->range < RC_TOP_VALUE) {
498 rc->range <<= RC_SHIFT_BITS;
499 rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
504 * Decode one bit. In some versions, this function has been split into three
505 * functions so that the compiler is supposed to be able to more easily avoid
506 * an extra branch. In this particular version of the LZMA decoder, this
507 * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
508 * on x86). Using a non-split version results in nicer looking code too.
510 * NOTE: This must return an int. Do not make it return a bool or the speed
511 * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
512 * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
514 static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
520 bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
521 if (rc->code < bound) {
523 *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
528 *prob -= *prob >> RC_MOVE_BITS;
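/*
 * Illustrative sketch, not part of this decoder: a self-contained binary
 * range-decoder bit step in the spirit of rc_normalize() + rc_bit() above,
 * reading from a plain in-memory buffer. The constants are the usual LZMA
 * values (11-bit probability model, 5-bit adaptation shift, normalization
 * when range drops below 2^24). To start decoding, range is set to
 * 0xFFFFFFFF, code is filled from the first five input bytes, and every
 * probability starts at 1024, i.e. (1 << 11) / 2.
 */
#include <stddef.h>
#include <stdint.h>

struct toy_rc {
	uint32_t range;
	uint32_t code;
	const uint8_t *in;
	size_t in_pos;
};

static int toy_rc_bit(struct toy_rc *rc, uint16_t *prob)
{
	uint32_t bound;
	int bit;

	if (rc->range < (1u << 24)) {		/* normalize: pull in one byte */
		rc->range <<= 8;
		rc->code = (rc->code << 8) + rc->in[rc->in_pos++];
	}

	bound = (rc->range >> 11) * *prob;
	if (rc->code < bound) {
		rc->range = bound;
		*prob += ((1u << 11) - *prob) >> 5;	/* bias towards 0 bits */
		bit = 0;
	} else {
		rc->range -= bound;
		rc->code -= bound;
		*prob -= *prob >> 5;			/* bias towards 1 bits */
		bit = 1;
	}

	return bit;
}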
535 /* Decode a bittree starting from the most significant bit. */
536 static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
537 uint16_t *probs, uint32_t limit)
542 if (rc_bit(rc, &probs[symbol]))
543 symbol = (symbol << 1) + 1;
546 } while (symbol < limit);
551 /* Decode a bittree starting from the least significant bit. */
552 static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
554 uint32_t *dest, uint32_t limit)
560 if (rc_bit(rc, &probs[symbol])) {
561 symbol = (symbol << 1) + 1;
566 } while (++i < limit);
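/*
 * Illustrative sketch, not part of this decoder: the index arithmetic of
 * rc_bittree(). The probability index walks an implicit binary tree rooted
 * at index 1, and subtracting "limit" at the end strips the leading 1 bit.
 * The range coder is replaced by a fake bit array so the example stands
 * alone; a real decoder would call rc_bit(rc, &probs[symbol]) in the loop.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_bittree(const int *bits, uint32_t limit)
{
	uint32_t symbol = 1;	/* index 1 is the root of the implicit tree */
	size_t i = 0;

	do {
		symbol = (symbol << 1) + (uint32_t)bits[i++];
	} while (symbol < limit);

	return symbol - limit;
}

int main(void)
{
	int bits[] = { 1, 0, 1 };

	/* limit == 8 decodes three bits, most significant first: 0b101 == 5 */
	printf("%u\n", toy_bittree(bits, 8));
	return 0;
}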
569 /* Decode direct bits (fixed fifty-fifty probability) */
570 static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
577 rc->code -= rc->range;
578 mask = (uint32_t)0 - (rc->code >> 31);
579 rc->code += rc->range & mask;
580 *dest = (*dest << 1) + (mask + 1);
581 } while (--limit > 0);
588 /* Get pointer to literal coder probability array. */
589 static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
591 uint32_t prev_byte = dict_get(&s->dict, 0);
592 uint32_t low = prev_byte >> (8 - s->lzma.lc);
593 uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
594 return s->lzma.literal[low + high];
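/*
 * Illustrative sketch, not part of this decoder: how lc and the literal
 * position mask select a literal coder, mirroring lzma_literal_probs()
 * above. With the usual xz defaults (lc = 3, lp = 0) only the top three
 * bits of the previous byte matter, giving eight literal coders. The
 * function name is made up for this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t literal_coder_index(uint32_t lc, uint32_t literal_pos_mask,
				    uint32_t pos, uint8_t prev_byte)
{
	uint32_t low = prev_byte >> (8 - lc);
	uint32_t high = (pos & literal_pos_mask) << lc;

	return low + high;
}

int main(void)
{
	/* lc = 3, lp = 0: previous byte 0xC5 selects coder 0xC5 >> 5 == 6 */
	printf("%u\n", literal_coder_index(3, 0, 123, 0xC5));
	return 0;
}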
597 /* Decode a literal (one 8-bit byte) */
598 static void lzma_literal(struct xz_dec_lzma2 *s)
607 probs = lzma_literal_probs(s);
609 if (lzma_state_is_literal(s->lzma.state)) {
610 symbol = rc_bittree(&s->rc, probs, 0x100);
613 match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
617 match_bit = match_byte & offset;
619 i = offset + match_bit + symbol;
621 if (rc_bit(&s->rc, &probs[i])) {
622 symbol = (symbol << 1) + 1;
626 offset &= ~match_bit;
628 } while (symbol < 0x100);
631 dict_put(&s->dict, (uint8_t)symbol);
632 lzma_state_literal(&s->lzma.state);
635 /* Decode the length of the match into s->lzma.len. */
636 static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
642 if (!rc_bit(&s->rc, &l->choice)) {
643 probs = l->low[pos_state];
644 limit = LEN_LOW_SYMBOLS;
645 s->lzma.len = MATCH_LEN_MIN;
647 if (!rc_bit(&s->rc, &l->choice2)) {
648 probs = l->mid[pos_state];
649 limit = LEN_MID_SYMBOLS;
650 s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
653 limit = LEN_HIGH_SYMBOLS;
654 s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
659 s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
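/*
 * Illustrative sketch, not part of this decoder: the length ranges produced
 * by lzma_len() above. Which subtree won the choice bits and the symbol
 * decoded from that subtree are taken as plain parameters here; the names
 * are made up for this example.
 */
#include <stdint.h>
#include <stdio.h>

/* subtree: 0 = low (8 symbols), 1 = mid (8 symbols), 2 = high (256 symbols) */
static uint32_t toy_match_len(int subtree, uint32_t symbol)
{
	switch (subtree) {
	case 0:
		return 2 + symbol;		/* lengths 2..9 */
	case 1:
		return 2 + 8 + symbol;		/* lengths 10..17 */
	default:
		return 2 + 8 + 8 + symbol;	/* lengths 18..273 */
	}
}

int main(void)
{
	/* prints "2 17 273" */
	printf("%u %u %u\n", toy_match_len(0, 0), toy_match_len(1, 7),
	       toy_match_len(2, 255));
	return 0;
}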
662 /* Decode a match. The distance will be stored in s->lzma.rep0. */
663 static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
669 lzma_state_match(&s->lzma.state);
671 s->lzma.rep3 = s->lzma.rep2;
672 s->lzma.rep2 = s->lzma.rep1;
673 s->lzma.rep1 = s->lzma.rep0;
675 lzma_len(s, &s->lzma.match_len_dec, pos_state);
677 probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
678 dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
680 if (dist_slot < DIST_MODEL_START) {
681 s->lzma.rep0 = dist_slot;
683 limit = (dist_slot >> 1) - 1;
684 s->lzma.rep0 = 2 + (dist_slot & 1);
686 if (dist_slot < DIST_MODEL_END) {
687 s->lzma.rep0 <<= limit;
688 probs = s->lzma.dist_special + s->lzma.rep0
690 rc_bittree_reverse(&s->rc, probs,
691 &s->lzma.rep0, limit);
693 rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
694 s->lzma.rep0 <<= ALIGN_BITS;
695 rc_bittree_reverse(&s->rc, s->lzma.dist_align,
696 &s->lzma.rep0, ALIGN_BITS);
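/*
 * Illustrative sketch, not part of this decoder: the distance-slot scheme
 * used by lzma_match() above. A slot below 4 is the distance itself; a
 * larger slot encodes a base of 2 or 3 shifted left by (slot >> 1) - 1,
 * and that many extra low bits are decoded separately (reverse bittree
 * for small distances, direct bits plus the 4-bit align tree for large
 * ones). Names here are made up for this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_dist_base(uint32_t slot, uint32_t *extra_bits)
{
	if (slot < 4) {
		*extra_bits = 0;
		return slot;
	}

	*extra_bits = (slot >> 1) - 1;
	return (2 + (slot & 1)) << *extra_bits;
}

int main(void)
{
	uint32_t bits;
	uint32_t base = toy_dist_base(13, &bits);

	/* slot 13: base (2 + 1) << 5 == 96 plus 5 extra bits -> 96..127 */
	printf("base %u, extra bits %u\n", base, bits);
	return 0;
}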
702 * Decode a repeated match. The distance is taken from one of the four
703 * most recently seen matches. The distance will be stored in s->lzma.rep0.
705 static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
709 if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
710 if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
711 s->lzma.state][pos_state])) {
712 lzma_state_short_rep(&s->lzma.state);
717 if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
720 if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
724 s->lzma.rep3 = s->lzma.rep2;
727 s->lzma.rep2 = s->lzma.rep1;
730 s->lzma.rep1 = s->lzma.rep0;
734 lzma_state_long_rep(&s->lzma.state);
735 lzma_len(s, &s->lzma.rep_len_dec, pos_state);
738 /* LZMA decoder core */
739 static bool lzma_main(struct xz_dec_lzma2 *s)
744 * If the dictionary limit was reached during the previous call, try to
745 * finish the possibly pending repeat in the dictionary.
747 if (dict_has_space(&s->dict) && s->lzma.len > 0)
748 dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
751 * Decode more LZMA symbols. One iteration may consume up to
752 * LZMA_IN_REQUIRED - 1 bytes.
754 while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
755 pos_state = s->dict.pos & s->lzma.pos_mask;
757 if (!rc_bit(&s->rc, &s->lzma.is_match[
758 s->lzma.state][pos_state])) {
761 if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
762 lzma_rep_match(s, pos_state);
764 lzma_match(s, pos_state);
766 if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
772 * Having the range decoder always normalized when we are outside
773 * this function makes it easier to correctly handle end of the chunk.
775 rc_normalize(&s->rc);
781 * Reset the LZMA decoder and range decoder state. The dictionary is not reset
782 * here, because LZMA state may be reset without resetting the dictionary.
784 static void lzma_reset(struct xz_dec_lzma2 *s)
789 s->lzma.state = STATE_LIT_LIT;
796 * All probabilities are initialized to the same value. This hack
797 * makes the code smaller by avoiding a separate loop for each probability array.
800 * This could be optimized so that only the part of the literal
801 * probabilities that is actually required would be initialized. In the
802 * common case we would write 12 KiB less.
804 probs = s->lzma.is_match[0];
805 for (i = 0; i < PROBS_TOTAL; ++i)
806 probs[i] = RC_BIT_MODEL_TOTAL / 2;
812 * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
813 * from the decoded lp and pb values. On success, the LZMA decoder state is
814 * reset and true is returned.
816 static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
818 if (props > (4 * 5 + 4) * 9 + 8)
821 s->lzma.pos_mask = 0;
822 while (props >= 9 * 5) {
827 s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
829 s->lzma.literal_pos_mask = 0;
832 ++s->lzma.literal_pos_mask;
837 if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
840 s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
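/*
 * Illustrative sketch, not part of this decoder: splitting the LZMA
 * properties byte into lc/lp/pb with divisions instead of the subtraction
 * loops used in lzma_props() above. The encoding is (pb * 5 + lp) * 9 + lc,
 * and xz additionally requires lc + lp <= 4. The function name is made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool toy_split_props(uint8_t props, uint32_t *lc, uint32_t *lp,
			    uint32_t *pb)
{
	if (props > (4 * 5 + 4) * 9 + 8)	/* 224 is the largest valid value */
		return false;

	*lc = props % 9;
	props /= 9;
	*lp = props % 5;
	*pb = props / 5;

	return *lc + *lp <= 4;
}

int main(void)
{
	uint32_t lc, lp, pb;

	/* 0x5D (93) is the common default: lc = 3, lp = 0, pb = 2 */
	if (toy_split_props(0x5D, &lc, &lp, &pb))
		printf("lc=%u lp=%u pb=%u\n", lc, lp, pb);
	return 0;
}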
852 * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
853 * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
854 * wrapper function takes care of making the LZMA decoder's assumption safe.
856 * As long as there is plenty of input left to be decoded in the current LZMA
857 * chunk, we decode directly from the caller-supplied input buffer until
858 * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
859 * s->temp.buf, which (hopefully) gets filled on the next call to this
860 * function. We decode a few bytes from the temporary buffer so that we can
861 * continue decoding from the caller-supplied input buffer again.
863 static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
868 in_avail = b->in_size - b->in_pos;
869 if (s->temp.size > 0 || s->lzma2.compressed == 0) {
870 tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
871 if (tmp > s->lzma2.compressed - s->temp.size)
872 tmp = s->lzma2.compressed - s->temp.size;
876 memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
878 if (s->temp.size + tmp == s->lzma2.compressed) {
879 memzero(s->temp.buf + s->temp.size + tmp,
881 - s->temp.size - tmp);
882 s->rc.in_limit = s->temp.size + tmp;
883 } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
888 s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
891 s->rc.in = s->temp.buf;
894 if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
897 s->lzma2.compressed -= s->rc.in_pos;
899 if (s->rc.in_pos < s->temp.size) {
900 s->temp.size -= s->rc.in_pos;
901 memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
906 b->in_pos += s->rc.in_pos - s->temp.size;
910 in_avail = b->in_size - b->in_pos;
911 if (in_avail >= LZMA_IN_REQUIRED) {
913 s->rc.in_pos = b->in_pos;
915 if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
916 s->rc.in_limit = b->in_pos + s->lzma2.compressed;
918 s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
923 in_avail = s->rc.in_pos - b->in_pos;
924 if (in_avail > s->lzma2.compressed)
927 s->lzma2.compressed -= in_avail;
928 b->in_pos = s->rc.in_pos;
931 in_avail = b->in_size - b->in_pos;
932 if (in_avail < LZMA_IN_REQUIRED) {
933 if (in_avail > s->lzma2.compressed)
934 in_avail = s->lzma2.compressed;
936 memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
937 s->temp.size = in_avail;
938 b->in_pos += in_avail;
945 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
946 * decoding or copying of uncompressed chunks to other functions.
948 XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
953 while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
954 switch (s->lzma2.sequence) {
961 * 0x01 Dictionary reset followed by
962 * an uncompressed chunk
963 * 0x02 Uncompressed chunk (no dictionary reset)
965 * Highest three bits (s->control & 0xE0):
966 * 0xE0 Dictionary reset, new properties and state
967 * reset, followed by LZMA compressed chunk
968 * 0xC0 New properties and state reset, followed
969 * by LZMA compressed chunk (no dictionary
971 * 0xA0 State reset using old properties,
972 * followed by LZMA compressed chunk (no
974 * 0x80 LZMA chunk (no dictionary or state reset)
976 * For LZMA compressed chunks, the lowest five bits
977 * (s->control & 0x1F) are the highest bits of the
978 * uncompressed size (bits 16-20).
980 * A new LZMA2 stream must begin with a dictionary
981 * reset. The first LZMA chunk must set new
982 * properties and reset the LZMA state.
984 * Values that don't match anything described above
985 * are invalid and we return XZ_DATA_ERROR.
987 tmp = b->in[b->in_pos++];
990 return XZ_STREAM_END;
992 if (tmp >= 0xE0 || tmp == 0x01) {
993 s->lzma2.need_props = true;
994 s->lzma2.need_dict_reset = false;
995 dict_reset(&s->dict, b);
996 } else if (s->lzma2.need_dict_reset) {
997 return XZ_DATA_ERROR;
1001 s->lzma2.uncompressed = (tmp & 0x1F) << 16;
1002 s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
1006 * When there are new properties,
1007 * state reset is done at SEQ_PROPERTIES.
1010 s->lzma2.need_props = false;
1011 s->lzma2.next_sequence
1014 } else if (s->lzma2.need_props) {
1015 return XZ_DATA_ERROR;
1018 s->lzma2.next_sequence
1025 return XZ_DATA_ERROR;
1027 s->lzma2.sequence = SEQ_COMPRESSED_0;
1028 s->lzma2.next_sequence = SEQ_COPY;
1033 case SEQ_UNCOMPRESSED_1:
1034 s->lzma2.uncompressed
1035 += (uint32_t)b->in[b->in_pos++] << 8;
1036 s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
1039 case SEQ_UNCOMPRESSED_2:
1040 s->lzma2.uncompressed
1041 += (uint32_t)b->in[b->in_pos++] + 1;
1042 s->lzma2.sequence = SEQ_COMPRESSED_0;
1045 case SEQ_COMPRESSED_0:
1047 = (uint32_t)b->in[b->in_pos++] << 8;
1048 s->lzma2.sequence = SEQ_COMPRESSED_1;
1051 case SEQ_COMPRESSED_1:
1053 += (uint32_t)b->in[b->in_pos++] + 1;
1054 s->lzma2.sequence = s->lzma2.next_sequence;
1057 case SEQ_PROPERTIES:
1058 if (!lzma_props(s, b->in[b->in_pos++]))
1059 return XZ_DATA_ERROR;
1061 s->lzma2.sequence = SEQ_LZMA_PREPARE;
1065 case SEQ_LZMA_PREPARE:
1066 if (s->lzma2.compressed < RC_INIT_BYTES)
1067 return XZ_DATA_ERROR;
1069 if (!rc_read_init(&s->rc, b))
1072 s->lzma2.compressed -= RC_INIT_BYTES;
1073 s->lzma2.sequence = SEQ_LZMA_RUN;
1079 * Set dictionary limit to indicate how much we want
1080 * to be decoded at maximum. Decode new data into the
1081 * dictionary. Flush the new data from dictionary to
1082 * b->out. Check if we finished decoding this chunk.
1083 * In case the dictionary got full but we didn't fill
1084 * the output buffer yet, we may run this loop
1085 * multiple times without changing s->lzma2.sequence.
1087 dict_limit(&s->dict, min_t(size_t,
1088 b->out_size - b->out_pos,
1089 s->lzma2.uncompressed));
1090 if (!lzma2_lzma(s, b))
1091 return XZ_DATA_ERROR;
1093 s->lzma2.uncompressed -= dict_flush(&s->dict, b);
1095 if (s->lzma2.uncompressed == 0) {
1096 if (s->lzma2.compressed > 0 || s->lzma.len > 0
1097 || !rc_is_finished(&s->rc))
1098 return XZ_DATA_ERROR;
1101 s->lzma2.sequence = SEQ_CONTROL;
1103 } else if (b->out_pos == b->out_size
1104 || (b->in_pos == b->in_size
1106 < s->lzma2.compressed)) {
1113 dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
1114 if (s->lzma2.compressed > 0)
1117 s->lzma2.sequence = SEQ_CONTROL;
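/*
 * Illustrative sketch, not part of this decoder: classifying an LZMA2
 * control byte according to the layout described in SEQ_CONTROL above.
 * The enum and function names are made up for this example.
 */
#include <stdint.h>
#include <stdio.h>

enum toy_chunk_kind {
	TOY_CHUNK_END,		/* 0x00: end of the LZMA2 data */
	TOY_CHUNK_UNCOMP_RESET,	/* 0x01: dictionary reset + uncompressed chunk */
	TOY_CHUNK_UNCOMP,	/* 0x02: uncompressed chunk, no dictionary reset */
	TOY_CHUNK_LZMA,		/* 0x80..0xFF: LZMA chunk */
	TOY_CHUNK_INVALID	/* 0x03..0x7F are not allowed */
};

static enum toy_chunk_kind toy_classify_control(uint8_t control)
{
	if (control == 0x00)
		return TOY_CHUNK_END;
	if (control == 0x01)
		return TOY_CHUNK_UNCOMP_RESET;
	if (control == 0x02)
		return TOY_CHUNK_UNCOMP;
	if (control >= 0x80)
		return TOY_CHUNK_LZMA;	/* bits 5-6 select the reset level */
	return TOY_CHUNK_INVALID;
}

int main(void)
{
	/* prints 3 (TOY_CHUNK_LZMA) for the "dict + props + state reset" byte */
	printf("%d\n", toy_classify_control(0xE0));
	return 0;
}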
1125 XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
1128 struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
1132 s->dict.mode = mode;
1133 s->dict.size_max = dict_max;
1135 if (DEC_IS_PREALLOC(mode)) {
1136 s->dict.buf = vmalloc(dict_max);
1137 if (s->dict.buf == NULL) {
1141 } else if (DEC_IS_DYNALLOC(mode)) {
1143 s->dict.allocated = 0;
1149 XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
1151 /* This limits dictionary size to 3 GiB to keep parsing simpler. */
1153 return XZ_OPTIONS_ERROR;
1155 s->dict.size = 2 + (props & 1);
1156 s->dict.size <<= (props >> 1) + 11;
1158 if (DEC_IS_MULTI(s->dict.mode)) {
1159 if (s->dict.size > s->dict.size_max)
1160 return XZ_MEMLIMIT_ERROR;
1162 s->dict.end = s->dict.size;
1164 if (DEC_IS_DYNALLOC(s->dict.mode)) {
1165 if (s->dict.allocated < s->dict.size) {
1166 s->dict.allocated = s->dict.size;
1168 s->dict.buf = vmalloc(s->dict.size);
1169 if (s->dict.buf == NULL) {
1170 s->dict.allocated = 0;
1171 return XZ_MEM_ERROR;
1179 s->lzma2.sequence = SEQ_CONTROL;
1180 s->lzma2.need_dict_reset = true;
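/*
 * Illustrative sketch, not part of this decoder: the dictionary-size
 * encoding handled in xz_dec_lzma2_reset() above. Property byte values
 * 0..39 alternate between "2 << n" and "3 << n", giving sizes from 4 KiB
 * up to the 3 GiB limit mentioned above. The function name is made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t toy_dict_size(uint8_t props)
{
	uint64_t size = 2 + (props & 1);

	return size << ((props >> 1) + 11);
}

int main(void)
{
	/* props 0 -> 4096, props 1 -> 6144, props 39 -> 3221225472 (3 GiB) */
	printf("%llu %llu %llu\n",
	       (unsigned long long)toy_dict_size(0),
	       (unsigned long long)toy_dict_size(1),
	       (unsigned long long)toy_dict_size(39));
	return 0;
}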
1187 XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
1189 if (DEC_IS_MULTI(s->dict.mode))