Magellan Linux

Contents of /trunk/mkinitrd-magellan/busybox/archival/libunarchive/decompress_unlzma.c



Revision 532
Sat Sep 1 22:45:15 2007 UTC by niro
File MIME type: text/plain
File size: 12810 bytes
-import of magellan mkinitrd; it is a fork of Red Hat's mkinitrd-5.0.8 with all magellan patches and features; deprecates magellan-src/mkinitrd

/* vi: set sw=4 ts=4: */
/*
 * Small lzma deflate implementation.
 * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (C) 1999-2005 Igor Pavlov
 *
 * Licensed under GPLv2 or later, see file LICENSE in this tarball for details.
 */

#include "libbb.h"
#include "unarchive.h"

#ifdef CONFIG_FEATURE_LZMA_FAST
# define speed_inline ATTRIBUTE_ALWAYS_INLINE
#else
# define speed_inline
#endif


typedef struct {
	int fd;
	uint8_t *ptr;

	/* Was keeping rc on stack in unlzma and separately allocating buffer,
	 * but with "buffer 'attached to' allocated rc" code is smaller: */
	/* uint8_t *buffer; */
#define RC_BUFFER ((uint8_t*)(rc+1))

	uint8_t *buffer_end;

	/* Had provisions for variable buffer, but we don't need it here */
	/* int buffer_size; */
#define RC_BUFFER_SIZE 0x10000

	uint32_t code;
	uint32_t range;
	uint32_t bound;
} rc_t;

#define RC_TOP_BITS 24
#define RC_MOVE_BITS 5
#define RC_MODEL_TOTAL_BITS 11
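
/* Range-coder parameters, as in the LZMA SDK:
 * RC_TOP_BITS - when 'range' drops below 2^24, rc_normalize() shifts
 *   one more input byte into 'code' and 'range';
 * RC_MODEL_TOTAL_BITS - probabilities are 11-bit fixed point, so
 *   (1 << 11) == 2048 stands for 1.0 and 1024 for 0.5;
 * RC_MOVE_BITS - after each decoded bit, the probability moves 1/32
 *   of the remaining distance toward 0 or 2048. */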

/* Called twice: once at startup and once in rc_normalize() */
static void rc_read(rc_t * rc)
{
	int buffer_size = safe_read(rc->fd, RC_BUFFER, RC_BUFFER_SIZE);
	if (buffer_size <= 0)
		bb_error_msg_and_die("unexpected EOF");
	rc->ptr = RC_BUFFER;
	rc->buffer_end = RC_BUFFER + buffer_size;
}

/* Called once */
static rc_t* rc_init(int fd) /*, int buffer_size) */
{
	int i;
	rc_t* rc;

	rc = xmalloc(sizeof(rc_t) + RC_BUFFER_SIZE);

	rc->fd = fd;
	/* rc->buffer_size = buffer_size; */
	rc->buffer_end = RC_BUFFER + RC_BUFFER_SIZE;
	rc->ptr = rc->buffer_end;

	rc->code = 0;
	rc->range = 0xFFFFFFFF;
	for (i = 0; i < 5; i++) {
		if (rc->ptr >= rc->buffer_end)
			rc_read(rc);
		rc->code = (rc->code << 8) | *rc->ptr++;
	}
	return rc;
}
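
/* The loop above shifts five bytes into the 32-bit 'code', so the very
 * first stream byte falls off the top entirely; standard LZMA encoders
 * always emit 0 there, which is why it can simply be discarded. */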

/* Called once */
static ATTRIBUTE_ALWAYS_INLINE void rc_free(rc_t * rc)
{
	if (ENABLE_FEATURE_CLEAN_UP)
		free(rc);
}

/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
static void rc_do_normalize(rc_t * rc)
{
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}
static ATTRIBUTE_ALWAYS_INLINE void rc_normalize(rc_t * rc)
{
	if (rc->range < (1 << RC_TOP_BITS)) {
		rc_do_normalize(rc);
	}
}

/* Called 9 times */
/* Why does rc_is_bit_0_helper exist?
 * Because we want to always expose (rc->code < rc->bound) to the optimizer.
 */
static speed_inline uint32_t rc_is_bit_0_helper(rc_t * rc, uint16_t * p)
{
	rc_normalize(rc);
	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
	return rc->bound;
}
static ATTRIBUTE_ALWAYS_INLINE int rc_is_bit_0(rc_t * rc, uint16_t * p)
{
	uint32_t t = rc_is_bit_0_helper(rc, p);
	return rc->code < t;
}
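
/* bound = p * (range >> 11) splits the current range in proportion to
 * the 11-bit probability p: code < bound decodes a 0-bit and the range
 * shrinks to bound; otherwise a 1-bit, and both code and range are
 * reduced by bound (see rc_update_bit_0/1 below). */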

/* Called ~10 times, but very small, thus inlined */
static speed_inline void rc_update_bit_0(rc_t * rc, uint16_t * p)
{
	rc->range = rc->bound;
	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
}
static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
{
	rc->range -= rc->bound;
	rc->code -= rc->bound;
	*p -= *p >> RC_MOVE_BITS;
}
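
/* Worked example of the adaptive update, with RC_MOVE_BITS == 5:
 * from p == 1024 (0.5), a 0-bit moves p to 1024 + ((2048 - 1024) >> 5)
 * = 1056, while a 1-bit moves it to 1024 - (1024 >> 5) = 992. Frequent
 * bit values thus pull their probability toward the matching end. */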

/* Called 4 times in unlzma loop */
static int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
{
	if (rc_is_bit_0(rc, p)) {
		rc_update_bit_0(rc, p);
		*symbol *= 2;
		return 0;
	} else {
		rc_update_bit_1(rc, p);
		*symbol = *symbol * 2 + 1;
		return 1;
	}
}

/* Called once */
static ATTRIBUTE_ALWAYS_INLINE int rc_direct_bit(rc_t * rc)
{
	rc_normalize(rc);
	rc->range >>= 1;
	if (rc->code >= rc->range) {
		rc->code -= rc->range;
		return 1;
	}
	return 0;
}
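
/* Direct bits carry no probability model: the range is simply halved.
 * unlzma() uses them for the middle bits of large match distances,
 * which are close enough to uniformly distributed that modeling them
 * would not pay off. */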

/* Called twice */
static speed_inline void
rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
{
	int i = num_levels;

	*symbol = 1;
	while (i--)
		rc_get_bit(rc, p + *symbol, symbol);
	*symbol -= 1 << num_levels;
}
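
/* The tree walk above keeps an implicit leading 1 in *symbol so that
 * each node indexes its own probability in p[]. For num_levels == 2
 * and decoded bits 1,0: *symbol goes 1 -> 3 -> 6, and subtracting
 * (1 << 2) leaves 2, i.e. binary "10" read MSB-first. */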

typedef struct {
	uint8_t pos;
	uint32_t dict_size;
	uint64_t dst_size;
} __attribute__ ((packed)) lzma_header_t;
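
/* This mirrors the 13-byte header of the classic ".lzma" container:
 * one properties byte pos = (pb * 5 + lp) * 9 + lc, a little-endian
 * 32-bit dictionary size and a little-endian 64-bit uncompressed size.
 * "packed" keeps the struct at 13 bytes so that a single xread() below
 * fills it; the multi-byte fields are byte-swapped after reading. */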

/* #defines will force compiler to compute/optimize each one with each usage.
 * Have heart and use enum instead. */
enum {
	LZMA_BASE_SIZE = 1846,
	LZMA_LIT_SIZE = 768,

	LZMA_NUM_POS_BITS_MAX = 4,

	LZMA_LEN_NUM_LOW_BITS = 3,
	LZMA_LEN_NUM_MID_BITS = 3,
	LZMA_LEN_NUM_HIGH_BITS = 8,

	LZMA_LEN_CHOICE = 0,
	LZMA_LEN_CHOICE_2 = (LZMA_LEN_CHOICE + 1),
	LZMA_LEN_LOW = (LZMA_LEN_CHOICE_2 + 1),
	LZMA_LEN_MID = (LZMA_LEN_LOW \
	                + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))),
	LZMA_LEN_HIGH = (LZMA_LEN_MID \
	                 + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))),
	LZMA_NUM_LEN_PROBS = (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)),

	LZMA_NUM_STATES = 12,
	LZMA_NUM_LIT_STATES = 7,

	LZMA_START_POS_MODEL_INDEX = 4,
	LZMA_END_POS_MODEL_INDEX = 14,
	LZMA_NUM_FULL_DISTANCES = (1 << (LZMA_END_POS_MODEL_INDEX >> 1)),

	LZMA_NUM_POS_SLOT_BITS = 6,
	LZMA_NUM_LEN_TO_POS_STATES = 4,

	LZMA_NUM_ALIGN_BITS = 4,

	LZMA_MATCH_MIN_LEN = 2,

	LZMA_IS_MATCH = 0,
	LZMA_IS_REP = (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)),
	LZMA_IS_REP_G0 = (LZMA_IS_REP + LZMA_NUM_STATES),
	LZMA_IS_REP_G1 = (LZMA_IS_REP_G0 + LZMA_NUM_STATES),
	LZMA_IS_REP_G2 = (LZMA_IS_REP_G1 + LZMA_NUM_STATES),
	LZMA_IS_REP_0_LONG = (LZMA_IS_REP_G2 + LZMA_NUM_STATES),
	LZMA_POS_SLOT = (LZMA_IS_REP_0_LONG \
	                 + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)),
	LZMA_SPEC_POS = (LZMA_POS_SLOT \
	                 + (LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)),
	LZMA_ALIGN = (LZMA_SPEC_POS \
	              + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX),
	LZMA_LEN_CODER = (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)),
	LZMA_REP_LEN_CODER = (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS),
	LZMA_LITERAL = (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS),
};
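
/* The LZMA_IS_MATCH .. LZMA_LITERAL values are offsets into one flat
 * uint16_t array p[] that replaces the separate model arrays of the
 * LZMA SDK; they add up so that LZMA_LITERAL == LZMA_BASE_SIZE (1846).
 * The literal tables, LZMA_LIT_SIZE << (lc + lp) entries, sit at the
 * end and are sized at run time from the header. LZMA_LIT_SIZE is
 * 0x300 because matched-literal decoding (the match_byte loop below)
 * selects between two extra banks of 0x100 probabilities. */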


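/* Decoder entry point: reads one ".lzma" stream from src_fd and writes
 * the decompressed data to dst_fd. With CONFIG_DESKTOP the return value
 * is the number of bytes written (long long); otherwise 0 on success.
 * A short write returns -1; corrupt input dies via bb_error_msg_and_die. */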
USE_DESKTOP(long long) int
unlzma(int src_fd, int dst_fd)
{
	USE_DESKTOP(long long total_written = 0;)
	lzma_header_t header;
	int lc, pb, lp;
	uint32_t pos_state_mask;
	uint32_t literal_pos_mask;
	uint32_t pos;
	uint16_t *p;
	uint16_t *prob;
	uint16_t *prob_lit;
	int num_bits;
	int num_probs;
	rc_t *rc;
	int i, mi;
	uint8_t *buffer;
	uint8_t previous_byte = 0;
	size_t buffer_pos = 0, global_pos = 0;
	int len = 0;
	int state = 0;
	uint32_t rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;

	xread(src_fd, &header, sizeof(header));

	if (header.pos >= (9 * 5 * 5))
		bb_error_msg_and_die("bad header");
	mi = header.pos / 9;
	lc = header.pos % 9;
	pb = mi / 5;
	lp = mi % 5;
	pos_state_mask = (1 << pb) - 1;
	literal_pos_mask = (1 << lp) - 1;
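
	/* Example: the common properties byte 0x5D (93) gives
	 * lc = 93 % 9 = 3 literal context bits, then 93 / 9 = 10 gives
	 * pb = 10 / 5 = 2 position bits and lp = 10 % 5 = 0 literal
	 * position bits - the lzma utility's default settings. */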

	header.dict_size = SWAP_LE32(header.dict_size);
	header.dst_size = SWAP_LE64(header.dst_size);

	if (header.dict_size == 0)
		header.dict_size = 1;

	buffer = xmalloc(MIN(header.dst_size, header.dict_size));

	num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
	p = xmalloc(num_probs * sizeof(*p));
	num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
	for (i = 0; i < num_probs; i++)
		p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
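
	/* Every probability starts at 1024, i.e. 0.5 in the decoder's 11-bit
	 * fixed point; rc_update_bit_0/1 adapt each one from there. */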

	rc = rc_init(src_fd); /*, RC_BUFFER_SIZE); */

	while (global_pos + buffer_pos < header.dst_size) {
		int pos_state = (buffer_pos + global_pos) & pos_state_mask;

		prob = p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state;
		if (rc_is_bit_0(rc, prob)) {
			mi = 1;
			rc_update_bit_0(rc, prob);
			prob = (p + LZMA_LITERAL + (LZMA_LIT_SIZE
			        * ((((buffer_pos + global_pos) & literal_pos_mask) << lc)
			           + (previous_byte >> (8 - lc)))));

			if (state >= LZMA_NUM_LIT_STATES) {
				int match_byte;

				pos = buffer_pos - rep0;
				while (pos >= header.dict_size)
					pos += header.dict_size;
				match_byte = buffer[pos];
				do {
					int bit;

					match_byte <<= 1;
					bit = match_byte & 0x100;
					prob_lit = prob + 0x100 + bit + mi;
					if (rc_get_bit(rc, prob_lit, &mi)) {
						if (!bit)
							break;
					} else {
						if (bit)
							break;
					}
				} while (mi < 0x100);
			}
			while (mi < 0x100) {
				prob_lit = prob + mi;
				rc_get_bit(rc, prob_lit, &mi);
			}
			previous_byte = (uint8_t) mi;

			buffer[buffer_pos++] = previous_byte;
			if (buffer_pos == header.dict_size) {
				buffer_pos = 0;
				global_pos += header.dict_size;
				if (full_write(dst_fd, buffer, header.dict_size) != header.dict_size)
					goto bad;
				USE_DESKTOP(total_written += header.dict_size;)
			}
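
			/* States 0-6 mean the recent output was literals, 7-11 that
			 * a match was just copied; emitting a literal steps the
			 * state machine back toward the pure-literal states. */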
			if (state < 4)
				state = 0;
			else if (state < 10)
				state -= 3;
			else
				state -= 6;
		} else {
			int offset;
			uint16_t *prob_len;

			rc_update_bit_1(rc, prob);
			prob = p + LZMA_IS_REP + state;
			if (rc_is_bit_0(rc, prob)) {
				rc_update_bit_0(rc, prob);
				rep3 = rep2;
				rep2 = rep1;
				rep1 = rep0;
				state = state < LZMA_NUM_LIT_STATES ? 0 : 3;
				prob = p + LZMA_LEN_CODER;
			} else {
				rc_update_bit_1(rc, prob);
				prob = p + LZMA_IS_REP_G0 + state;
				if (rc_is_bit_0(rc, prob)) {
					rc_update_bit_0(rc, prob);
					prob = (p + LZMA_IS_REP_0_LONG
					        + (state << LZMA_NUM_POS_BITS_MAX) + pos_state);
					if (rc_is_bit_0(rc, prob)) {
						rc_update_bit_0(rc, prob);

						state = state < LZMA_NUM_LIT_STATES ? 9 : 11;
						pos = buffer_pos - rep0;
						while (pos >= header.dict_size)
							pos += header.dict_size;
						previous_byte = buffer[pos];
						buffer[buffer_pos++] = previous_byte;
						if (buffer_pos == header.dict_size) {
							buffer_pos = 0;
							global_pos += header.dict_size;
							if (full_write(dst_fd, buffer, header.dict_size) != header.dict_size)
								goto bad;
							USE_DESKTOP(total_written += header.dict_size;)
						}
						continue;
					} else {
						rc_update_bit_1(rc, prob);
					}
				} else {
					uint32_t distance;

					rc_update_bit_1(rc, prob);
					prob = p + LZMA_IS_REP_G1 + state;
					if (rc_is_bit_0(rc, prob)) {
						rc_update_bit_0(rc, prob);
						distance = rep1;
					} else {
						rc_update_bit_1(rc, prob);
						prob = p + LZMA_IS_REP_G2 + state;
						if (rc_is_bit_0(rc, prob)) {
							rc_update_bit_0(rc, prob);
							distance = rep2;
						} else {
							rc_update_bit_1(rc, prob);
							distance = rep3;
							rep3 = rep2;
						}
						rep2 = rep1;
					}
					rep1 = rep0;
					rep0 = distance;
				}
				state = state < LZMA_NUM_LIT_STATES ? 8 : 11;
				prob = p + LZMA_REP_LEN_CODER;
			}

			prob_len = prob + LZMA_LEN_CHOICE;
			if (rc_is_bit_0(rc, prob_len)) {
				rc_update_bit_0(rc, prob_len);
				prob_len = (prob + LZMA_LEN_LOW
				            + (pos_state << LZMA_LEN_NUM_LOW_BITS));
				offset = 0;
				num_bits = LZMA_LEN_NUM_LOW_BITS;
			} else {
				rc_update_bit_1(rc, prob_len);
				prob_len = prob + LZMA_LEN_CHOICE_2;
				if (rc_is_bit_0(rc, prob_len)) {
					rc_update_bit_0(rc, prob_len);
					prob_len = (prob + LZMA_LEN_MID
					            + (pos_state << LZMA_LEN_NUM_MID_BITS));
					offset = 1 << LZMA_LEN_NUM_LOW_BITS;
					num_bits = LZMA_LEN_NUM_MID_BITS;
				} else {
					rc_update_bit_1(rc, prob_len);
					prob_len = prob + LZMA_LEN_HIGH;
					offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
					          + (1 << LZMA_LEN_NUM_MID_BITS));
					num_bits = LZMA_LEN_NUM_HIGH_BITS;
				}
			}
			rc_bit_tree_decode(rc, prob_len, num_bits, &len);
			len += offset;

			if (state < 4) {
				int pos_slot;

				state += LZMA_NUM_LIT_STATES;
				prob = p + LZMA_POS_SLOT +
				       ((len < LZMA_NUM_LEN_TO_POS_STATES ? len :
				         LZMA_NUM_LEN_TO_POS_STATES - 1)
				        << LZMA_NUM_POS_SLOT_BITS);
				rc_bit_tree_decode(rc, prob, LZMA_NUM_POS_SLOT_BITS,
				                   &pos_slot);
				if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
					num_bits = (pos_slot >> 1) - 1;
					rep0 = 2 | (pos_slot & 1);
					if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
						rep0 <<= num_bits;
						prob = p + LZMA_SPEC_POS + rep0 - pos_slot - 1;
					} else {
						num_bits -= LZMA_NUM_ALIGN_BITS;
						while (num_bits--)
							rep0 = (rep0 << 1) | rc_direct_bit(rc);
						prob = p + LZMA_ALIGN;
						rep0 <<= LZMA_NUM_ALIGN_BITS;
						num_bits = LZMA_NUM_ALIGN_BITS;
					}
					i = 1;
					mi = 1;
					while (num_bits--) {
						if (rc_get_bit(rc, prob + mi, &mi))
							rep0 |= i;
						i <<= 1;
					}
				} else
					rep0 = pos_slot;
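				/* Distance 0xFFFFFFFF makes ++rep0 wrap to zero below;
				 * that value is the encoder's explicit end-of-stream marker. */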
				if (++rep0 == 0)
					break;
			}

			len += LZMA_MATCH_MIN_LEN;

			do {
				pos = buffer_pos - rep0;
				while (pos >= header.dict_size)
					pos += header.dict_size;
				previous_byte = buffer[pos];
				buffer[buffer_pos++] = previous_byte;
				if (buffer_pos == header.dict_size) {
					buffer_pos = 0;
					global_pos += header.dict_size;
					if (full_write(dst_fd, buffer, header.dict_size) != header.dict_size)
						goto bad;
					USE_DESKTOP(total_written += header.dict_size;)
				}
				len--;
			} while (len != 0 && buffer_pos < header.dst_size);
		}
	}

	if (full_write(dst_fd, buffer, buffer_pos) != buffer_pos) {
 bad:
		rc_free(rc);
		return -1;
	}
	rc_free(rc);
	USE_DESKTOP(total_written += buffer_pos;)
	return USE_DESKTOP(total_written) + 0;
}