lemon-project-template-glpk
diff deps/glpk/src/zlib/deflate.c @ 9:33de93886c88
Import GLPK 4.47
| author | Alpar Juttner <alpar@cs.elte.hu> |
| --- | --- |
| date | Sun, 06 Nov 2011 20:59:10 +0100 |
| parents | |
| children | |
line diff
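The imported deflate.c below provides zlib's compression entry points (deflateInit_/deflateInit2_, deflate, deflateBound, deflateEnd). As orientation before the line-by-line diff, here is a minimal caller-side sketch (an illustration, not part of the changeset): it assumes the zlib.h header that accompanies this import is on the include path and that the output buffer is large enough for a single Z_FINISH pass.

```c
/* Minimal sketch: compress one memory buffer with the deflate API imported
 * below.  Assumes zlib.h from this import is on the include path; error
 * handling is reduced to return-code checks. */
#include <string.h>
#include "zlib.h"

int compress_buffer(const unsigned char *in, size_t in_len,
                    unsigned char *out, size_t out_cap, size_t *out_len)
{
    z_stream strm;
    int ret;

    memset(&strm, 0, sizeof(strm));   /* zalloc/zfree/opaque left 0 -> defaults */
    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (ret != Z_OK) return ret;

    strm.next_in   = (Bytef *)in;
    strm.avail_in  = (uInt)in_len;
    strm.next_out  = out;
    strm.avail_out = (uInt)out_cap;

    ret = deflate(&strm, Z_FINISH);   /* single pass; Z_STREAM_END when complete */
    *out_len = out_cap - strm.avail_out;
    deflateEnd(&strm);

    return ret == Z_STREAM_END ? Z_OK : (ret == Z_OK ? Z_BUF_ERROR : ret);
}
```

In practice the output buffer would be sized with deflateBound() after deflateInit(); a caller that cannot pre-size it would instead loop on deflate(), refilling next_out/avail_out whenever avail_out reaches zero.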
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/deps/glpk/src/zlib/deflate.c Sun Nov 06 20:59:10 2011 +0100 1.3 @@ -0,0 +1,1834 @@ 1.4 +/* deflate.c -- compress data using the deflation algorithm 1.5 + * Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler 1.6 + * For conditions of distribution and use, see copyright notice in zlib.h 1.7 + */ 1.8 + 1.9 +/* 1.10 + * ALGORITHM 1.11 + * 1.12 + * The "deflation" process depends on being able to identify portions 1.13 + * of the input text which are identical to earlier input (within a 1.14 + * sliding window trailing behind the input currently being processed). 1.15 + * 1.16 + * The most straightforward technique turns out to be the fastest for 1.17 + * most input files: try all possible matches and select the longest. 1.18 + * The key feature of this algorithm is that insertions into the string 1.19 + * dictionary are very simple and thus fast, and deletions are avoided 1.20 + * completely. Insertions are performed at each input character, whereas 1.21 + * string matches are performed only when the previous match ends. So it 1.22 + * is preferable to spend more time in matches to allow very fast string 1.23 + * insertions and avoid deletions. The matching algorithm for small 1.24 + * strings is inspired from that of Rabin & Karp. A brute force approach 1.25 + * is used to find longer strings when a small match has been found. 1.26 + * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze 1.27 + * (by Leonid Broukhis). 1.28 + * A previous version of this file used a more sophisticated algorithm 1.29 + * (by Fiala and Greene) which is guaranteed to run in linear amortized 1.30 + * time, but has a larger average cost, uses more memory and is patented. 1.31 + * However the F&G algorithm may be faster for some highly redundant 1.32 + * files if the parameter max_chain_length (described below) is too large. 1.33 + * 1.34 + * ACKNOWLEDGEMENTS 1.35 + * 1.36 + * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and 1.37 + * I found it in 'freeze' written by Leonid Broukhis. 1.38 + * Thanks to many people for bug reports and testing. 1.39 + * 1.40 + * REFERENCES 1.41 + * 1.42 + * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". 1.43 + * Available in http://www.ietf.org/rfc/rfc1951.txt 1.44 + * 1.45 + * A description of the Rabin and Karp algorithm is given in the book 1.46 + * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. 1.47 + * 1.48 + * Fiala,E.R., and Greene,D.H. 1.49 + * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 1.50 + * 1.51 + */ 1.52 + 1.53 +/* @(#) $Id$ */ 1.54 + 1.55 +#include "deflate.h" 1.56 + 1.57 +const char deflate_copyright[] = 1.58 + " deflate 1.2.5 Copyright 1995-2010 Jean-loup Gailly and Mark Adler "; 1.59 +/* 1.60 + If you use the zlib library in a product, an acknowledgment is welcome 1.61 + in the documentation of your product. If for some reason you cannot 1.62 + include such an acknowledgment, I would appreciate that you keep this 1.63 + copyright string in the executable of your product. 1.64 + */ 1.65 + 1.66 +/* =========================================================================== 1.67 + * Function prototypes. 
1.68 + */ 1.69 +typedef enum { 1.70 + need_more, /* block not completed, need more input or more output */ 1.71 + block_done, /* block flush performed */ 1.72 + finish_started, /* finish started, need only more output at next deflate */ 1.73 + finish_done /* finish done, accept no more input or output */ 1.74 +} block_state; 1.75 + 1.76 +typedef block_state (*compress_func) OF((deflate_state *s, int flush)); 1.77 +/* Compression function. Returns the block state after the call. */ 1.78 + 1.79 +local void fill_window OF((deflate_state *s)); 1.80 +local block_state deflate_stored OF((deflate_state *s, int flush)); 1.81 +local block_state deflate_fast OF((deflate_state *s, int flush)); 1.82 +#ifndef FASTEST 1.83 +local block_state deflate_slow OF((deflate_state *s, int flush)); 1.84 +#endif 1.85 +local block_state deflate_rle OF((deflate_state *s, int flush)); 1.86 +local block_state deflate_huff OF((deflate_state *s, int flush)); 1.87 +local void lm_init OF((deflate_state *s)); 1.88 +local void putShortMSB OF((deflate_state *s, uInt b)); 1.89 +local void flush_pending OF((z_streamp strm)); 1.90 +local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); 1.91 +#ifdef ASMV 1.92 + void match_init OF((void)); /* asm code initialization */ 1.93 + uInt longest_match OF((deflate_state *s, IPos cur_match)); 1.94 +#else 1.95 +local uInt longest_match OF((deflate_state *s, IPos cur_match)); 1.96 +#endif 1.97 + 1.98 +#ifdef DEBUG 1.99 +local void check_match OF((deflate_state *s, IPos start, IPos match, 1.100 + int length)); 1.101 +#endif 1.102 + 1.103 +/* =========================================================================== 1.104 + * Local data 1.105 + */ 1.106 + 1.107 +#define NIL 0 1.108 +/* Tail of hash chains */ 1.109 + 1.110 +#ifndef TOO_FAR 1.111 +# define TOO_FAR 4096 1.112 +#endif 1.113 +/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ 1.114 + 1.115 +/* Values for max_lazy_match, good_match and max_chain_length, depending on 1.116 + * the desired pack level (0..9). The values given below have been tuned to 1.117 + * exclude worst case performance for pathological files. Better values may be 1.118 + * found for specific files. 
1.119 + */ 1.120 +typedef struct config_s { 1.121 + ush good_length; /* reduce lazy search above this match length */ 1.122 + ush max_lazy; /* do not perform lazy search above this match length */ 1.123 + ush nice_length; /* quit search above this match length */ 1.124 + ush max_chain; 1.125 + compress_func func; 1.126 +} config; 1.127 + 1.128 +#ifdef FASTEST 1.129 +local const config configuration_table[2] = { 1.130 +/* good lazy nice chain */ 1.131 +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ 1.132 +/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ 1.133 +#else 1.134 +local const config configuration_table[10] = { 1.135 +/* good lazy nice chain */ 1.136 +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ 1.137 +/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ 1.138 +/* 2 */ {4, 5, 16, 8, deflate_fast}, 1.139 +/* 3 */ {4, 6, 32, 32, deflate_fast}, 1.140 + 1.141 +/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ 1.142 +/* 5 */ {8, 16, 32, 32, deflate_slow}, 1.143 +/* 6 */ {8, 16, 128, 128, deflate_slow}, 1.144 +/* 7 */ {8, 32, 128, 256, deflate_slow}, 1.145 +/* 8 */ {32, 128, 258, 1024, deflate_slow}, 1.146 +/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ 1.147 +#endif 1.148 + 1.149 +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 1.150 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different 1.151 + * meaning. 1.152 + */ 1.153 + 1.154 +#define EQUAL 0 1.155 +/* result of memcmp for equal strings */ 1.156 + 1.157 +#ifndef NO_DUMMY_DECL 1.158 +struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ 1.159 +#endif 1.160 + 1.161 +/* =========================================================================== 1.162 + * Update a hash value with the given input byte 1.163 + * IN assertion: all calls to to UPDATE_HASH are made with consecutive 1.164 + * input characters, so that a running hash key can be computed from the 1.165 + * previous key instead of complete recalculation each time. 1.166 + */ 1.167 +#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) 1.168 + 1.169 + 1.170 +/* =========================================================================== 1.171 + * Insert string str in the dictionary and set match_head to the previous head 1.172 + * of the hash chain (the most recent string with same hash key). Return 1.173 + * the previous length of the hash chain. 1.174 + * If this file is compiled with -DFASTEST, the compression level is forced 1.175 + * to 1, and no hash chains are maintained. 1.176 + * IN assertion: all calls to to INSERT_STRING are made with consecutive 1.177 + * input characters and the first MIN_MATCH bytes of str are valid 1.178 + * (except for the last MIN_MATCH-1 bytes of the input file). 1.179 + */ 1.180 +#ifdef FASTEST 1.181 +#define INSERT_STRING(s, str, match_head) \ 1.182 + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ 1.183 + match_head = s->head[s->ins_h], \ 1.184 + s->head[s->ins_h] = (Pos)(str)) 1.185 +#else 1.186 +#define INSERT_STRING(s, str, match_head) \ 1.187 + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ 1.188 + match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ 1.189 + s->head[s->ins_h] = (Pos)(str)) 1.190 +#endif 1.191 + 1.192 +/* =========================================================================== 1.193 + * Initialize the hash table (avoiding 64K overflow for 16 bit systems). 1.194 + * prev[] will be initialized on the fly. 
1.195 + */ 1.196 +#define CLEAR_HASH(s) \ 1.197 + s->head[s->hash_size-1] = NIL; \ 1.198 + zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); 1.199 + 1.200 +/* ========================================================================= */ 1.201 +int ZEXPORT deflateInit_(strm, level, version, stream_size) 1.202 + z_streamp strm; 1.203 + int level; 1.204 + const char *version; 1.205 + int stream_size; 1.206 +{ 1.207 + return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, 1.208 + Z_DEFAULT_STRATEGY, version, stream_size); 1.209 + /* To do: ignore strm->next_in if we use it as window */ 1.210 +} 1.211 + 1.212 +/* ========================================================================= */ 1.213 +int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, 1.214 + version, stream_size) 1.215 + z_streamp strm; 1.216 + int level; 1.217 + int method; 1.218 + int windowBits; 1.219 + int memLevel; 1.220 + int strategy; 1.221 + const char *version; 1.222 + int stream_size; 1.223 +{ 1.224 + deflate_state *s; 1.225 + int wrap = 1; 1.226 + static const char my_version[] = ZLIB_VERSION; 1.227 + 1.228 + ushf *overlay; 1.229 + /* We overlay pending_buf and d_buf+l_buf. This works since the average 1.230 + * output size for (length,distance) codes is <= 24 bits. 1.231 + */ 1.232 + 1.233 + if (version == Z_NULL || version[0] != my_version[0] || 1.234 + stream_size != sizeof(z_stream)) { 1.235 + return Z_VERSION_ERROR; 1.236 + } 1.237 + if (strm == Z_NULL) return Z_STREAM_ERROR; 1.238 + 1.239 + strm->msg = Z_NULL; 1.240 + if (strm->zalloc == (alloc_func)0) { 1.241 + strm->zalloc = zcalloc; 1.242 + strm->opaque = (voidpf)0; 1.243 + } 1.244 + if (strm->zfree == (free_func)0) strm->zfree = zcfree; 1.245 + 1.246 +#ifdef FASTEST 1.247 + if (level != 0) level = 1; 1.248 +#else 1.249 + if (level == Z_DEFAULT_COMPRESSION) level = 6; 1.250 +#endif 1.251 + 1.252 + if (windowBits < 0) { /* suppress zlib wrapper */ 1.253 + wrap = 0; 1.254 + windowBits = -windowBits; 1.255 + } 1.256 +#ifdef GZIP 1.257 + else if (windowBits > 15) { 1.258 + wrap = 2; /* write gzip wrapper instead */ 1.259 + windowBits -= 16; 1.260 + } 1.261 +#endif 1.262 + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || 1.263 + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || 1.264 + strategy < 0 || strategy > Z_FIXED) { 1.265 + return Z_STREAM_ERROR; 1.266 + } 1.267 + if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ 1.268 + s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); 1.269 + if (s == Z_NULL) return Z_MEM_ERROR; 1.270 + strm->state = (struct internal_state FAR *)s; 1.271 + s->strm = strm; 1.272 + 1.273 + s->wrap = wrap; 1.274 + s->gzhead = Z_NULL; 1.275 + s->w_bits = windowBits; 1.276 + s->w_size = 1 << s->w_bits; 1.277 + s->w_mask = s->w_size - 1; 1.278 + 1.279 + s->hash_bits = memLevel + 7; 1.280 + s->hash_size = 1 << s->hash_bits; 1.281 + s->hash_mask = s->hash_size - 1; 1.282 + s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); 1.283 + 1.284 + s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); 1.285 + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); 1.286 + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); 1.287 + 1.288 + s->high_water = 0; /* nothing written to s->window yet */ 1.289 + 1.290 + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ 1.291 + 1.292 + overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); 1.293 + s->pending_buf = (uchf *) overlay; 
1.294 + s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); 1.295 + 1.296 + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || 1.297 + s->pending_buf == Z_NULL) { 1.298 + s->status = FINISH_STATE; 1.299 + strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); 1.300 + deflateEnd (strm); 1.301 + return Z_MEM_ERROR; 1.302 + } 1.303 + s->d_buf = overlay + s->lit_bufsize/sizeof(ush); 1.304 + s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; 1.305 + 1.306 + s->level = level; 1.307 + s->strategy = strategy; 1.308 + s->method = (Byte)method; 1.309 + 1.310 + return deflateReset(strm); 1.311 +} 1.312 + 1.313 +/* ========================================================================= */ 1.314 +int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) 1.315 + z_streamp strm; 1.316 + const Bytef *dictionary; 1.317 + uInt dictLength; 1.318 +{ 1.319 + deflate_state *s; 1.320 + uInt length = dictLength; 1.321 + uInt n; 1.322 + IPos hash_head = 0; 1.323 + 1.324 + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL || 1.325 + strm->state->wrap == 2 || 1.326 + (strm->state->wrap == 1 && strm->state->status != INIT_STATE)) 1.327 + return Z_STREAM_ERROR; 1.328 + 1.329 + s = strm->state; 1.330 + if (s->wrap) 1.331 + strm->adler = adler32(strm->adler, dictionary, dictLength); 1.332 + 1.333 + if (length < MIN_MATCH) return Z_OK; 1.334 + if (length > s->w_size) { 1.335 + length = s->w_size; 1.336 + dictionary += dictLength - length; /* use the tail of the dictionary */ 1.337 + } 1.338 + zmemcpy(s->window, dictionary, length); 1.339 + s->strstart = length; 1.340 + s->block_start = (long)length; 1.341 + 1.342 + /* Insert all strings in the hash table (except for the last two bytes). 1.343 + * s->lookahead stays null, so s->ins_h will be recomputed at the next 1.344 + * call of fill_window. 1.345 + */ 1.346 + s->ins_h = s->window[0]; 1.347 + UPDATE_HASH(s, s->ins_h, s->window[1]); 1.348 + for (n = 0; n <= length - MIN_MATCH; n++) { 1.349 + INSERT_STRING(s, n, hash_head); 1.350 + } 1.351 + if (hash_head) hash_head = 0; /* to make compiler happy */ 1.352 + return Z_OK; 1.353 +} 1.354 + 1.355 +/* ========================================================================= */ 1.356 +int ZEXPORT deflateReset (strm) 1.357 + z_streamp strm; 1.358 +{ 1.359 + deflate_state *s; 1.360 + 1.361 + if (strm == Z_NULL || strm->state == Z_NULL || 1.362 + strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) { 1.363 + return Z_STREAM_ERROR; 1.364 + } 1.365 + 1.366 + strm->total_in = strm->total_out = 0; 1.367 + strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ 1.368 + strm->data_type = Z_UNKNOWN; 1.369 + 1.370 + s = (deflate_state *)strm->state; 1.371 + s->pending = 0; 1.372 + s->pending_out = s->pending_buf; 1.373 + 1.374 + if (s->wrap < 0) { 1.375 + s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */ 1.376 + } 1.377 + s->status = s->wrap ? INIT_STATE : BUSY_STATE; 1.378 + strm->adler = 1.379 +#ifdef GZIP 1.380 + s->wrap == 2 ? 
crc32(0L, Z_NULL, 0) : 1.381 +#endif 1.382 + adler32(0L, Z_NULL, 0); 1.383 + s->last_flush = Z_NO_FLUSH; 1.384 + 1.385 + _tr_init(s); 1.386 + lm_init(s); 1.387 + 1.388 + return Z_OK; 1.389 +} 1.390 + 1.391 +/* ========================================================================= */ 1.392 +int ZEXPORT deflateSetHeader (strm, head) 1.393 + z_streamp strm; 1.394 + gz_headerp head; 1.395 +{ 1.396 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 1.397 + if (strm->state->wrap != 2) return Z_STREAM_ERROR; 1.398 + strm->state->gzhead = head; 1.399 + return Z_OK; 1.400 +} 1.401 + 1.402 +/* ========================================================================= */ 1.403 +int ZEXPORT deflatePrime (strm, bits, value) 1.404 + z_streamp strm; 1.405 + int bits; 1.406 + int value; 1.407 +{ 1.408 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 1.409 + strm->state->bi_valid = bits; 1.410 + strm->state->bi_buf = (ush)(value & ((1 << bits) - 1)); 1.411 + return Z_OK; 1.412 +} 1.413 + 1.414 +/* ========================================================================= */ 1.415 +int ZEXPORT deflateParams(strm, level, strategy) 1.416 + z_streamp strm; 1.417 + int level; 1.418 + int strategy; 1.419 +{ 1.420 + deflate_state *s; 1.421 + compress_func func; 1.422 + int err = Z_OK; 1.423 + 1.424 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 1.425 + s = strm->state; 1.426 + 1.427 +#ifdef FASTEST 1.428 + if (level != 0) level = 1; 1.429 +#else 1.430 + if (level == Z_DEFAULT_COMPRESSION) level = 6; 1.431 +#endif 1.432 + if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { 1.433 + return Z_STREAM_ERROR; 1.434 + } 1.435 + func = configuration_table[s->level].func; 1.436 + 1.437 + if ((strategy != s->strategy || func != configuration_table[level].func) && 1.438 + strm->total_in != 0) { 1.439 + /* Flush the last buffer: */ 1.440 + err = deflate(strm, Z_BLOCK); 1.441 + } 1.442 + if (s->level != level) { 1.443 + s->level = level; 1.444 + s->max_lazy_match = configuration_table[level].max_lazy; 1.445 + s->good_match = configuration_table[level].good_length; 1.446 + s->nice_match = configuration_table[level].nice_length; 1.447 + s->max_chain_length = configuration_table[level].max_chain; 1.448 + } 1.449 + s->strategy = strategy; 1.450 + return err; 1.451 +} 1.452 + 1.453 +/* ========================================================================= */ 1.454 +int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) 1.455 + z_streamp strm; 1.456 + int good_length; 1.457 + int max_lazy; 1.458 + int nice_length; 1.459 + int max_chain; 1.460 +{ 1.461 + deflate_state *s; 1.462 + 1.463 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 1.464 + s = strm->state; 1.465 + s->good_match = good_length; 1.466 + s->max_lazy_match = max_lazy; 1.467 + s->nice_match = nice_length; 1.468 + s->max_chain_length = max_chain; 1.469 + return Z_OK; 1.470 +} 1.471 + 1.472 +/* ========================================================================= 1.473 + * For the default windowBits of 15 and memLevel of 8, this function returns 1.474 + * a close to exact, as well as small, upper bound on the compressed size. 1.475 + * They are coded as constants here for a reason--if the #define's are 1.476 + * changed, then this function needs to be changed as well. The return 1.477 + * value for 15 and 8 only works for those exact settings. 
1.478 + * 1.479 + * For any setting other than those defaults for windowBits and memLevel, 1.480 + * the value returned is a conservative worst case for the maximum expansion 1.481 + * resulting from using fixed blocks instead of stored blocks, which deflate 1.482 + * can emit on compressed data for some combinations of the parameters. 1.483 + * 1.484 + * This function could be more sophisticated to provide closer upper bounds for 1.485 + * every combination of windowBits and memLevel. But even the conservative 1.486 + * upper bound of about 14% expansion does not seem onerous for output buffer 1.487 + * allocation. 1.488 + */ 1.489 +uLong ZEXPORT deflateBound(strm, sourceLen) 1.490 + z_streamp strm; 1.491 + uLong sourceLen; 1.492 +{ 1.493 + deflate_state *s; 1.494 + uLong complen, wraplen; 1.495 + Bytef *str; 1.496 + 1.497 + /* conservative upper bound for compressed data */ 1.498 + complen = sourceLen + 1.499 + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; 1.500 + 1.501 + /* if can't get parameters, return conservative bound plus zlib wrapper */ 1.502 + if (strm == Z_NULL || strm->state == Z_NULL) 1.503 + return complen + 6; 1.504 + 1.505 + /* compute wrapper length */ 1.506 + s = strm->state; 1.507 + switch (s->wrap) { 1.508 + case 0: /* raw deflate */ 1.509 + wraplen = 0; 1.510 + break; 1.511 + case 1: /* zlib wrapper */ 1.512 + wraplen = 6 + (s->strstart ? 4 : 0); 1.513 + break; 1.514 + case 2: /* gzip wrapper */ 1.515 + wraplen = 18; 1.516 + if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ 1.517 + if (s->gzhead->extra != Z_NULL) 1.518 + wraplen += 2 + s->gzhead->extra_len; 1.519 + str = s->gzhead->name; 1.520 + if (str != Z_NULL) 1.521 + do { 1.522 + wraplen++; 1.523 + } while (*str++); 1.524 + str = s->gzhead->comment; 1.525 + if (str != Z_NULL) 1.526 + do { 1.527 + wraplen++; 1.528 + } while (*str++); 1.529 + if (s->gzhead->hcrc) 1.530 + wraplen += 2; 1.531 + } 1.532 + break; 1.533 + default: /* for compiler happiness */ 1.534 + wraplen = 6; 1.535 + } 1.536 + 1.537 + /* if not default parameters, return conservative bound */ 1.538 + if (s->w_bits != 15 || s->hash_bits != 8 + 7) 1.539 + return complen + wraplen; 1.540 + 1.541 + /* default settings: return tight bound for that case */ 1.542 + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + 1.543 + (sourceLen >> 25) + 13 - 6 + wraplen; 1.544 +} 1.545 + 1.546 +/* ========================================================================= 1.547 + * Put a short in the pending buffer. The 16-bit value is put in MSB order. 1.548 + * IN assertion: the stream state is correct and there is enough room in 1.549 + * pending_buf. 1.550 + */ 1.551 +local void putShortMSB (s, b) 1.552 + deflate_state *s; 1.553 + uInt b; 1.554 +{ 1.555 + put_byte(s, (Byte)(b >> 8)); 1.556 + put_byte(s, (Byte)(b & 0xff)); 1.557 +} 1.558 + 1.559 +/* ========================================================================= 1.560 + * Flush as much pending output as possible. All deflate() output goes 1.561 + * through this function so some applications may wish to modify it 1.562 + * to avoid allocating a large strm->next_out buffer and copying into it. 1.563 + * (See also read_buf()). 
1.564 + */ 1.565 +local void flush_pending(strm) 1.566 + z_streamp strm; 1.567 +{ 1.568 + unsigned len = strm->state->pending; 1.569 + 1.570 + if (len > strm->avail_out) len = strm->avail_out; 1.571 + if (len == 0) return; 1.572 + 1.573 + zmemcpy(strm->next_out, strm->state->pending_out, len); 1.574 + strm->next_out += len; 1.575 + strm->state->pending_out += len; 1.576 + strm->total_out += len; 1.577 + strm->avail_out -= len; 1.578 + strm->state->pending -= len; 1.579 + if (strm->state->pending == 0) { 1.580 + strm->state->pending_out = strm->state->pending_buf; 1.581 + } 1.582 +} 1.583 + 1.584 +/* ========================================================================= */ 1.585 +int ZEXPORT deflate (strm, flush) 1.586 + z_streamp strm; 1.587 + int flush; 1.588 +{ 1.589 + int old_flush; /* value of flush param for previous deflate call */ 1.590 + deflate_state *s; 1.591 + 1.592 + if (strm == Z_NULL || strm->state == Z_NULL || 1.593 + flush > Z_BLOCK || flush < 0) { 1.594 + return Z_STREAM_ERROR; 1.595 + } 1.596 + s = strm->state; 1.597 + 1.598 + if (strm->next_out == Z_NULL || 1.599 + (strm->next_in == Z_NULL && strm->avail_in != 0) || 1.600 + (s->status == FINISH_STATE && flush != Z_FINISH)) { 1.601 + ERR_RETURN(strm, Z_STREAM_ERROR); 1.602 + } 1.603 + if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); 1.604 + 1.605 + s->strm = strm; /* just in case */ 1.606 + old_flush = s->last_flush; 1.607 + s->last_flush = flush; 1.608 + 1.609 + /* Write the header */ 1.610 + if (s->status == INIT_STATE) { 1.611 +#ifdef GZIP 1.612 + if (s->wrap == 2) { 1.613 + strm->adler = crc32(0L, Z_NULL, 0); 1.614 + put_byte(s, 31); 1.615 + put_byte(s, 139); 1.616 + put_byte(s, 8); 1.617 + if (s->gzhead == Z_NULL) { 1.618 + put_byte(s, 0); 1.619 + put_byte(s, 0); 1.620 + put_byte(s, 0); 1.621 + put_byte(s, 0); 1.622 + put_byte(s, 0); 1.623 + put_byte(s, s->level == 9 ? 2 : 1.624 + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 1.625 + 4 : 0)); 1.626 + put_byte(s, OS_CODE); 1.627 + s->status = BUSY_STATE; 1.628 + } 1.629 + else { 1.630 + put_byte(s, (s->gzhead->text ? 1 : 0) + 1.631 + (s->gzhead->hcrc ? 2 : 0) + 1.632 + (s->gzhead->extra == Z_NULL ? 0 : 4) + 1.633 + (s->gzhead->name == Z_NULL ? 0 : 8) + 1.634 + (s->gzhead->comment == Z_NULL ? 0 : 16) 1.635 + ); 1.636 + put_byte(s, (Byte)(s->gzhead->time & 0xff)); 1.637 + put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); 1.638 + put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); 1.639 + put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); 1.640 + put_byte(s, s->level == 9 ? 2 : 1.641 + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 
1.642 + 4 : 0)); 1.643 + put_byte(s, s->gzhead->os & 0xff); 1.644 + if (s->gzhead->extra != Z_NULL) { 1.645 + put_byte(s, s->gzhead->extra_len & 0xff); 1.646 + put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); 1.647 + } 1.648 + if (s->gzhead->hcrc) 1.649 + strm->adler = crc32(strm->adler, s->pending_buf, 1.650 + s->pending); 1.651 + s->gzindex = 0; 1.652 + s->status = EXTRA_STATE; 1.653 + } 1.654 + } 1.655 + else 1.656 +#endif 1.657 + { 1.658 + uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; 1.659 + uInt level_flags; 1.660 + 1.661 + if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) 1.662 + level_flags = 0; 1.663 + else if (s->level < 6) 1.664 + level_flags = 1; 1.665 + else if (s->level == 6) 1.666 + level_flags = 2; 1.667 + else 1.668 + level_flags = 3; 1.669 + header |= (level_flags << 6); 1.670 + if (s->strstart != 0) header |= PRESET_DICT; 1.671 + header += 31 - (header % 31); 1.672 + 1.673 + s->status = BUSY_STATE; 1.674 + putShortMSB(s, header); 1.675 + 1.676 + /* Save the adler32 of the preset dictionary: */ 1.677 + if (s->strstart != 0) { 1.678 + putShortMSB(s, (uInt)(strm->adler >> 16)); 1.679 + putShortMSB(s, (uInt)(strm->adler & 0xffff)); 1.680 + } 1.681 + strm->adler = adler32(0L, Z_NULL, 0); 1.682 + } 1.683 + } 1.684 +#ifdef GZIP 1.685 + if (s->status == EXTRA_STATE) { 1.686 + if (s->gzhead->extra != Z_NULL) { 1.687 + uInt beg = s->pending; /* start of bytes to update crc */ 1.688 + 1.689 + while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { 1.690 + if (s->pending == s->pending_buf_size) { 1.691 + if (s->gzhead->hcrc && s->pending > beg) 1.692 + strm->adler = crc32(strm->adler, s->pending_buf + beg, 1.693 + s->pending - beg); 1.694 + flush_pending(strm); 1.695 + beg = s->pending; 1.696 + if (s->pending == s->pending_buf_size) 1.697 + break; 1.698 + } 1.699 + put_byte(s, s->gzhead->extra[s->gzindex]); 1.700 + s->gzindex++; 1.701 + } 1.702 + if (s->gzhead->hcrc && s->pending > beg) 1.703 + strm->adler = crc32(strm->adler, s->pending_buf + beg, 1.704 + s->pending - beg); 1.705 + if (s->gzindex == s->gzhead->extra_len) { 1.706 + s->gzindex = 0; 1.707 + s->status = NAME_STATE; 1.708 + } 1.709 + } 1.710 + else 1.711 + s->status = NAME_STATE; 1.712 + } 1.713 + if (s->status == NAME_STATE) { 1.714 + if (s->gzhead->name != Z_NULL) { 1.715 + uInt beg = s->pending; /* start of bytes to update crc */ 1.716 + int val; 1.717 + 1.718 + do { 1.719 + if (s->pending == s->pending_buf_size) { 1.720 + if (s->gzhead->hcrc && s->pending > beg) 1.721 + strm->adler = crc32(strm->adler, s->pending_buf + beg, 1.722 + s->pending - beg); 1.723 + flush_pending(strm); 1.724 + beg = s->pending; 1.725 + if (s->pending == s->pending_buf_size) { 1.726 + val = 1; 1.727 + break; 1.728 + } 1.729 + } 1.730 + val = s->gzhead->name[s->gzindex++]; 1.731 + put_byte(s, val); 1.732 + } while (val != 0); 1.733 + if (s->gzhead->hcrc && s->pending > beg) 1.734 + strm->adler = crc32(strm->adler, s->pending_buf + beg, 1.735 + s->pending - beg); 1.736 + if (val == 0) { 1.737 + s->gzindex = 0; 1.738 + s->status = COMMENT_STATE; 1.739 + } 1.740 + } 1.741 + else 1.742 + s->status = COMMENT_STATE; 1.743 + } 1.744 + if (s->status == COMMENT_STATE) { 1.745 + if (s->gzhead->comment != Z_NULL) { 1.746 + uInt beg = s->pending; /* start of bytes to update crc */ 1.747 + int val; 1.748 + 1.749 + do { 1.750 + if (s->pending == s->pending_buf_size) { 1.751 + if (s->gzhead->hcrc && s->pending > beg) 1.752 + strm->adler = crc32(strm->adler, s->pending_buf + beg, 1.753 + s->pending - beg); 1.754 + flush_pending(strm); 1.755 + beg = 
s->pending; 1.756 + if (s->pending == s->pending_buf_size) { 1.757 + val = 1; 1.758 + break; 1.759 + } 1.760 + } 1.761 + val = s->gzhead->comment[s->gzindex++]; 1.762 + put_byte(s, val); 1.763 + } while (val != 0); 1.764 + if (s->gzhead->hcrc && s->pending > beg) 1.765 + strm->adler = crc32(strm->adler, s->pending_buf + beg, 1.766 + s->pending - beg); 1.767 + if (val == 0) 1.768 + s->status = HCRC_STATE; 1.769 + } 1.770 + else 1.771 + s->status = HCRC_STATE; 1.772 + } 1.773 + if (s->status == HCRC_STATE) { 1.774 + if (s->gzhead->hcrc) { 1.775 + if (s->pending + 2 > s->pending_buf_size) 1.776 + flush_pending(strm); 1.777 + if (s->pending + 2 <= s->pending_buf_size) { 1.778 + put_byte(s, (Byte)(strm->adler & 0xff)); 1.779 + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); 1.780 + strm->adler = crc32(0L, Z_NULL, 0); 1.781 + s->status = BUSY_STATE; 1.782 + } 1.783 + } 1.784 + else 1.785 + s->status = BUSY_STATE; 1.786 + } 1.787 +#endif 1.788 + 1.789 + /* Flush as much pending output as possible */ 1.790 + if (s->pending != 0) { 1.791 + flush_pending(strm); 1.792 + if (strm->avail_out == 0) { 1.793 + /* Since avail_out is 0, deflate will be called again with 1.794 + * more output space, but possibly with both pending and 1.795 + * avail_in equal to zero. There won't be anything to do, 1.796 + * but this is not an error situation so make sure we 1.797 + * return OK instead of BUF_ERROR at next call of deflate: 1.798 + */ 1.799 + s->last_flush = -1; 1.800 + return Z_OK; 1.801 + } 1.802 + 1.803 + /* Make sure there is something to do and avoid duplicate consecutive 1.804 + * flushes. For repeated and useless calls with Z_FINISH, we keep 1.805 + * returning Z_STREAM_END instead of Z_BUF_ERROR. 1.806 + */ 1.807 + } else if (strm->avail_in == 0 && flush <= old_flush && 1.808 + flush != Z_FINISH) { 1.809 + ERR_RETURN(strm, Z_BUF_ERROR); 1.810 + } 1.811 + 1.812 + /* User must not provide more input after the first FINISH: */ 1.813 + if (s->status == FINISH_STATE && strm->avail_in != 0) { 1.814 + ERR_RETURN(strm, Z_BUF_ERROR); 1.815 + } 1.816 + 1.817 + /* Start a new block or continue the current one. 1.818 + */ 1.819 + if (strm->avail_in != 0 || s->lookahead != 0 || 1.820 + (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { 1.821 + block_state bstate; 1.822 + 1.823 + bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : 1.824 + (s->strategy == Z_RLE ? deflate_rle(s, flush) : 1.825 + (*(configuration_table[s->level].func))(s, flush)); 1.826 + 1.827 + if (bstate == finish_started || bstate == finish_done) { 1.828 + s->status = FINISH_STATE; 1.829 + } 1.830 + if (bstate == need_more || bstate == finish_started) { 1.831 + if (strm->avail_out == 0) { 1.832 + s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ 1.833 + } 1.834 + return Z_OK; 1.835 + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call 1.836 + * of deflate should use the same flush parameter to make sure 1.837 + * that the flush is complete. So we don't have to output an 1.838 + * empty block here, this will be done at next call. This also 1.839 + * ensures that for a very small output buffer, we emit at most 1.840 + * one empty block. 1.841 + */ 1.842 + } 1.843 + if (bstate == block_done) { 1.844 + if (flush == Z_PARTIAL_FLUSH) { 1.845 + _tr_align(s); 1.846 + } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ 1.847 + _tr_stored_block(s, (char*)0, 0L, 0); 1.848 + /* For a full flush, this empty block will be recognized 1.849 + * as a special marker by inflate_sync(). 
1.850 + */ 1.851 + if (flush == Z_FULL_FLUSH) { 1.852 + CLEAR_HASH(s); /* forget history */ 1.853 + if (s->lookahead == 0) { 1.854 + s->strstart = 0; 1.855 + s->block_start = 0L; 1.856 + } 1.857 + } 1.858 + } 1.859 + flush_pending(strm); 1.860 + if (strm->avail_out == 0) { 1.861 + s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ 1.862 + return Z_OK; 1.863 + } 1.864 + } 1.865 + } 1.866 + Assert(strm->avail_out > 0, "bug2"); 1.867 + 1.868 + if (flush != Z_FINISH) return Z_OK; 1.869 + if (s->wrap <= 0) return Z_STREAM_END; 1.870 + 1.871 + /* Write the trailer */ 1.872 +#ifdef GZIP 1.873 + if (s->wrap == 2) { 1.874 + put_byte(s, (Byte)(strm->adler & 0xff)); 1.875 + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); 1.876 + put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); 1.877 + put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); 1.878 + put_byte(s, (Byte)(strm->total_in & 0xff)); 1.879 + put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); 1.880 + put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); 1.881 + put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); 1.882 + } 1.883 + else 1.884 +#endif 1.885 + { 1.886 + putShortMSB(s, (uInt)(strm->adler >> 16)); 1.887 + putShortMSB(s, (uInt)(strm->adler & 0xffff)); 1.888 + } 1.889 + flush_pending(strm); 1.890 + /* If avail_out is zero, the application will call deflate again 1.891 + * to flush the rest. 1.892 + */ 1.893 + if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ 1.894 + return s->pending != 0 ? Z_OK : Z_STREAM_END; 1.895 +} 1.896 + 1.897 +/* ========================================================================= */ 1.898 +int ZEXPORT deflateEnd (strm) 1.899 + z_streamp strm; 1.900 +{ 1.901 + int status; 1.902 + 1.903 + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 1.904 + 1.905 + status = strm->state->status; 1.906 + if (status != INIT_STATE && 1.907 + status != EXTRA_STATE && 1.908 + status != NAME_STATE && 1.909 + status != COMMENT_STATE && 1.910 + status != HCRC_STATE && 1.911 + status != BUSY_STATE && 1.912 + status != FINISH_STATE) { 1.913 + return Z_STREAM_ERROR; 1.914 + } 1.915 + 1.916 + /* Deallocate in reverse order of allocations: */ 1.917 + TRY_FREE(strm, strm->state->pending_buf); 1.918 + TRY_FREE(strm, strm->state->head); 1.919 + TRY_FREE(strm, strm->state->prev); 1.920 + TRY_FREE(strm, strm->state->window); 1.921 + 1.922 + ZFREE(strm, strm->state); 1.923 + strm->state = Z_NULL; 1.924 + 1.925 + return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; 1.926 +} 1.927 + 1.928 +/* ========================================================================= 1.929 + * Copy the source state to the destination state. 1.930 + * To simplify the source, this is not supported for 16-bit MSDOS (which 1.931 + * doesn't have enough memory anyway to duplicate compression states). 
1.932 + */ 1.933 +int ZEXPORT deflateCopy (dest, source) 1.934 + z_streamp dest; 1.935 + z_streamp source; 1.936 +{ 1.937 +#ifdef MAXSEG_64K 1.938 + return Z_STREAM_ERROR; 1.939 +#else 1.940 + deflate_state *ds; 1.941 + deflate_state *ss; 1.942 + ushf *overlay; 1.943 + 1.944 + 1.945 + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { 1.946 + return Z_STREAM_ERROR; 1.947 + } 1.948 + 1.949 + ss = source->state; 1.950 + 1.951 + zmemcpy(dest, source, sizeof(z_stream)); 1.952 + 1.953 + ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); 1.954 + if (ds == Z_NULL) return Z_MEM_ERROR; 1.955 + dest->state = (struct internal_state FAR *) ds; 1.956 + zmemcpy(ds, ss, sizeof(deflate_state)); 1.957 + ds->strm = dest; 1.958 + 1.959 + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); 1.960 + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); 1.961 + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); 1.962 + overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); 1.963 + ds->pending_buf = (uchf *) overlay; 1.964 + 1.965 + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || 1.966 + ds->pending_buf == Z_NULL) { 1.967 + deflateEnd (dest); 1.968 + return Z_MEM_ERROR; 1.969 + } 1.970 + /* following zmemcpy do not work for 16-bit MSDOS */ 1.971 + zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); 1.972 + zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); 1.973 + zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); 1.974 + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); 1.975 + 1.976 + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); 1.977 + ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); 1.978 + ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; 1.979 + 1.980 + ds->l_desc.dyn_tree = ds->dyn_ltree; 1.981 + ds->d_desc.dyn_tree = ds->dyn_dtree; 1.982 + ds->bl_desc.dyn_tree = ds->bl_tree; 1.983 + 1.984 + return Z_OK; 1.985 +#endif /* MAXSEG_64K */ 1.986 +} 1.987 + 1.988 +/* =========================================================================== 1.989 + * Read a new buffer from the current input stream, update the adler32 1.990 + * and total number of bytes read. All deflate() input goes through 1.991 + * this function so some applications may wish to modify it to avoid 1.992 + * allocating a large strm->next_in buffer and copying from it. 1.993 + * (See also flush_pending()). 
1.994 + */ 1.995 +local int read_buf(strm, buf, size) 1.996 + z_streamp strm; 1.997 + Bytef *buf; 1.998 + unsigned size; 1.999 +{ 1.1000 + unsigned len = strm->avail_in; 1.1001 + 1.1002 + if (len > size) len = size; 1.1003 + if (len == 0) return 0; 1.1004 + 1.1005 + strm->avail_in -= len; 1.1006 + 1.1007 + if (strm->state->wrap == 1) { 1.1008 + strm->adler = adler32(strm->adler, strm->next_in, len); 1.1009 + } 1.1010 +#ifdef GZIP 1.1011 + else if (strm->state->wrap == 2) { 1.1012 + strm->adler = crc32(strm->adler, strm->next_in, len); 1.1013 + } 1.1014 +#endif 1.1015 + zmemcpy(buf, strm->next_in, len); 1.1016 + strm->next_in += len; 1.1017 + strm->total_in += len; 1.1018 + 1.1019 + return (int)len; 1.1020 +} 1.1021 + 1.1022 +/* =========================================================================== 1.1023 + * Initialize the "longest match" routines for a new zlib stream 1.1024 + */ 1.1025 +local void lm_init (s) 1.1026 + deflate_state *s; 1.1027 +{ 1.1028 + s->window_size = (ulg)2L*s->w_size; 1.1029 + 1.1030 + CLEAR_HASH(s); 1.1031 + 1.1032 + /* Set the default configuration parameters: 1.1033 + */ 1.1034 + s->max_lazy_match = configuration_table[s->level].max_lazy; 1.1035 + s->good_match = configuration_table[s->level].good_length; 1.1036 + s->nice_match = configuration_table[s->level].nice_length; 1.1037 + s->max_chain_length = configuration_table[s->level].max_chain; 1.1038 + 1.1039 + s->strstart = 0; 1.1040 + s->block_start = 0L; 1.1041 + s->lookahead = 0; 1.1042 + s->match_length = s->prev_length = MIN_MATCH-1; 1.1043 + s->match_available = 0; 1.1044 + s->ins_h = 0; 1.1045 +#ifndef FASTEST 1.1046 +#ifdef ASMV 1.1047 + match_init(); /* initialize the asm code */ 1.1048 +#endif 1.1049 +#endif 1.1050 +} 1.1051 + 1.1052 +#ifndef FASTEST 1.1053 +/* =========================================================================== 1.1054 + * Set match_start to the longest match starting at the given string and 1.1055 + * return its length. Matches shorter or equal to prev_length are discarded, 1.1056 + * in which case the result is equal to prev_length and match_start is 1.1057 + * garbage. 1.1058 + * IN assertions: cur_match is the head of the hash chain for the current 1.1059 + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 1.1060 + * OUT assertion: the match length is not greater than s->lookahead. 1.1061 + */ 1.1062 +#ifndef ASMV 1.1063 +/* For 80x86 and 680x0, an optimized version will be provided in match.asm or 1.1064 + * match.S. The code will be functionally equivalent. 1.1065 + */ 1.1066 +local uInt longest_match(s, cur_match) 1.1067 + deflate_state *s; 1.1068 + IPos cur_match; /* current match */ 1.1069 +{ 1.1070 + unsigned chain_length = s->max_chain_length;/* max hash chain length */ 1.1071 + register Bytef *scan = s->window + s->strstart; /* current string */ 1.1072 + register Bytef *match; /* matched string */ 1.1073 + register int len; /* length of current match */ 1.1074 + int best_len = s->prev_length; /* best match length so far */ 1.1075 + int nice_match = s->nice_match; /* stop if match long enough */ 1.1076 + IPos limit = s->strstart > (IPos)MAX_DIST(s) ? 1.1077 + s->strstart - (IPos)MAX_DIST(s) : NIL; 1.1078 + /* Stop when cur_match becomes <= limit. To simplify the code, 1.1079 + * we prevent matches with the string of window index 0. 1.1080 + */ 1.1081 + Posf *prev = s->prev; 1.1082 + uInt wmask = s->w_mask; 1.1083 + 1.1084 +#ifdef UNALIGNED_OK 1.1085 + /* Compare two bytes at a time. Note: this is not always beneficial. 
1.1086 + * Try with and without -DUNALIGNED_OK to check. 1.1087 + */ 1.1088 + register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; 1.1089 + register ush scan_start = *(ushf*)scan; 1.1090 + register ush scan_end = *(ushf*)(scan+best_len-1); 1.1091 +#else 1.1092 + register Bytef *strend = s->window + s->strstart + MAX_MATCH; 1.1093 + register Byte scan_end1 = scan[best_len-1]; 1.1094 + register Byte scan_end = scan[best_len]; 1.1095 +#endif 1.1096 + 1.1097 + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. 1.1098 + * It is easy to get rid of this optimization if necessary. 1.1099 + */ 1.1100 + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); 1.1101 + 1.1102 + /* Do not waste too much time if we already have a good match: */ 1.1103 + if (s->prev_length >= s->good_match) { 1.1104 + chain_length >>= 2; 1.1105 + } 1.1106 + /* Do not look for matches beyond the end of the input. This is necessary 1.1107 + * to make deflate deterministic. 1.1108 + */ 1.1109 + if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; 1.1110 + 1.1111 + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); 1.1112 + 1.1113 + do { 1.1114 + Assert(cur_match < s->strstart, "no future"); 1.1115 + match = s->window + cur_match; 1.1116 + 1.1117 + /* Skip to next match if the match length cannot increase 1.1118 + * or if the match length is less than 2. Note that the checks below 1.1119 + * for insufficient lookahead only occur occasionally for performance 1.1120 + * reasons. Therefore uninitialized memory will be accessed, and 1.1121 + * conditional jumps will be made that depend on those values. 1.1122 + * However the length of the match is limited to the lookahead, so 1.1123 + * the output of deflate is not affected by the uninitialized values. 1.1124 + */ 1.1125 +#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) 1.1126 + /* This code assumes sizeof(unsigned short) == 2. Do not use 1.1127 + * UNALIGNED_OK if your compiler uses a different size. 1.1128 + */ 1.1129 + if (*(ushf*)(match+best_len-1) != scan_end || 1.1130 + *(ushf*)match != scan_start) continue; 1.1131 + 1.1132 + /* It is not necessary to compare scan[2] and match[2] since they are 1.1133 + * always equal when the other bytes match, given that the hash keys 1.1134 + * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at 1.1135 + * strstart+3, +5, ... up to strstart+257. We check for insufficient 1.1136 + * lookahead only every 4th comparison; the 128th check will be made 1.1137 + * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is 1.1138 + * necessary to put more guard bytes at the end of the window, or 1.1139 + * to check more often for insufficient lookahead. 
1.1140 + */ 1.1141 + Assert(scan[2] == match[2], "scan[2]?"); 1.1142 + scan++, match++; 1.1143 + do { 1.1144 + } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && 1.1145 + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && 1.1146 + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && 1.1147 + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && 1.1148 + scan < strend); 1.1149 + /* The funny "do {}" generates better code on most compilers */ 1.1150 + 1.1151 + /* Here, scan <= window+strstart+257 */ 1.1152 + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); 1.1153 + if (*scan == *match) scan++; 1.1154 + 1.1155 + len = (MAX_MATCH - 1) - (int)(strend-scan); 1.1156 + scan = strend - (MAX_MATCH-1); 1.1157 + 1.1158 +#else /* UNALIGNED_OK */ 1.1159 + 1.1160 + if (match[best_len] != scan_end || 1.1161 + match[best_len-1] != scan_end1 || 1.1162 + *match != *scan || 1.1163 + *++match != scan[1]) continue; 1.1164 + 1.1165 + /* The check at best_len-1 can be removed because it will be made 1.1166 + * again later. (This heuristic is not always a win.) 1.1167 + * It is not necessary to compare scan[2] and match[2] since they 1.1168 + * are always equal when the other bytes match, given that 1.1169 + * the hash keys are equal and that HASH_BITS >= 8. 1.1170 + */ 1.1171 + scan += 2, match++; 1.1172 + Assert(*scan == *match, "match[2]?"); 1.1173 + 1.1174 + /* We check for insufficient lookahead only every 8th comparison; 1.1175 + * the 256th check will be made at strstart+258. 1.1176 + */ 1.1177 + do { 1.1178 + } while (*++scan == *++match && *++scan == *++match && 1.1179 + *++scan == *++match && *++scan == *++match && 1.1180 + *++scan == *++match && *++scan == *++match && 1.1181 + *++scan == *++match && *++scan == *++match && 1.1182 + scan < strend); 1.1183 + 1.1184 + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); 1.1185 + 1.1186 + len = MAX_MATCH - (int)(strend - scan); 1.1187 + scan = strend - MAX_MATCH; 1.1188 + 1.1189 +#endif /* UNALIGNED_OK */ 1.1190 + 1.1191 + if (len > best_len) { 1.1192 + s->match_start = cur_match; 1.1193 + best_len = len; 1.1194 + if (len >= nice_match) break; 1.1195 +#ifdef UNALIGNED_OK 1.1196 + scan_end = *(ushf*)(scan+best_len-1); 1.1197 +#else 1.1198 + scan_end1 = scan[best_len-1]; 1.1199 + scan_end = scan[best_len]; 1.1200 +#endif 1.1201 + } 1.1202 + } while ((cur_match = prev[cur_match & wmask]) > limit 1.1203 + && --chain_length != 0); 1.1204 + 1.1205 + if ((uInt)best_len <= s->lookahead) return (uInt)best_len; 1.1206 + return s->lookahead; 1.1207 +} 1.1208 +#endif /* ASMV */ 1.1209 + 1.1210 +#else /* FASTEST */ 1.1211 + 1.1212 +/* --------------------------------------------------------------------------- 1.1213 + * Optimized version for FASTEST only 1.1214 + */ 1.1215 +local uInt longest_match(s, cur_match) 1.1216 + deflate_state *s; 1.1217 + IPos cur_match; /* current match */ 1.1218 +{ 1.1219 + register Bytef *scan = s->window + s->strstart; /* current string */ 1.1220 + register Bytef *match; /* matched string */ 1.1221 + register int len; /* length of current match */ 1.1222 + register Bytef *strend = s->window + s->strstart + MAX_MATCH; 1.1223 + 1.1224 + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. 1.1225 + * It is easy to get rid of this optimization if necessary. 
1.1226 + */ 1.1227 + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); 1.1228 + 1.1229 + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); 1.1230 + 1.1231 + Assert(cur_match < s->strstart, "no future"); 1.1232 + 1.1233 + match = s->window + cur_match; 1.1234 + 1.1235 + /* Return failure if the match length is less than 2: 1.1236 + */ 1.1237 + if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; 1.1238 + 1.1239 + /* The check at best_len-1 can be removed because it will be made 1.1240 + * again later. (This heuristic is not always a win.) 1.1241 + * It is not necessary to compare scan[2] and match[2] since they 1.1242 + * are always equal when the other bytes match, given that 1.1243 + * the hash keys are equal and that HASH_BITS >= 8. 1.1244 + */ 1.1245 + scan += 2, match += 2; 1.1246 + Assert(*scan == *match, "match[2]?"); 1.1247 + 1.1248 + /* We check for insufficient lookahead only every 8th comparison; 1.1249 + * the 256th check will be made at strstart+258. 1.1250 + */ 1.1251 + do { 1.1252 + } while (*++scan == *++match && *++scan == *++match && 1.1253 + *++scan == *++match && *++scan == *++match && 1.1254 + *++scan == *++match && *++scan == *++match && 1.1255 + *++scan == *++match && *++scan == *++match && 1.1256 + scan < strend); 1.1257 + 1.1258 + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); 1.1259 + 1.1260 + len = MAX_MATCH - (int)(strend - scan); 1.1261 + 1.1262 + if (len < MIN_MATCH) return MIN_MATCH - 1; 1.1263 + 1.1264 + s->match_start = cur_match; 1.1265 + return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; 1.1266 +} 1.1267 + 1.1268 +#endif /* FASTEST */ 1.1269 + 1.1270 +#ifdef DEBUG 1.1271 +/* =========================================================================== 1.1272 + * Check that the match at match_start is indeed a match. 1.1273 + */ 1.1274 +local void check_match(s, start, match, length) 1.1275 + deflate_state *s; 1.1276 + IPos start, match; 1.1277 + int length; 1.1278 +{ 1.1279 + /* check that the match is indeed a match */ 1.1280 + if (zmemcmp(s->window + match, 1.1281 + s->window + start, length) != EQUAL) { 1.1282 + fprintf(stderr, " start %u, match %u, length %d\n", 1.1283 + start, match, length); 1.1284 + do { 1.1285 + fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); 1.1286 + } while (--length != 0); 1.1287 + z_error("invalid match"); 1.1288 + } 1.1289 + if (z_verbose > 1) { 1.1290 + fprintf(stderr,"\\[%d,%d]", start-match, length); 1.1291 + do { putc(s->window[start++], stderr); } while (--length != 0); 1.1292 + } 1.1293 +} 1.1294 +#else 1.1295 +# define check_match(s, start, match, length) 1.1296 +#endif /* DEBUG */ 1.1297 + 1.1298 +/* =========================================================================== 1.1299 + * Fill the window when the lookahead becomes insufficient. 1.1300 + * Updates strstart and lookahead. 1.1301 + * 1.1302 + * IN assertion: lookahead < MIN_LOOKAHEAD 1.1303 + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD 1.1304 + * At least one byte has been read, or avail_in == 0; reads are 1.1305 + * performed for at least two bytes (required for the zip translate_eol 1.1306 + * option -- not supported here). 1.1307 + */ 1.1308 +local void fill_window(s) 1.1309 + deflate_state *s; 1.1310 +{ 1.1311 + register unsigned n, m; 1.1312 + register Posf *p; 1.1313 + unsigned more; /* Amount of free space at the end of the window. 
*/ 1.1314 + uInt wsize = s->w_size; 1.1315 + 1.1316 + do { 1.1317 + more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); 1.1318 + 1.1319 + /* Deal with !@#$% 64K limit: */ 1.1320 + if (sizeof(int) <= 2) { 1.1321 + if (more == 0 && s->strstart == 0 && s->lookahead == 0) { 1.1322 + more = wsize; 1.1323 + 1.1324 + } else if (more == (unsigned)(-1)) { 1.1325 + /* Very unlikely, but possible on 16 bit machine if 1.1326 + * strstart == 0 && lookahead == 1 (input done a byte at time) 1.1327 + */ 1.1328 + more--; 1.1329 + } 1.1330 + } 1.1331 + 1.1332 + /* If the window is almost full and there is insufficient lookahead, 1.1333 + * move the upper half to the lower one to make room in the upper half. 1.1334 + */ 1.1335 + if (s->strstart >= wsize+MAX_DIST(s)) { 1.1336 + 1.1337 + zmemcpy(s->window, s->window+wsize, (unsigned)wsize); 1.1338 + s->match_start -= wsize; 1.1339 + s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ 1.1340 + s->block_start -= (long) wsize; 1.1341 + 1.1342 + /* Slide the hash table (could be avoided with 32 bit values 1.1343 + at the expense of memory usage). We slide even when level == 0 1.1344 + to keep the hash table consistent if we switch back to level > 0 1.1345 + later. (Using level 0 permanently is not an optimal usage of 1.1346 + zlib, so we don't care about this pathological case.) 1.1347 + */ 1.1348 + n = s->hash_size; 1.1349 + p = &s->head[n]; 1.1350 + do { 1.1351 + m = *--p; 1.1352 + *p = (Pos)(m >= wsize ? m-wsize : NIL); 1.1353 + } while (--n); 1.1354 + 1.1355 + n = wsize; 1.1356 +#ifndef FASTEST 1.1357 + p = &s->prev[n]; 1.1358 + do { 1.1359 + m = *--p; 1.1360 + *p = (Pos)(m >= wsize ? m-wsize : NIL); 1.1361 + /* If n is not on any hash chain, prev[n] is garbage but 1.1362 + * its value will never be used. 1.1363 + */ 1.1364 + } while (--n); 1.1365 +#endif 1.1366 + more += wsize; 1.1367 + } 1.1368 + if (s->strm->avail_in == 0) return; 1.1369 + 1.1370 + /* If there was no sliding: 1.1371 + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && 1.1372 + * more == window_size - lookahead - strstart 1.1373 + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) 1.1374 + * => more >= window_size - 2*WSIZE + 2 1.1375 + * In the BIG_MEM or MMAP case (not yet supported), 1.1376 + * window_size == input_size + MIN_LOOKAHEAD && 1.1377 + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. 1.1378 + * Otherwise, window_size == 2*WSIZE so more >= 2. 1.1379 + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. 1.1380 + */ 1.1381 + Assert(more >= 2, "more < 2"); 1.1382 + 1.1383 + n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); 1.1384 + s->lookahead += n; 1.1385 + 1.1386 + /* Initialize the hash value now that we have some input: */ 1.1387 + if (s->lookahead >= MIN_MATCH) { 1.1388 + s->ins_h = s->window[s->strstart]; 1.1389 + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); 1.1390 +#if MIN_MATCH != 3 1.1391 + Call UPDATE_HASH() MIN_MATCH-3 more times 1.1392 +#endif 1.1393 + } 1.1394 + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, 1.1395 + * but this is not important since only literal bytes will be emitted. 
1.1396 + */ 1.1397 + 1.1398 + } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); 1.1399 + 1.1400 + /* If the WIN_INIT bytes after the end of the current data have never been 1.1401 + * written, then zero those bytes in order to avoid memory check reports of 1.1402 + * the use of uninitialized (or uninitialised as Julian writes) bytes by 1.1403 + * the longest match routines. Update the high water mark for the next 1.1404 + * time through here. WIN_INIT is set to MAX_MATCH since the longest match 1.1405 + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. 1.1406 + */ 1.1407 + if (s->high_water < s->window_size) { 1.1408 + ulg curr = s->strstart + (ulg)(s->lookahead); 1.1409 + ulg init; 1.1410 + 1.1411 + if (s->high_water < curr) { 1.1412 + /* Previous high water mark below current data -- zero WIN_INIT 1.1413 + * bytes or up to end of window, whichever is less. 1.1414 + */ 1.1415 + init = s->window_size - curr; 1.1416 + if (init > WIN_INIT) 1.1417 + init = WIN_INIT; 1.1418 + zmemzero(s->window + curr, (unsigned)init); 1.1419 + s->high_water = curr + init; 1.1420 + } 1.1421 + else if (s->high_water < (ulg)curr + WIN_INIT) { 1.1422 + /* High water mark at or above current data, but below current data 1.1423 + * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up 1.1424 + * to end of window, whichever is less. 1.1425 + */ 1.1426 + init = (ulg)curr + WIN_INIT - s->high_water; 1.1427 + if (init > s->window_size - s->high_water) 1.1428 + init = s->window_size - s->high_water; 1.1429 + zmemzero(s->window + s->high_water, (unsigned)init); 1.1430 + s->high_water += init; 1.1431 + } 1.1432 + } 1.1433 +} 1.1434 + 1.1435 +/* =========================================================================== 1.1436 + * Flush the current block, with given end-of-file flag. 1.1437 + * IN assertion: strstart is set to the end of the current match. 1.1438 + */ 1.1439 +#define FLUSH_BLOCK_ONLY(s, last) { \ 1.1440 + _tr_flush_block(s, (s->block_start >= 0L ? \ 1.1441 + (charf *)&s->window[(unsigned)s->block_start] : \ 1.1442 + (charf *)Z_NULL), \ 1.1443 + (ulg)((long)s->strstart - s->block_start), \ 1.1444 + (last)); \ 1.1445 + s->block_start = s->strstart; \ 1.1446 + flush_pending(s->strm); \ 1.1447 + Tracev((stderr,"[FLUSH]")); \ 1.1448 +} 1.1449 + 1.1450 +/* Same but force premature exit if necessary. */ 1.1451 +#define FLUSH_BLOCK(s, last) { \ 1.1452 + FLUSH_BLOCK_ONLY(s, last); \ 1.1453 + if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \ 1.1454 +} 1.1455 + 1.1456 +/* =========================================================================== 1.1457 + * Copy without compression as much as possible from the input stream, return 1.1458 + * the current block state. 1.1459 + * This function does not insert new strings in the dictionary since 1.1460 + * uncompressible data is probably not useful. This function is used 1.1461 + * only for the level=0 compression option. 1.1462 + * NOTE: this function should be optimized to avoid extra copying from 1.1463 + * window to pending_buf. 
1.1464 + */ 1.1465 +local block_state deflate_stored(s, flush) 1.1466 + deflate_state *s; 1.1467 + int flush; 1.1468 +{ 1.1469 + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited 1.1470 + * to pending_buf_size, and each stored block has a 5 byte header: 1.1471 + */ 1.1472 + ulg max_block_size = 0xffff; 1.1473 + ulg max_start; 1.1474 + 1.1475 + if (max_block_size > s->pending_buf_size - 5) { 1.1476 + max_block_size = s->pending_buf_size - 5; 1.1477 + } 1.1478 + 1.1479 + /* Copy as much as possible from input to output: */ 1.1480 + for (;;) { 1.1481 + /* Fill the window as much as possible: */ 1.1482 + if (s->lookahead <= 1) { 1.1483 + 1.1484 + Assert(s->strstart < s->w_size+MAX_DIST(s) || 1.1485 + s->block_start >= (long)s->w_size, "slide too late"); 1.1486 + 1.1487 + fill_window(s); 1.1488 + if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; 1.1489 + 1.1490 + if (s->lookahead == 0) break; /* flush the current block */ 1.1491 + } 1.1492 + Assert(s->block_start >= 0L, "block gone"); 1.1493 + 1.1494 + s->strstart += s->lookahead; 1.1495 + s->lookahead = 0; 1.1496 + 1.1497 + /* Emit a stored block if pending_buf will be full: */ 1.1498 + max_start = s->block_start + max_block_size; 1.1499 + if (s->strstart == 0 || (ulg)s->strstart >= max_start) { 1.1500 + /* strstart == 0 is possible when wraparound on 16-bit machine */ 1.1501 + s->lookahead = (uInt)(s->strstart - max_start); 1.1502 + s->strstart = (uInt)max_start; 1.1503 + FLUSH_BLOCK(s, 0); 1.1504 + } 1.1505 + /* Flush if we may have to slide, otherwise block_start may become 1.1506 + * negative and the data will be gone: 1.1507 + */ 1.1508 + if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { 1.1509 + FLUSH_BLOCK(s, 0); 1.1510 + } 1.1511 + } 1.1512 + FLUSH_BLOCK(s, flush == Z_FINISH); 1.1513 + return flush == Z_FINISH ? finish_done : block_done; 1.1514 +} 1.1515 + 1.1516 +/* =========================================================================== 1.1517 + * Compress as much as possible from the input stream, return the current 1.1518 + * block state. 1.1519 + * This function does not perform lazy evaluation of matches and inserts 1.1520 + * new strings in the dictionary only for unmatched strings or for short 1.1521 + * matches. It is used only for the fast compression options. 1.1522 + */ 1.1523 +local block_state deflate_fast(s, flush) 1.1524 + deflate_state *s; 1.1525 + int flush; 1.1526 +{ 1.1527 + IPos hash_head; /* head of the hash chain */ 1.1528 + int bflush; /* set if current block must be flushed */ 1.1529 + 1.1530 + for (;;) { 1.1531 + /* Make sure that we always have enough lookahead, except 1.1532 + * at the end of the input file. We need MAX_MATCH bytes 1.1533 + * for the next match, plus MIN_MATCH bytes to insert the 1.1534 + * string following the next match. 1.1535 + */ 1.1536 + if (s->lookahead < MIN_LOOKAHEAD) { 1.1537 + fill_window(s); 1.1538 + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { 1.1539 + return need_more; 1.1540 + } 1.1541 + if (s->lookahead == 0) break; /* flush the current block */ 1.1542 + } 1.1543 + 1.1544 + /* Insert the string window[strstart .. strstart+2] in the 1.1545 + * dictionary, and set hash_head to the head of the hash chain: 1.1546 + */ 1.1547 + hash_head = NIL; 1.1548 + if (s->lookahead >= MIN_MATCH) { 1.1549 + INSERT_STRING(s, s->strstart, hash_head); 1.1550 + } 1.1551 + 1.1552 + /* Find the longest match, discarding those <= prev_length. 
1.1553 +         * At this point we have always match_length < MIN_MATCH
1.1554 +         */
1.1555 +        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1.1556 +            /* To simplify the code, we prevent matches with the string
1.1557 +             * of window index 0 (in particular we have to avoid a match
1.1558 +             * of the string with itself at the start of the input file).
1.1559 +             */
1.1560 +            s->match_length = longest_match (s, hash_head);
1.1561 +            /* longest_match() sets match_start */
1.1562 +        }
1.1563 +        if (s->match_length >= MIN_MATCH) {
1.1564 +            check_match(s, s->strstart, s->match_start, s->match_length);
1.1565 +
1.1566 +            _tr_tally_dist(s, s->strstart - s->match_start,
1.1567 +                           s->match_length - MIN_MATCH, bflush);
1.1568 +
1.1569 +            s->lookahead -= s->match_length;
1.1570 +
1.1571 +            /* Insert new strings in the hash table only if the match length
1.1572 +             * is not too large. This saves time but degrades compression.
1.1573 +             */
1.1574 +#ifndef FASTEST
1.1575 +            if (s->match_length <= s->max_insert_length &&
1.1576 +                s->lookahead >= MIN_MATCH) {
1.1577 +                s->match_length--; /* string at strstart already in table */
1.1578 +                do {
1.1579 +                    s->strstart++;
1.1580 +                    INSERT_STRING(s, s->strstart, hash_head);
1.1581 +                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1.1582 +                     * always MIN_MATCH bytes ahead.
1.1583 +                     */
1.1584 +                } while (--s->match_length != 0);
1.1585 +                s->strstart++;
1.1586 +            } else
1.1587 +#endif
1.1588 +            {
1.1589 +                s->strstart += s->match_length;
1.1590 +                s->match_length = 0;
1.1591 +                s->ins_h = s->window[s->strstart];
1.1592 +                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1.1593 +#if MIN_MATCH != 3
1.1594 +                Call UPDATE_HASH() MIN_MATCH-3 more times
1.1595 +#endif
1.1596 +                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1.1597 +                 * matter since it will be recomputed at next deflate call.
1.1598 +                 */
1.1599 +            }
1.1600 +        } else {
1.1601 +            /* No match, output a literal byte */
1.1602 +            Tracevv((stderr,"%c", s->window[s->strstart]));
1.1603 +            _tr_tally_lit (s, s->window[s->strstart], bflush);
1.1604 +            s->lookahead--;
1.1605 +            s->strstart++;
1.1606 +        }
1.1607 +        if (bflush) FLUSH_BLOCK(s, 0);
1.1608 +    }
1.1609 +    FLUSH_BLOCK(s, flush == Z_FINISH);
1.1610 +    return flush == Z_FINISH ? finish_done : block_done;
1.1611 +}
1.1612 +
1.1613 +#ifndef FASTEST
1.1614 +/* ===========================================================================
1.1615 + * Same as above, but achieves better compression. We use a lazy
1.1616 + * evaluation for matches: a match is finally adopted only if there is
1.1617 + * no better match at the next window position.
1.1618 + */
1.1619 +local block_state deflate_slow(s, flush)
1.1620 +    deflate_state *s;
1.1621 +    int flush;
1.1622 +{
1.1623 +    IPos hash_head;          /* head of hash chain */
1.1624 +    int bflush;              /* set if current block must be flushed */
1.1625 +
1.1626 +    /* Process the input block. */
1.1627 +    for (;;) {
1.1628 +        /* Make sure that we always have enough lookahead, except
1.1629 +         * at the end of the input file. We need MAX_MATCH bytes
1.1630 +         * for the next match, plus MIN_MATCH bytes to insert the
1.1631 +         * string following the next match.
1.1632 +         */
1.1633 +        if (s->lookahead < MIN_LOOKAHEAD) {
1.1634 +            fill_window(s);
1.1635 +            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1.1636 +                return need_more;
1.1637 +            }
1.1638 +            if (s->lookahead == 0) break; /* flush the current block */
1.1639 +        }
1.1640 +
1.1641 +        /* Insert the string window[strstart .. strstart+2] in the
1.1642 +         * dictionary, and set hash_head to the head of the hash chain:
1.1643 +         */
1.1644 +        hash_head = NIL;
1.1645 +        if (s->lookahead >= MIN_MATCH) {
1.1646 +            INSERT_STRING(s, s->strstart, hash_head);
1.1647 +        }
1.1648 +
1.1649 +        /* Find the longest match, discarding those <= prev_length.
1.1650 +         */
1.1651 +        s->prev_length = s->match_length, s->prev_match = s->match_start;
1.1652 +        s->match_length = MIN_MATCH-1;
1.1653 +
1.1654 +        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1.1655 +            s->strstart - hash_head <= MAX_DIST(s)) {
1.1656 +            /* To simplify the code, we prevent matches with the string
1.1657 +             * of window index 0 (in particular we have to avoid a match
1.1658 +             * of the string with itself at the start of the input file).
1.1659 +             */
1.1660 +            s->match_length = longest_match (s, hash_head);
1.1661 +            /* longest_match() sets match_start */
1.1662 +
1.1663 +            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
1.1664 +#if TOO_FAR <= 32767
1.1665 +                || (s->match_length == MIN_MATCH &&
1.1666 +                    s->strstart - s->match_start > TOO_FAR)
1.1667 +#endif
1.1668 +                )) {
1.1669 +
1.1670 +                /* If prev_match is also MIN_MATCH, match_start is garbage
1.1671 +                 * but we will ignore the current match anyway.
1.1672 +                 */
1.1673 +                s->match_length = MIN_MATCH-1;
1.1674 +            }
1.1675 +        }
1.1676 +        /* If there was a match at the previous step and the current
1.1677 +         * match is not better, output the previous match:
1.1678 +         */
1.1679 +        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1.1680 +            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1.1681 +            /* Do not insert strings in hash table beyond this. */
1.1682 +
1.1683 +            check_match(s, s->strstart-1, s->prev_match, s->prev_length);
1.1684 +
1.1685 +            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
1.1686 +                           s->prev_length - MIN_MATCH, bflush);
1.1687 +
1.1688 +            /* Insert in hash table all strings up to the end of the match.
1.1689 +             * strstart-1 and strstart are already inserted. If there is not
1.1690 +             * enough lookahead, the last two strings are not inserted in
1.1691 +             * the hash table.
1.1692 +             */
1.1693 +            s->lookahead -= s->prev_length-1;
1.1694 +            s->prev_length -= 2;
1.1695 +            do {
1.1696 +                if (++s->strstart <= max_insert) {
1.1697 +                    INSERT_STRING(s, s->strstart, hash_head);
1.1698 +                }
1.1699 +            } while (--s->prev_length != 0);
1.1700 +            s->match_available = 0;
1.1701 +            s->match_length = MIN_MATCH-1;
1.1702 +            s->strstart++;
1.1703 +
1.1704 +            if (bflush) FLUSH_BLOCK(s, 0);
1.1705 +
1.1706 +        } else if (s->match_available) {
1.1707 +            /* If there was no match at the previous position, output a
1.1708 +             * single literal. If there was a match but the current match
1.1709 +             * is longer, truncate the previous match to a single literal.
1.1710 +             */
1.1711 +            Tracevv((stderr,"%c", s->window[s->strstart-1]));
1.1712 +            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1.1713 +            if (bflush) {
1.1714 +                FLUSH_BLOCK_ONLY(s, 0);
1.1715 +            }
1.1716 +            s->strstart++;
1.1717 +            s->lookahead--;
1.1718 +            if (s->strm->avail_out == 0) return need_more;
1.1719 +        } else {
1.1720 +            /* There is no previous match to compare with, wait for
1.1721 +             * the next step to decide.
1.1722 +             */
1.1723 +            s->match_available = 1;
1.1724 +            s->strstart++;
1.1725 +            s->lookahead--;
1.1726 +        }
1.1727 +    }
1.1728 +    Assert (flush != Z_NO_FLUSH, "no flush?");
1.1729 +    if (s->match_available) {
1.1730 +        Tracevv((stderr,"%c", s->window[s->strstart-1]));
1.1731 +        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1.1732 +        s->match_available = 0;
1.1733 +    }
1.1734 +    FLUSH_BLOCK(s, flush == Z_FINISH);
1.1735 +    return flush == Z_FINISH ? finish_done : block_done;
1.1736 +}
1.1737 +#endif /* FASTEST */
1.1738 +
1.1739 +/* ===========================================================================
1.1740 + * For Z_RLE, simply look for runs of bytes, generate matches only of distance
1.1741 + * one.  Do not maintain a hash table.  (It will be regenerated if this run of
1.1742 + * deflate switches away from Z_RLE.)
1.1743 + */
1.1744 +local block_state deflate_rle(s, flush)
1.1745 +    deflate_state *s;
1.1746 +    int flush;
1.1747 +{
1.1748 +    int bflush;             /* set if current block must be flushed */
1.1749 +    uInt prev;              /* byte at distance one to match */
1.1750 +    Bytef *scan, *strend;   /* scan goes up to strend for length of run */
1.1751 +
1.1752 +    for (;;) {
1.1753 +        /* Make sure that we always have enough lookahead, except
1.1754 +         * at the end of the input file. We need MAX_MATCH bytes
1.1755 +         * for the longest encodable run.
1.1756 +         */
1.1757 +        if (s->lookahead < MAX_MATCH) {
1.1758 +            fill_window(s);
1.1759 +            if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) {
1.1760 +                return need_more;
1.1761 +            }
1.1762 +            if (s->lookahead == 0) break; /* flush the current block */
1.1763 +        }
1.1764 +
1.1765 +        /* See how many times the previous byte repeats */
1.1766 +        s->match_length = 0;
1.1767 +        if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
1.1768 +            scan = s->window + s->strstart - 1;
1.1769 +            prev = *scan;
1.1770 +            if (prev == *++scan && prev == *++scan && prev == *++scan) {
1.1771 +                strend = s->window + s->strstart + MAX_MATCH;
1.1772 +                do {
1.1773 +                } while (prev == *++scan && prev == *++scan &&
1.1774 +                         prev == *++scan && prev == *++scan &&
1.1775 +                         prev == *++scan && prev == *++scan &&
1.1776 +                         prev == *++scan && prev == *++scan &&
1.1777 +                         scan < strend);
1.1778 +                s->match_length = MAX_MATCH - (int)(strend - scan);
1.1779 +                if (s->match_length > s->lookahead)
1.1780 +                    s->match_length = s->lookahead;
1.1781 +            }
1.1782 +        }
1.1783 +
1.1784 +        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
1.1785 +        if (s->match_length >= MIN_MATCH) {
1.1786 +            check_match(s, s->strstart, s->strstart - 1, s->match_length);
1.1787 +
1.1788 +            _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);
1.1789 +
1.1790 +            s->lookahead -= s->match_length;
1.1791 +            s->strstart += s->match_length;
1.1792 +            s->match_length = 0;
1.1793 +        } else {
1.1794 +            /* No match, output a literal byte */
1.1795 +            Tracevv((stderr,"%c", s->window[s->strstart]));
1.1796 +            _tr_tally_lit (s, s->window[s->strstart], bflush);
1.1797 +            s->lookahead--;
1.1798 +            s->strstart++;
1.1799 +        }
1.1800 +        if (bflush) FLUSH_BLOCK(s, 0);
1.1801 +    }
1.1802 +    FLUSH_BLOCK(s, flush == Z_FINISH);
1.1803 +    return flush == Z_FINISH ? finish_done : block_done;
1.1804 +}
1.1805 +
1.1806 +/* ===========================================================================
1.1807 + * For Z_HUFFMAN_ONLY, do not look for matches.  Do not maintain a hash table.
1.1808 + * (It will be regenerated if this run of deflate switches away from Huffman.)
1.1809 + */
1.1810 +local block_state deflate_huff(s, flush)
1.1811 +    deflate_state *s;
1.1812 +    int flush;
1.1813 +{
1.1814 +    int bflush;             /* set if current block must be flushed */
1.1815 +
1.1816 +    for (;;) {
1.1817 +        /* Make sure that we have a literal to write. */
1.1818 +        if (s->lookahead == 0) {
1.1819 +            fill_window(s);
1.1820 +            if (s->lookahead == 0) {
1.1821 +                if (flush == Z_NO_FLUSH)
1.1822 +                    return need_more;
1.1823 +                break;      /* flush the current block */
1.1824 +            }
1.1825 +        }
1.1826 +
1.1827 +        /* Output a literal byte */
1.1828 +        s->match_length = 0;
1.1829 +        Tracevv((stderr,"%c", s->window[s->strstart]));
1.1830 +        _tr_tally_lit (s, s->window[s->strstart], bflush);
1.1831 +        s->lookahead--;
1.1832 +        s->strstart++;
1.1833 +        if (bflush) FLUSH_BLOCK(s, 0);
1.1834 +    }
1.1835 +    FLUSH_BLOCK(s, flush == Z_FINISH);
1.1836 +    return flush == Z_FINISH ? finish_done : block_done;
1.1837 +}
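
The five compress functions added above are internal to the library; a caller only reaches them through zlib's public API. In this 1.2.5 import, deflate() dispatches on the stream's strategy and level: Z_HUFFMAN_ONLY selects deflate_huff(), Z_RLE selects deflate_rle(), level 0 selects deflate_stored(), and the remaining levels run deflate_fast() or deflate_slow() as chosen by the configuration table. The sketch below is not part of the imported file; it only illustrates how a caller would exercise those paths via deflateInit2(), and the helper name and buffer handling are made up for the example.

/* Illustrative sketch only -- not part of deflate.c.  Compresses src into
 * dst with a single deflate() call; assumes dst_len is large enough
 * (e.g. obtained from deflateBound()). */
#include <zlib.h>
#include <string.h>

static int compress_with(Bytef *dst, uLongf *dst_len,
                         const Bytef *src, uLong src_len,
                         int level, int strategy)
{
    z_stream strm;
    int ret;

    memset(&strm, 0, sizeof(strm));          /* zalloc/zfree/opaque = Z_NULL */
    /* 15 = full 32K window, 8 = default memLevel.  Level 0 ends up in
     * deflate_stored(); Z_RLE and Z_HUFFMAN_ONLY select deflate_rle() and
     * deflate_huff(); other level/strategy pairs run deflate_fast() or
     * deflate_slow() as chosen by the configuration table. */
    ret = deflateInit2(&strm, level, Z_DEFLATED, 15, 8, strategy);
    if (ret != Z_OK) return ret;

    strm.next_in   = (Bytef *)src;
    strm.avail_in  = src_len;
    strm.next_out  = dst;
    strm.avail_out = (uInt)*dst_len;

    ret = deflate(&strm, Z_FINISH);          /* one-shot: finish in one call */
    *dst_len = strm.total_out;
    deflateEnd(&strm);

    return ret == Z_STREAM_END ? Z_OK : (ret == Z_OK ? Z_BUF_ERROR : ret);
}

Called with (0, Z_DEFAULT_STRATEGY) this stores the data uncompressed in blocks of at most 0xffff bytes; with (Z_BEST_COMPRESSION, Z_DEFAULT_STRATEGY) it drives the lazy matcher in deflate_slow().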
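
The need_more / finish_started states matter most when the output buffer is smaller than the compressed stream: FLUSH_BLOCK returns out of the compress function as soon as avail_out reaches zero, and deflate() then hands Z_OK back to the caller instead of Z_STREAM_END. A caller that streams its output therefore keeps calling deflate(&strm, Z_FINISH) until it sees Z_STREAM_END. The loop below is again only a hedged sketch, not part of the imported sources; write_out() is a hypothetical sink for the compressed bytes.

/* Illustrative sketch only -- not part of deflate.c.  strm must already be
 * initialized with deflateInit2() and have all input attached via
 * next_in/avail_in; write_out() is a hypothetical output sink. */
static int finish_stream(z_streamp strm, int (*write_out)(const Bytef *, uInt))
{
    Bytef out[16384];
    int ret;

    do {
        strm->next_out  = out;
        strm->avail_out = (uInt)sizeof(out);
        ret = deflate(strm, Z_FINISH);
        if (ret == Z_STREAM_ERROR) return ret;
        /* Hand over whatever deflate() produced this round, whether it
         * stopped because a block was done or because avail_out hit zero. */
        if (!write_out(out, (uInt)(sizeof(out) - strm->avail_out)))
            return Z_ERRNO;
    } while (ret != Z_STREAM_END);           /* finish_done reached inside */

    return Z_OK;
}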