Subversion Repositories autosfx

Rev

Blame | Last modification | View Log | RSS feed

  1. #include "stdafx.h"
  2. #pragma hdrstop
  3. #include "dz_errs.h"
  4.  
  5. #undef _DZ_FILE_
  6. #define _DZ_FILE_ DZ_UNXPLODE_CPP
  7. /* explode.c -- put in the public domain by Mark Adler
  8.  * version c14, 22 November 1995
  9.  * This version modified by Chris Vleghert and Eric W. Engler
  10.  * for BCB/Delphi Zip, Jun 18, 2000.
  11.  
  12.   Copyright (c) 1990-2007 Info-ZIP.  All rights reserved.
  13.  
  14.   See the accompanying file LICENSE, version 2007-Mar-4 or later
  15.   (the contents of which are also included in zip.h) for terms of use.
  16.   If, for some reason, all these files are missing, the Info-ZIP license
  17.   also may be found at:  ftp://ftp.info-zip.org/pub/infozip/license.html
  18.  
  19.   parts Copyright (C) 1997 Mike White, Eric W. Engler
  20. ************************************************************************
  21.  Copyright (C) 2009, 2010  by Russell J. Peters, Roger Aelbrecht
  22.  
  23.    This file is part of TZipMaster Version 1.9.
  24.  
  25.     TZipMaster is free software: you can redistribute it and/or modify
  26.     it under the terms of the GNU Lesser General Public License as published by
  27.     the Free Software Foundation, either version 3 of the License, or
  28.     (at your option) any later version.
  29.  
  30.     TZipMaster is distributed in the hope that it will be useful,
  31.     but WITHOUT ANY WARRANTY; without even the implied warranty of
  32.     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  33.     GNU Lesser General Public License for more details.
  34.  
  35.     You should have received a copy of the GNU Lesser General Public License
  36.     along with TZipMaster.  If not, see <http://www.gnu.org/licenses/>.
  37.  
  38.     contact: problems@delphizip.org (include ZipMaster in the subject).
  39.     updates: http://www.delphizip.org
  40.     DelphiZip maillist subscribe at http://www.freelists.org/list/delphizip
  41. ************************************************************************/
  42.  
  43. /* Explode imploded (PKZIP method 6 compressed) data.  This compression
  44.  * method searches for as much of the current string of bytes (up to a length
  45.  * of ~320) in the previous 4K or 8K bytes.  If it doesn't find any matches
  46.  * (of at least length 2 or 3), it codes the next byte.  Otherwise, it codes
  47.  * the length of the matched string and its distance backwards from the
  48.  * current position.  Single bytes ("literals") are preceded by a one (a
  49.  * single bit) and are either uncoded (the eight bits go directly into the
  50.  * compressed stream for a total of nine bits) or Huffman coded with a
  51.  * supplied literal code tree.  If literals are coded, then the minimum match
  52.  * length is three, otherwise it is two.
  53.  *
  54.  * There are therefore four kinds of imploded streams: 8K search with coded
  55.  * literals (min match = 3), 4K search with coded literals (min match = 3),
  56.  * 8K with uncoded literals (min match = 2), and 4K with uncoded literals
  57.  * (min match = 2).  The kind of stream is identified in two bits of a
  58.  * general purpose bit flag that is outside of the compressed stream.
  59.  *
  60.  * Distance-length pairs for matched strings are preceded by a zero bit (to
  61.  * distinguish them from literals) and are always coded.  The distance comes
  62.  * first and is either the low six (4K) or low seven (8K) bits of the
  63.  * distance (uncoded), followed by the high six bits of the distance coded.
  64.  * Then the length is six bits coded (0..63 + min match length), and if the
  65.  * maximum such length is coded, then it's followed by another eight bits
  66.  * (uncoded) to be added to the coded length.  This gives a match length
  67.  * range of 2..320 or 3..321 bytes.
  68.  *
  69.  * The literal, length, and distance codes are all represented in a slightly
  70.  * compressed form themselves.  What is sent are the lengths of the codes for
  71.  * each value, which is sufficient to construct the codes.  Each byte of the
  72.  * code representation is the code length (the low four bits representing
  73.  * 1..16), and the number of values sequentially with that length (the high
  74.  * four bits also representing 1..16).  There are 256 literal code values (if
  75.  * literals are coded), 64 length code values, and 64 distance code values,
  76.  * in that order at the beginning of the compressed stream.  Each set of code
  77.  * values is preceded (redundantly) with a byte indicating how many bytes are
  78.  * in the code description that follows, in the range 1..256.
  79.  *
  80.  * The codes themselves are decoded using tables made by huft_build() from
  81.  * the bit lengths.  That routine and its comments are in the inflate.c
  82.  * module.
  83.  */
  84.  
  85. #include "UnzOp.h"
  86.  
#  define wsize UWSIZE /* always use inflate's 32K window as the circular output buffer */
  88.  
  89. /* The implode algorithm uses a sliding 4K or 8K byte window on the
  90. * uncompressed stream to find repeated byte strings.  This is implemented
  91. * here as a circular buffer.  The index is updated simply by incrementing
  92. * and then and'ing with 0x0fff (4K-1) or 0x1fff (8K-1).  Here, the 32K
  93. * buffer of inflate is used, and it works just as well to always have
  94. * a 32K circular buffer, so the index is anded with 0x7fff.  This is
  95. * done to allow the window to also be used as the output buffer. */
  96.  
  97. /* This must be supplied in an external module useable like "uch slide[8192];"
  98. * or "uch *slide;", where the latter would be malloc'ed.  In unzip, slide[]
  99. * is actually a 32K area for use by inflate, which uses a 32K sliding window. */
  100.  
/* Tables for length and distance */

/* Copy lengths for minimum match length 2 (streams with uncoded literals):
 * code values 0..63 map to lengths 2..65 */
static const ush cplen2[] =
  {
    2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
    35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
    59, 60, 61, 62, 63, 64, 65
  };
/* Copy lengths for minimum match length 3 (streams with coded literals):
 * code values 0..63 map to lengths 3..66 */
static const ush cplen3[] =
  {
    3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
    36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
    60, 61, 62, 63, 64, 65, 66
  };
/* Extra bits for the length codes: only the maximum length code (63)
 * carries 8 extra uncoded bits */
static const uch extra[] =
  {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8
  };
/* Distance base values for a 4K sliding window (steps of 64) */
static const ush cpdist4[] =
  {
    1, 65, 129, 193, 257, 321, 385, 449, 513, 577, 641, 705, 769, 833, 897, 961, 1025, 1089, 1153, 1217, 1281, 1345, 1409, 1473,
    1537, 1601, 1665, 1729, 1793, 1857, 1921, 1985, 2049, 2113, 2177, 2241, 2305, 2369, 2433, 2497, 2561, 2625, 2689, 2753,
    2817, 2881, 2945, 3009, 3073, 3137, 3201, 3265, 3329, 3393, 3457, 3521, 3585, 3649, 3713, 3777, 3841, 3905, 3969, 4033
  };
/* Distance base values for an 8K sliding window (steps of 128) */
static const ush cpdist8[] =
  {
    1, 129, 257, 385, 513, 641, 769, 897, 1025, 1153, 1281, 1409, 1537, 1665, 1793, 1921, 2049, 2177, 2305, 2433, 2561, 2689,
    2817, 2945, 3073, 3201, 3329, 3457, 3585, 3713, 3841, 3969, 4097, 4225, 4353, 4481, 4609, 4737, 4865, 4993, 5121, 5249,
    5377, 5505, 5633, 5761, 5889, 6017, 6145, 6273, 6401, 6529, 6657, 6785, 6913, 7041, 7169, 7297, 7425,
    7553, 7681, 7809, 7937, 8065
  };
  132.  
/* Macros for inflate() bit peeking and grabbing.
 * The usage is:
 *
 *      NEEDBITS(j)
 *      x = b & mask_bits[j];
 *      DUMPBITS(j)
 *
 * where NEEDBITS makes sure that b has at least j bits in it, and
 * DUMPBITS removes the bits from b.  The macros use the variable k
 * for the number of bits in b.  Normally, b and k are register
 * variables for speed. */
#define NEEDBITS(n) {while(k < (n)){b |= ((ulg)NEXTBYTE) << k;k += 8;}}
#define DUMPBITS(n) {b >>= (n);k -= (n);}

/* NOTE(review): NEXTBYTE is presumably supplied by UnzOp.h / the inflate
 * module -- confirm; the historical local definition is kept for reference: */
//#define NEXTBYTE  (--fincnt >= 0 ? (int)(*finptr++) : readbyte())
  149. /* ===========================================================================
  150. * Get the bit lengths for a code representation from the compressed
  151. * stream.  If get_tree() returns 4, then there is an error in the data. * Otherwise zero is returned.
  152. *l :: Bit lengths. n :: Number expected. */
  153. int UnzInf::get_tree(unsigned * l, unsigned n)
  154. {
  155.   unsigned i;
  156.   /* bytes remaining in list */
  157.   unsigned k;
  158.   /* lengths entered */
  159.   unsigned j;
  160.   /* number of codes */
  161.   unsigned b;
  162.   /* bit length for those codes */
  163.  
  164.   /* get bit lengths */
  165.   i = NEXTBYTE + 1;
  166.   /* length/count pairs to read */
  167.   k = 0;
  168.   /* next code */
  169.   do
  170.   {
  171.     b = ((j = NEXTBYTE) & 0xf) + 1;
  172.     /* bits in code (1..16) */
  173.     j = ((j & 0xf0) >> 4) + 1;
  174.     /* codes with those bits (1..16) */
  175.     if (k + j > n)
  176.       return 4;
  177.     /* don't overflow l[] */
  178.     do
  179.     {
  180.       l[k++] = b;
  181.     }
  182.     while (--j);
  183.   }
  184.   while (--i);
  185.   return k != n ? 4 : 0;
  186.   /* should have read n of them */
  187. }
  188.  
  189.  
/* ===========================================================================
 * Decompress imploded data that uses coded (Huffman) literals and an 8K
 * sliding window (minimum match length 3; 7 uncoded low distance bits).
 * tb, tl, td :: literal, length and distance decode tables (from huft_build).
 * bb, bl, bd :: number of bits resolved by one first-level lookup in each.
 * Returns 0 on success, 1 on an invalid (99) code, 5 if the number of
 * compressed bytes consumed does not match the expected csize. */
int UnzInf::explode_lit8(struct huft * tb, struct huft * tl, struct huft * td, int bb, int bl, int bd)
{
//  long s;
  ZInt64 s;                /* bytes still to decompress */
  register unsigned e;     /* table entry flag / number of extra bits */
  unsigned n, d;           /* length and window index for the copy */
  unsigned w;              /* current window (output) position */
  struct huft * t;         /* pointer to current table entry */
  unsigned mb, ml, md;     /* masks for bb, bl, and bd bits */
  register ulg b;          /* bit buffer */
  register unsigned k;     /* number of valid bits in the bit buffer */
  unsigned u;              /* true while window content is unflushed */

  /* explode the coded data */
  b = k = w = 0;           /* initialize bit buffer, window position */
  u = 1;                   /* buffer unflushed */
  mb = mask_bits[bb];      /* precompute masks for speed */
  ml = mask_bits[bl];
  md = mask_bits[bd];
  s = fucsize;             /* uncompressed size still expected */
  while (s > 0)
  {
    /* do until ucsize bytes uncompressed */
    NEEDBITS(1)
    if (b & 1)
    {
      /* set bit: a Huffman-coded literal follows */
      DUMPBITS(1) s--;
      NEEDBITS((unsigned)bb) /* get coded literal */
      if ((e = (t = tb + ((~(unsigned)b) & mb))->e) > 16)
        do
        {
          /* walk sub-tables until a data entry (e <= 16) is found */
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) Slide[w++] = (uch)t->v.n;
      if (w == wsize)
      {
        /* window full: write it out and start over */
        flush(Slide, w, 0);
        w = u = 0;
      }
    }
    else
    {
      /* clear bit: a distance/length pair follows */
      DUMPBITS(1) NEEDBITS(7) /* 7 uncoded low distance bits (8K window) */
      d = (unsigned)b & 0x7f;
      DUMPBITS(7) NEEDBITS((unsigned)bd) /* get coded distance high bits */
      if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
        do
        {
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) d = w - d - t->v.n;
      /* construct offset of the match within the circular window */
      NEEDBITS((unsigned)bl) /* get coded length */
      if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
        do
        {
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) n = t->v.n;
      if (e)
      {
        /* maximum coded length: add eight uncoded extra bits */
        NEEDBITS(8) n += (unsigned)b & 0xff;
        DUMPBITS(8)
      }
      /* do the copy: n bytes from offset d, in window-sized chunks */
      s -= n;
      do
      {
        /* NOTE: upstream USE_STRM_OUTPUT (redirect) variant omitted here */
        n -= (e = (e = wsize - ((d &= wsize - 1) > w ? d : w)) > n ? n : e);
        if (u && w <= d)
        {
          /* source lies beyond anything written yet: emit zero bytes */
          ZeroMemory(Slide + w, e);
          w += e;
          d += e;
        }
        else
          if (w - d >= e)
          {
            /* regions don't overlap (this test assumes unsigned comparison) */
            memcpy(Slide + w, Slide + d, e);
            w += e;
            d += e;
          }
          else /* do it slow to avoid memcpy() overlap */
            do
            {
              Slide[w++] = Slide[d++];
            }
            while (--e);
        if (w == wsize)
        {
          flush(Slide, w, 0);
          w = u = 0;
        }
      }
      while (n);
    }
  }
  /* flush out Slide */
  flush(Slide, w, 0);
  /* should have read csize bytes, but sometimes read one too many:
   * k >> 3 (whole unread bytes left in the bit buffer) compensates */
  if (fcsize + fincnt + (k >> 3))
  {
    fused_csize = flrec.csize - fcsize - fincnt - (k >> 3);
    return 5;
  }
  return 0;
}
  337.  
  338.  
/* ===========================================================================
 * Decompress imploded data that uses coded (Huffman) literals and a 4K
 * sliding window (minimum match length 3; 6 uncoded low distance bits).
 * tb, tl, td :: literal, length and distance decode tables (from huft_build).
 * bb, bl, bd :: number of bits resolved by one first-level lookup in each.
 * Returns 0 on success, 1 on an invalid (99) code, 5 if the number of
 * compressed bytes consumed does not match the expected csize. */
int UnzInf::explode_lit4(struct huft * tb, struct huft * tl, struct huft * td, int bb, int bl, int bd)
{
//  long s;
  ZInt64 s;                /* bytes still to decompress */
  register unsigned e;     /* table entry flag / number of extra bits */
  unsigned n, d;           /* length and window index for the copy */
  unsigned w;              /* current window (output) position */
  struct huft * t;         /* pointer to current table entry */
  unsigned mb, ml, md;     /* masks for bb, bl, and bd bits */
  register ulg b;          /* bit buffer */
  register unsigned k;     /* number of valid bits in the bit buffer */
  unsigned u;              /* true while window content is unflushed */

  /* explode the coded data */
  b = k = w = 0;           /* initialize bit buffer, window position */
  u = 1;                   /* buffer unflushed */
  mb = mask_bits[bb];      /* precompute masks for speed */
  ml = mask_bits[bl];
  md = mask_bits[bd];
  s = fucsize;             /* uncompressed size still expected */
  while (s > 0)
  {
    /* do until ucsize bytes uncompressed */
    NEEDBITS(1)
    if (b & 1)
    {
      /* set bit: a Huffman-coded literal follows */
      DUMPBITS(1) s--;
      NEEDBITS((unsigned)bb) /* get coded literal */
      if ((e = (t = tb + ((~(unsigned)b) & mb))->e) > 16)
        do
        {
          /* walk sub-tables until a data entry (e <= 16) is found */
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) Slide[w++] = (uch)t->v.n;
      if (w == wsize)
      {
        /* window full: write it out and start over */
        flush(Slide, w, 0);
        w = u = 0;
      }
    }
    else
    {
      /* clear bit: a distance/length pair follows */
      DUMPBITS(1) NEEDBITS(6) /* 6 uncoded low distance bits (4K window) */
      d = (unsigned)b & 0x3f;
      DUMPBITS(6) NEEDBITS((unsigned)bd) /* get coded distance high bits */
      if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
        do
        {
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) d = w - d - t->v.n;
      /* construct offset of the match within the circular window */
      NEEDBITS((unsigned)bl) /* get coded length */
      if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
        do
        {
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) n = t->v.n;
      if (e)
      {
        /* maximum coded length: add eight uncoded extra bits */
        NEEDBITS(8) n += (unsigned)b & 0xff;
        DUMPBITS(8)
      }
      /* do the copy: n bytes from offset d, in window-sized chunks */
      s -= n;
      do
      {
        /* NOTE: upstream USE_STRM_OUTPUT (redirect) variant omitted here */
        n -= (e = (e = wsize - ((d &= wsize - 1) > w ? d : w)) > n ? n : e);
        if (u && w <= d)
        {
          /* source lies beyond anything written yet: emit zero bytes */
          ZeroMemory(Slide + w, e);
          w += e;
          d += e;
        }
        else
          if (w - d >= e)
          {
            /* regions don't overlap (this test assumes unsigned comparison) */
            memcpy(Slide + w, Slide + d, e);
            w += e;
            d += e;
          }
          else /* do it slow to avoid memcpy() overlap */
            do
            {
              Slide[w++] = Slide[d++];
            }
            while (--e);
        if (w == wsize)
        {
          flush(Slide, w, 0);
          w = u = 0;
        }
      }
      while (n);
    }
  }
  /* flush out Slide */
  flush(Slide, w, 0);
  /* should have read csize bytes, but sometimes read one too many:
   * k >> 3 (whole unread bytes left in the bit buffer) compensates */
  if (fcsize + fincnt + (k >> 3))
  {
    fused_csize = flrec.csize - fcsize - fincnt - (k >> 3);
    return 5;
  }
  return 0;
}
  485.  
  486.  
/* ===========================================================================
 * Decompress imploded data that uses uncoded (raw 8-bit) literals and an 8K
 * sliding window (minimum match length 2; 7 uncoded low distance bits).
 * tl, td :: length and distance decode tables (from huft_build).
 * bl, bd :: number of bits resolved by one first-level lookup in tl and td.
 * Returns 0 on success, 1 on an invalid (99) code, 5 if the number of
 * compressed bytes consumed does not match the expected csize. */
int UnzInf::explode_nolit8(struct huft * tl, struct huft * td, int bl, int bd)
{
//  long s;
  ZInt64 s;                /* bytes still to decompress */
  register unsigned e;     /* table entry flag / number of extra bits */
  unsigned n, d;           /* length and window index for the copy */
  unsigned w;              /* current window (output) position */
  struct huft * t;         /* pointer to current table entry */
  unsigned ml, md;         /* masks for bl and bd bits */
  register ulg b;          /* bit buffer */
  register unsigned k;     /* number of valid bits in the bit buffer */
  unsigned u;              /* true while window content is unflushed */

  /* explode the coded data */
  b = k = w = 0;           /* initialize bit buffer, window position */
  u = 1;                   /* buffer unflushed */
  ml = mask_bits[bl];      /* precompute masks for speed */
  md = mask_bits[bd];
  s = fucsize;             /* uncompressed size still expected */
  while (s > 0)
  {
    /* do until ucsize bytes uncompressed */
    NEEDBITS(1)
    if (b & 1)
    {
      /* set bit: literal is the next eight bits, uncoded */
      DUMPBITS(1) s--;
      NEEDBITS(8) Slide[w++] = (uch)b;
      if (w == wsize)
      {
        /* window full: write it out and start over */
        flush(Slide, w, 0);
        w = u = 0;
      }
      DUMPBITS(8)
    }
    else
    {
      /* clear bit: a distance/length pair follows */
      DUMPBITS(1) NEEDBITS(7) /* 7 uncoded low distance bits (8K window) */
      d = (unsigned)b & 0x7f;
      DUMPBITS(7) NEEDBITS((unsigned)bd) /* get coded distance high bits */
      if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
        do
        {
          /* walk sub-tables until a data entry (e <= 16) is found */
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) d = w - d - t->v.n;
      /* construct offset of the match within the circular window */
      NEEDBITS((unsigned)bl) /* get coded length */
      if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
        do
        {
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) n = t->v.n;
      if (e)
      {
        /* maximum coded length: add eight uncoded extra bits */
        NEEDBITS(8) n += (unsigned)b & 0xff;
        DUMPBITS(8)
      }
      /* do the copy: n bytes from offset d, in window-sized chunks */
      s -= n;
      do
      {
        /* NOTE: upstream USE_STRM_OUTPUT (redirect) variant omitted here */
        n -= (e = (e = wsize - ((d &= wsize - 1) > w ? d : w)) > n ? n : e);
        if (u && w <= d)
        {
          /* source lies beyond anything written yet: emit zero bytes */
          ZeroMemory(Slide + w, e);
          w += e;
          d += e;
        }
        else
          if (w - d >= e)
          {
            /* regions don't overlap (this test assumes unsigned comparison) */
            memcpy(Slide + w, Slide + d, e);
            w += e;
            d += e;
          }
          else /* do it slow to avoid memcpy() overlap */
            do
            {
              Slide[w++] = Slide[d++];
            }
            while (--e);
        if (w == wsize)
        {
          flush(Slide, w, 0);
          w = u = 0;
        }
      }
      while (n);
    }
  }
  /* flush out Slide */
  flush(Slide, w, 0);
  /* should have read csize bytes, but sometimes read one too many:
   * k >> 3 (whole unread bytes left in the bit buffer) compensates */
  if (fcsize + fincnt + (k >> 3))
  {
    fused_csize = flrec.csize - fcsize - fincnt - (k >> 3);
    return 5;
  }
  return 0;
}
  623.  
  624.  
/* ===========================================================================
 * Decompress imploded data that uses uncoded (raw 8-bit) literals and a 4K
 * sliding window (minimum match length 2; 6 uncoded low distance bits).
 * tl, td :: length and distance decode tables (from huft_build).
 * bl, bd :: number of bits resolved by one first-level lookup in tl and td.
 * Returns 0 on success, 1 on an invalid (99) code, 5 if the number of
 * compressed bytes consumed does not match the expected csize. */
int UnzInf::explode_nolit4(struct huft * tl, struct huft * td, int bl, int bd)
{
//  long s;
  ZInt64 s;                /* bytes still to decompress */
  register unsigned e;     /* table entry flag / number of extra bits */
  unsigned n, d;           /* length and window index for the copy */
  unsigned w;              /* current window (output) position */
  struct huft * t;         /* pointer to current table entry */
  unsigned ml, md;         /* masks for bl and bd bits */
  register ulg b;          /* bit buffer */
  register unsigned k;     /* number of valid bits in the bit buffer */
  unsigned u;              /* true while window content is unflushed */

  /* explode the coded data */
  b = k = w = 0;           /* initialize bit buffer, window position */
  u = 1;                   /* buffer unflushed */
  ml = mask_bits[bl];      /* precompute masks for speed */
  md = mask_bits[bd];
  s = fucsize;             /* uncompressed size still expected */
  while (s > 0)
  {
    /* do until ucsize bytes uncompressed */
    NEEDBITS(1)
    if (b & 1)
    {
      /* set bit: literal is the next eight bits, uncoded */
      DUMPBITS(1) s--;
      NEEDBITS(8) Slide[w++] = (uch)b;
      if (w == wsize)
      {
        /* window full: write it out and start over */
        flush(Slide, w, 0);
        w = u = 0;
      }
      DUMPBITS(8)
    }
    else
    {
      /* clear bit: a distance/length pair follows */
      DUMPBITS(1) NEEDBITS(6) /* 6 uncoded low distance bits (4K window) */
      d = (unsigned)b & 0x3f;
      DUMPBITS(6) NEEDBITS((unsigned)bd) /* get coded distance high bits */
      if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
        do
        {
          /* walk sub-tables until a data entry (e <= 16) is found */
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) d = w - d - t->v.n;
      /* construct offset of the match within the circular window */
      NEEDBITS((unsigned)bl) /* get coded length */
      if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
        do
        {
          if (e == 99)
            return 1;      /* invalid code in stream */
          DUMPBITS(t->b) e -= 16;
          NEEDBITS(e)
        }
        while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b) n = t->v.n;
      if (e)
      {
        /* maximum coded length: add eight uncoded extra bits */
        NEEDBITS(8) n += (unsigned)b & 0xff;
        DUMPBITS(8)
      }
      /* do the copy: n bytes from offset d, in window-sized chunks */
      s -= n;
      do
      {
        /* NOTE: upstream USE_STRM_OUTPUT (redirect) variant omitted here */
        n -= (e = (e = wsize - ((d &= wsize - 1) > w ? d : w)) > n ? n : e);
        if (u && w <= d)
        {
          /* source lies beyond anything written yet: emit zero bytes */
          ZeroMemory(Slide + w, e);
          w += e;
          d += e;
        }
        else
          if (w - d >= e)
          {
            /* regions don't overlap (this test assumes unsigned comparison) */
            memcpy(Slide + w, Slide + d, e);
            w += e;
            d += e;
          }
          else /* do it slow to avoid memcpy() overlap */
            do
            {
              Slide[w++] = Slide[d++];
            }
            while (--e);
        if (w == wsize)
        {
          flush(Slide, w, 0);
          w = u = 0;
        }
      }
      while (n);
    }
  }
  /* flush out Slide */
  flush(Slide, w, 0);
  /* should have read csize bytes, but sometimes read one too many:
   * k >> 3 (whole unread bytes left in the bit buffer) compensates */
  if (fcsize + fincnt + (k >> 3))
  {
    fused_csize = flrec.csize - fcsize - fincnt - (k >> 3);
    return 5;
  }
  return 0;
}
  762.  
  763.  
  764. /* ===========================================================================
  765. * Explode an imploded compressed stream.  Based on the general purpose
  766. * bit flag, decide on coded or uncoded literals, and an 8K or 4K sliding
  767. * window.  Construct the literal (if any), length, and distance codes and
  768. * the tables needed to decode them (using huft_build() from inflate.c),
  769. * and call the appropriate routine for the type of data in the remainder
  770. * of the stream.  The four routines are nearly identical, differing only
  771. * in whether the literal is decoded or simply read in, and in how many
  772. * bits are read in, uncoded, for the low distance bits. */
  773. int UnzInf::explode(void)
  774. {
  775.   unsigned r;
  776.   /* return codes */
  777.   struct huft * tb;
  778.   /* literal code table */
  779.   struct huft * tl;
  780.   /* length code table */
  781.   struct huft * td;
  782.   /* distance code table */
  783.   int bb;
  784.   /* bits for tb */
  785.   int bl;
  786.   /* bits for tl */
  787.   int bd;
  788.   /* bits for td */
  789.   unsigned l[256];
  790.   /* bit lengths for codes */
  791.  
  792. //# ifdef USE_STRM_OUTPUT
  793. //  if (fredirect_data)
  794. //  {
  795. //      wsize = fredirect_size;
  796. //      Slide = fredirect_buffer;
  797. //  }
  798. //  else
  799. //  {
  800. //      wsize = UWSIZE;
  801. //      Slide = Slide;
  802. //   }
  803. //#else
  804. ////    wsize = WSIZE;
  805. ////    Slide = Slide;
  806. //#endif
  807.  
  808.   /* Tune base table sizes.  Note: I thought that to truly optimize speed,
  809.   * I would have to select different bl, bd, and bb values for different
  810.   * compressed file sizes.  I was suprised to find out the the values of
  811.   * 7, 7, and 9 worked best over a very wide range of sizes, except that
  812.   * bd = 8 worked marginally better for large compressed sizes. */
  813.   bl = 7;
  814. //  bd = (fcsize + fincnt) > 200000L ? 8 : 7;
  815.   bd = (unsigned long)(fcsize + fincnt) > 200000L ? 8 : 7;
  816.  
  817.   /* With literal tree--minimum match length is 3 */
  818.   fhufts = 0;
  819.   /* initialize huft's malloc'ed */
  820.   if (flrec.general_purpose_bit_flag & 4)
  821.   {
  822.     bb = 9;
  823.     /* base table size for literals */
  824.     if ((r = get_tree(l, 256)) != 0)
  825.       return (int)r;
  826.     if ((r = huft_build(l, 256, 256, NULL, NULL, & tb, & bb)) != 0)
  827.     {
  828.       if (r == 1)
  829.         huft_free(tb);
  830.       return (int)r;
  831.     }
  832.     if ((r = get_tree(l, 64)) != 0)
  833.       return (int)r;
  834.     if ((r = huft_build(l, 64, 0, cplen3, extra, & tl, & bl)) != 0)
  835.     {
  836.       if (r == 1)
  837.         huft_free(tl);
  838.       huft_free(tb);
  839.       return (int)r;
  840.     }
  841.     if ((r = get_tree(l, 64)) != 0)
  842.       return (int)r;
  843.     if (flrec.general_purpose_bit_flag & 2)
  844.     {
  845.       /* true if 8K */
  846.       if ((r = huft_build(l, 64, 0, cpdist8, extra, & td, & bd)) != 0)
  847.       {
  848.         if (r == 1)
  849.           huft_free(td);
  850.         huft_free(tl);
  851.         huft_free(tb);
  852.         return (int)r;
  853.       }
  854.       r = explode_lit8(tb, tl, td, bb, bl, bd);
  855.     }
  856.     else
  857.     {
  858.       /* else 4K */
  859.       if ((r = huft_build(l, 64, 0, cpdist4, extra, & td, & bd)) != 0)
  860.       {
  861.         if (r == 1)
  862.           huft_free(td);
  863.         huft_free(tl);
  864.         huft_free(tb);
  865.         return (int)r;
  866.       }
  867.       r = explode_lit4(tb, tl, td, bb, bl, bd);
  868.     }
  869.     huft_free(td);
  870.     huft_free(tl);
  871.     huft_free(tb);
  872.   }
  873.   else
  874.   {
  875.     /* No literal tree--minimum match length is 2 */
  876.     if ((r = get_tree(l, 64)) != 0)
  877.       return (int)r;
  878.     if ((r = huft_build(l, 64, 0, cplen2, extra, & tl, & bl)) != 0)
  879.     {
  880.       if (r == 1)
  881.         huft_free(tl);
  882.       return (int)r;
  883.     }
  884.     if ((r = get_tree(l, 64)) != 0)
  885.       return (int)r;
  886.     if (flrec.general_purpose_bit_flag & 2)
  887.     {
  888.       /* true if 8K */
  889.           if ((r = huft_build(l, 64, 0, cpdist8, extra, & td, & bd)) != 0)
  890.       {
  891.         if (r == 1)
  892.           huft_free(td);
  893.         huft_free(tl);
  894.         return (int)r;
  895.       }
  896.       r = explode_nolit8(tl, td, bl, bd);
  897.     }
  898.     else
  899.     {
  900.       /* else 4K */
  901.       if ((r = huft_build(l, 64, 0, cpdist4, extra, & td, & bd)) != 0)
  902.       {
  903.         if (r == 1)
  904.           huft_free(td);
  905.         huft_free(tl);
  906.         return (int)r;
  907.       }
  908.           r = explode_nolit4(tl, td, bl, bd);
  909.     }
  910.     huft_free(td);
  911.     huft_free(tl);
  912.   }
  913.   return (int)r;
  914. }
  915.  
  916. /* so explode.c and inflate.c can be compiled together into one object: */
  917. #undef NEXTBYTE
  918. #undef NEEDBITS
  919. #undef DUMPBITS
  920.