
lz4

lz4.c (62206B)

     1 /*
     2    LZ4 - Fast LZ compression algorithm
     3    Copyright (C) 2011-2017, Yann Collet.
     4 
     5    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
     6 
     7    Redistribution and use in source and binary forms, with or without
     8    modification, are permitted provided that the following conditions are
     9    met:
    10 
    11        * Redistributions of source code must retain the above copyright
    12    notice, this list of conditions and the following disclaimer.
    13        * Redistributions in binary form must reproduce the above
    14    copyright notice, this list of conditions and the following disclaimer
    15    in the documentation and/or other materials provided with the
    16    distribution.
    17 
    18    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    19    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    20    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    21    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    22    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    23    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    24    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    25    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    26    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    27    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    28    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    29 
    30    You can contact the author at :
    31     - LZ4 homepage : http://www.lz4.org
    32     - LZ4 source repository : https://github.com/lz4/lz4
    33 */
    34 
    35 
    36 /*-************************************
    37 *  Tuning parameters
    38 **************************************/
    39 /*
    40  * LZ4_HEAPMODE :
    41  * Selects how the default compression functions allocate memory for their hash table:
    42  * on the stack (0: default, fastest) or on the heap (1: requires malloc()).
    43  */
    44 #ifndef LZ4_HEAPMODE
    45 #  define LZ4_HEAPMODE 0
    46 #endif
    47 
    48 /*
    49  * ACCELERATION_DEFAULT :
    50  * Selects the default "acceleration" for LZ4_compress_fast() when the parameter value is <= 0
    51  */
    52 #define ACCELERATION_DEFAULT 1
    53 
    54 
    55 /*-************************************
    56 *  CPU Feature Detection
    57 **************************************/
    58 /* LZ4_FORCE_MEMORY_ACCESS
    59  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
    60  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
    61  * The switch below allows selecting a different access method for improved performance.
    62  * Method 0 (default) : use `memcpy()`. Safe and portable.
    63  * Method 1 : `__packed` attribute. It relies on a compiler extension (i.e., not portable).
    64  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
    65  * Method 2 : direct access. This method is portable but violates the C standard.
    66  *            It can generate buggy code on targets whose assembly generation depends on alignment.
    67  *            But in some circumstances, it's the only known way to get the most performance (e.g., GCC + ARMv6)
    68  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
    69  * Prefer these methods in priority order (0 > 1 > 2)
    70  */
    71 #ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
    72 #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
    73 #    define LZ4_FORCE_MEMORY_ACCESS 2
    74 #  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
    75 #    define LZ4_FORCE_MEMORY_ACCESS 1
    76 #  endif
    77 #endif
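
Since the macro is only set when not already defined, a build can force a given access method explicitly. A hypothetical compiler invocation (not part of the original file):

/*   cc -O3 -DLZ4_FORCE_MEMORY_ACCESS=1 -c lz4.c   */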
    78 
    79 /*
    80  * LZ4_FORCE_SW_BITCOUNT
    81  * Define this parameter if your target system or compiler does not support hardware bit count
    82  */
    83 #if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
    84 #  define LZ4_FORCE_SW_BITCOUNT
    85 #endif
    86 
    87 
    88 
    89 /*-************************************
    90 *  Dependency
    91 **************************************/
    92 #include "lz4.h"
    93 /* see also "memory routines" below */
    94 
    95 
    96 /*-************************************
    97 *  Compiler Options
    98 **************************************/
    99 #ifdef _MSC_VER    /* Visual Studio */
   100 #  include <intrin.h>
   101 #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
   102 #  pragma warning(disable : 4293)        /* disable: C4293: too large shift (32-bits) */
   103 #endif  /* _MSC_VER */
   104 
   105 #ifndef LZ4_FORCE_INLINE
   106 #  ifdef _MSC_VER    /* Visual Studio */
   107 #    define LZ4_FORCE_INLINE static __forceinline
   108 #  else
   109 #    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
   110 #      ifdef __GNUC__
   111 #        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
   112 #      else
   113 #        define LZ4_FORCE_INLINE static inline
   114 #      endif
   115 #    else
   116 #      define LZ4_FORCE_INLINE static
   117 #    endif /* __STDC_VERSION__ */
   118 #  endif  /* _MSC_VER */
   119 #endif /* LZ4_FORCE_INLINE */
   120 
   121 /* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
   122  * Gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy,
   123  * together with a simple 8-byte copy loop as a fall-back path.
   124  * However, this optimization hurts the decompression speed by >30%,
   125  * because the execution does not go to the optimized loop
   126  * for typical compressible data, and all of the preamble checks
   127  * before going to the fall-back path become useless overhead.
   128  * This optimization happens only with the -O3 flag, and -O2 generates
   129  * a simple 8-byte copy loop.
   130  * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy
   131  * functions are annotated with __attribute__((optimize("O2"))),
   132  * and also LZ4_wildCopy is forcibly inlined, so that the O2 attribute
   133  * of LZ4_wildCopy does not affect the compression speed.
   134  */
   135 #if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__)
   136 #  define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
   137 #  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
   138 #else
   139 #  define LZ4_FORCE_O2_GCC_PPC64LE
   140 #  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
   141 #endif
   142 
   143 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
   144 #  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
   145 #else
   146 #  define expect(expr,value)    (expr)
   147 #endif
   148 
   149 #define likely(expr)     expect((expr) != 0, 1)
   150 #define unlikely(expr)   expect((expr) != 0, 0)
   151 
   152 
   153 /*-************************************
   154 *  Memory routines
   155 **************************************/
   156 #include <stdlib.h>   /* malloc, calloc, free */
   157 #define ALLOCATOR(n,s) calloc(n,s)
   158 #define FREEMEM        free
   159 #include <string.h>   /* memset, memcpy */
   160 #define MEM_INIT       memset
   161 
   162 
   163 /*-************************************
   164 *  Basic Types
   165 **************************************/
   166 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
   167 # include <stdint.h>
   168   typedef  uint8_t BYTE;
   169   typedef uint16_t U16;
   170   typedef uint32_t U32;
   171   typedef  int32_t S32;
   172   typedef uint64_t U64;
   173   typedef uintptr_t uptrval;
   174 #else
   175   typedef unsigned char       BYTE;
   176   typedef unsigned short      U16;
   177   typedef unsigned int        U32;
   178   typedef   signed int        S32;
   179   typedef unsigned long long  U64;
   180   typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
   181 #endif
   182 
   183 #if defined(__x86_64__)
   184   typedef U64    reg_t;   /* 64-bits in x32 mode */
   185 #else
   186   typedef size_t reg_t;   /* 32-bits in x32 mode */
   187 #endif
   188 
   189 /*-************************************
   190 *  Reading and writing into memory
   191 **************************************/
   192 static unsigned LZ4_isLittleEndian(void)
   193 {
   194     const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
   195     return one.c[0];
   196 }
   197 
   198 
   199 #if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
   200 /* lie to the compiler about data alignment; use with caution */
   201 
   202 static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
   203 static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
   204 static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
   205 
   206 static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
   207 static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
   208 
   209 #elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
   210 
   211 /* __packed attributes are safer, but compiler-specific, hence potentially problematic for some compilers */
   212 /* currently only defined for gcc and icc */
   213 typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
   214 
   215 static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
   216 static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
   217 static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
   218 
   219 static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
   220 static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
   221 
   222 #else  /* safe and portable access through memcpy() */
   223 
   224 static U16 LZ4_read16(const void* memPtr)
   225 {
   226     U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
   227 }
   228 
   229 static U32 LZ4_read32(const void* memPtr)
   230 {
   231     U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
   232 }
   233 
   234 static reg_t LZ4_read_ARCH(const void* memPtr)
   235 {
   236     reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
   237 }
   238 
   239 static void LZ4_write16(void* memPtr, U16 value)
   240 {
   241     memcpy(memPtr, &value, sizeof(value));
   242 }
   243 
   244 static void LZ4_write32(void* memPtr, U32 value)
   245 {
   246     memcpy(memPtr, &value, sizeof(value));
   247 }
   248 
   249 #endif /* LZ4_FORCE_MEMORY_ACCESS */
   250 
   251 
   252 static U16 LZ4_readLE16(const void* memPtr)
   253 {
   254     if (LZ4_isLittleEndian()) {
   255         return LZ4_read16(memPtr);
   256     } else {
   257         const BYTE* p = (const BYTE*)memPtr;
   258         return (U16)((U16)p[0] + (p[1]<<8));
   259     }
   260 }
   261 
   262 static void LZ4_writeLE16(void* memPtr, U16 value)
   263 {
   264     if (LZ4_isLittleEndian()) {
   265         LZ4_write16(memPtr, value);
   266     } else {
   267         BYTE* p = (BYTE*)memPtr;
   268         p[0] = (BYTE) value;
   269         p[1] = (BYTE)(value>>8);
   270     }
   271 }
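
The two routines above yield a fixed little-endian byte order whatever the host. A minimal sketch illustrating the contract (hypothetical helper, not part of the original file):

static unsigned example_le16(void)
{
    BYTE buf[2];
    LZ4_writeLE16(buf, 0x0102);   /* stores {0x02, 0x01} on any host */
    return LZ4_readLE16(buf);     /* == 0x0102 regardless of host endianness */
}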
   272 
   273 /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
   274 LZ4_FORCE_O2_INLINE_GCC_PPC64LE
   275 void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
   276 {
   277     BYTE* d = (BYTE*)dstPtr;
   278     const BYTE* s = (const BYTE*)srcPtr;
   279     BYTE* const e = (BYTE*)dstEnd;
   280 
   281     do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
   282 }
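
The over-write contract matters for callers: the destination must keep slack past dstEnd, since the copy proceeds in 8-byte strides. A minimal caller sketch (hypothetical helper, assuming an 8-byte slack allocation):

static void example_wildCopy(const BYTE* src, size_t len)
{
    BYTE* const dst = (BYTE*)malloc(len + 8);   /* 8 bytes of slack for the over-write */
    if (dst != NULL) {
        LZ4_wildCopy(dst, src, dst + len);      /* may write up to dst + len + 7 */
        free(dst);
    }
}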
   283 
   284 
   285 /*-************************************
   286 *  Common Constants
   287 **************************************/
   288 #define MINMATCH 4
   289 
   290 #define WILDCOPYLENGTH 8
   291 #define LASTLITERALS 5
   292 #define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
   293 static const int LZ4_minLength = (MFLIMIT+1);
   294 
   295 #define KB *(1 <<10)
   296 #define MB *(1 <<20)
   297 #define GB *(1U<<30)
   298 
   299 #define MAXD_LOG 16
   300 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
   301 
   302 #define ML_BITS  4
   303 #define ML_MASK  ((1U<<ML_BITS)-1)
   304 #define RUN_BITS (8-ML_BITS)
   305 #define RUN_MASK ((1U<<RUN_BITS)-1)
   306 
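These masks define the 1-byte sequence token: literal length in the high 4 bits, match length (stored minus MINMATCH) in the low 4 bits. A hypothetical sketch building the token for 3 literals followed by a 5-byte match:

static BYTE example_token(void)
{
    unsigned const litLength = 3;                       /* fits in RUN_MASK */
    unsigned const matchCode = 5 - MINMATCH;            /* match length is stored minus MINMATCH */
    return (BYTE)((litLength << ML_BITS) | matchCode);  /* == 0x31 */
}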
   307 
   308 /*-************************************
   309 *  Error detection
   310 **************************************/
   311 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
   312 #  include <assert.h>
   313 #else
   314 #  ifndef assert
   315 #    define assert(condition) ((void)0)
   316 #  endif
   317 #endif
   318 
   319 #define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
   320 
   321 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
   322 #  include <stdio.h>
   323 static int g_debuglog_enable = 1;
   324 #  define DEBUGLOG(l, ...) {                                  \
   325                 if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
   326                     fprintf(stderr, __FILE__ ": ");           \
   327                     fprintf(stderr, __VA_ARGS__);             \
   328                     fprintf(stderr, " \n");                   \
   329             }   }
   330 #else
   331 #  define DEBUGLOG(l, ...)      {}    /* disabled */
   332 #endif
   333 
   334 
   335 /*-************************************
   336 *  Common functions
   337 **************************************/
   338 static unsigned LZ4_NbCommonBytes (reg_t val)
   339 {
   340     if (LZ4_isLittleEndian()) {
   341         if (sizeof(val)==8) {
   342 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
   343             unsigned long r = 0;
   344             _BitScanForward64( &r, (U64)val );
   345             return (int)(r>>3);
   346 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
   347             return (__builtin_ctzll((U64)val) >> 3);
   348 #       else
   349             static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
   350                                                      0, 3, 1, 3, 1, 4, 2, 7,
   351                                                      0, 2, 3, 6, 1, 5, 3, 5,
   352                                                      1, 3, 4, 4, 2, 5, 6, 7,
   353                                                      7, 0, 1, 2, 3, 3, 4, 6,
   354                                                      2, 6, 5, 5, 3, 4, 5, 6,
   355                                                      7, 1, 2, 4, 6, 4, 4, 5,
   356                                                      7, 2, 6, 5, 7, 6, 7, 7 };
   357             return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
   358 #       endif
   359         } else /* 32 bits */ {
   360 #       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
   361             unsigned long r;
   362             _BitScanForward( &r, (U32)val );
   363             return (int)(r>>3);
   364 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
   365             return (__builtin_ctz((U32)val) >> 3);
   366 #       else
   367             static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
   368                                                      3, 2, 2, 1, 3, 2, 0, 1,
   369                                                      3, 3, 1, 2, 2, 2, 2, 0,
   370                                                      3, 1, 2, 0, 1, 0, 1, 1 };
   371             return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
   372 #       endif
   373         }
   374     } else   /* Big Endian CPU */ {
   375         if (sizeof(val)==8) {   /* 64-bits */
   376 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
   377             unsigned long r = 0;
   378             _BitScanReverse64( &r, val );
   379             return (unsigned)(r>>3);
   380 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
   381             return (__builtin_clzll((U64)val) >> 3);
   382 #       else
   383             static const U32 by32 = sizeof(val)*4;  /* 32 on 64 bits (goal), 16 on 32 bits.
   384                 Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
   385                 Note that this code path is never triggered in 32-bits mode. */
   386             unsigned r;
   387             if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
   388             if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
   389             r += (!val);
   390             return r;
   391 #       endif
   392         } else /* 32 bits */ {
   393 #       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
   394             unsigned long r = 0;
   395             _BitScanReverse( &r, (unsigned long)val );
   396             return (unsigned)(r>>3);
   397 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
   398             return (__builtin_clz((U32)val) >> 3);
   399 #       else
   400             unsigned r;
   401             if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
   402             r += (!val);
   403             return r;
   404 #       endif
   405         }
   406     }
   407 }
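
A worked instance of the intent: the argument is the XOR of two words, and the result is how many first-in-memory bytes matched. On a little-endian host (hypothetical sketch):

static unsigned example_nbCommonBytes(void)
{
    /* diff 0xFF000000 has three zero low (first-in-memory) bytes,
     * meaning the first three bytes of the two words were equal */
    return LZ4_NbCommonBytes((reg_t)0xFF000000);   /* == 3 on a little-endian host */
}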
   408 
   409 #define STEPSIZE sizeof(reg_t)
   410 LZ4_FORCE_INLINE
   411 unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
   412 {
   413     const BYTE* const pStart = pIn;
   414 
   415     if (likely(pIn < pInLimit-(STEPSIZE-1))) {
   416         reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
   417         if (!diff) {
   418             pIn+=STEPSIZE; pMatch+=STEPSIZE;
   419         } else {
   420             return LZ4_NbCommonBytes(diff);
   421     }   }
   422 
   423     while (likely(pIn < pInLimit-(STEPSIZE-1))) {
   424         reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
   425         if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
   426         pIn += LZ4_NbCommonBytes(diff);
   427         return (unsigned)(pIn - pStart);
   428     }
   429 
   430     if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
   431     if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
   432     if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
   433     return (unsigned)(pIn - pStart);
   434 }
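
For instance, comparing two buffers that share a 6-byte prefix returns 6. A hypothetical sketch:

static unsigned example_count(void)
{
    const BYTE a[] = "abcdefXY";     /* 8 data bytes + NUL terminator */
    const BYTE b[] = "abcdefZW";
    return LZ4_count(a, b, a + 8);   /* == 6 common leading bytes */
}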
   435 
   436 
   437 #ifndef LZ4_COMMONDEFS_ONLY
   438 /*-************************************
   439 *  Local Constants
   440 **************************************/
   441 static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
   442 static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
   443 
   444 
   445 /*-************************************
   446 *  Local Structures and types
   447 **************************************/
   448 typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
   449 typedef enum { byPtr, byU32, byU16 } tableType_t;
   450 
   451 typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
   452 typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
   453 
   454 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
   455 typedef enum { full = 0, partial = 1 } earlyEnd_directive;
   456 
   457 
   458 /*-************************************
   459 *  Local Utils
   460 **************************************/
   461 int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
   462 const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
   463 int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
   464 int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
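
LZ4_COMPRESSBOUND() is documented in lz4.h as isize + isize/255 + 16; a quick worked instance:

/* e.g. LZ4_compressBound(65536) == 65536 + 65536/255 + 16 == 65809 */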
   465 
   466 
   467 /*-******************************
   468 *  Compression functions
   469 ********************************/
   470 static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
   471 {
   472     if (tableType == byU16)
   473         return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
   474     else
   475         return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
   476 }
   477 
   478 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
   479 {
   480     static const U64 prime5bytes = 889523592379ULL;
   481     static const U64 prime8bytes = 11400714785074694791ULL;
   482     const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
   483     if (LZ4_isLittleEndian())
   484         return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
   485     else
   486         return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
   487 }
   488 
   489 LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
   490 {
   491     if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
   492     return LZ4_hash4(LZ4_read32(p), tableType);
   493 }
   494 
   495 static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
   496 {
   497     switch (tableType)
   498     {
   499     case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
   500     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
   501     case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
   502     }
   503 }
   504 
   505 LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
   506 {
   507     U32 const h = LZ4_hashPosition(p, tableType);
   508     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
   509 }
   510 
   511 static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
   512 {
   513     if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
   514     if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
   515     { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
   516 }
   517 
   518 LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
   519 {
   520     U32 const h = LZ4_hashPosition(p, tableType);
   521     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
   522 }
   523 
   524 
   525 /** LZ4_compress_generic() :
   526     inlined, to ensure branches are decided at compilation time */
   527 LZ4_FORCE_INLINE int LZ4_compress_generic(
   528                  LZ4_stream_t_internal* const cctx,
   529                  const char* const source,
   530                  char* const dest,
   531                  const int inputSize,
   532                  const int maxOutputSize,
   533                  const limitedOutput_directive outputLimited,
   534                  const tableType_t tableType,
   535                  const dict_directive dict,
   536                  const dictIssue_directive dictIssue,
   537                  const U32 acceleration)
   538 {
   539     const BYTE* ip = (const BYTE*) source;
   540     const BYTE* base;
   541     const BYTE* lowLimit;
   542     const BYTE* const lowRefLimit = ip - cctx->dictSize;
   543     const BYTE* const dictionary = cctx->dictionary;
   544     const BYTE* const dictEnd = dictionary + cctx->dictSize;
   545     const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
   546     const BYTE* anchor = (const BYTE*) source;
   547     const BYTE* const iend = ip + inputSize;
   548     const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
   549     const BYTE* const matchlimit = iend - LASTLITERALS;
   550 
   551     BYTE* op = (BYTE*) dest;
   552     BYTE* const olimit = op + maxOutputSize;
   553 
   554     U32 forwardH;
   555 
   556     /* Init conditions */
   557     if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
   558     switch(dict)
   559     {
   560     case noDict:
   561     default:
   562         base = (const BYTE*)source;
   563         lowLimit = (const BYTE*)source;
   564         break;
   565     case withPrefix64k:
   566         base = (const BYTE*)source - cctx->currentOffset;
   567         lowLimit = (const BYTE*)source - cctx->dictSize;
   568         break;
   569     case usingExtDict:
   570         base = (const BYTE*)source - cctx->currentOffset;
   571         lowLimit = (const BYTE*)source;
   572         break;
   573     }
   574     if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
   575     if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */
   576 
   577     /* First Byte */
   578     LZ4_putPosition(ip, cctx->hashTable, tableType, base);
   579     ip++; forwardH = LZ4_hashPosition(ip, tableType);
   580 
   581     /* Main Loop */
   582     for ( ; ; ) {
   583         ptrdiff_t refDelta = 0;
   584         const BYTE* match;
   585         BYTE* token;
   586 
   587         /* Find a match */
   588         {   const BYTE* forwardIp = ip;
   589             unsigned step = 1;
   590             unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
   591             do {
   592                 U32 const h = forwardH;
   593                 ip = forwardIp;
   594                 forwardIp += step;
   595                 step = (searchMatchNb++ >> LZ4_skipTrigger);
   596 
   597                 if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
   598                 assert(ip < mflimitPlusOne);
   599 
   600                 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
   601                 if (dict==usingExtDict) {
   602                     if (match < (const BYTE*)source) {
   603                         refDelta = dictDelta;
   604                         lowLimit = dictionary;
   605                     } else {
   606                         refDelta = 0;
   607                         lowLimit = (const BYTE*)source;
   608                 }   }
   609                 forwardH = LZ4_hashPosition(forwardIp, tableType);
   610                 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
   611 
   612             } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
   613                 || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
   614                 || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
   615         }
   616 
   617         /* Catch up */
   618         while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
   619 
   620         /* Encode Literals */
   621         {   unsigned const litLength = (unsigned)(ip - anchor);
   622             token = op++;
   623             if ((outputLimited) &&  /* Check output buffer overflow */
   624                 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
   625                 return 0;
   626             if (litLength >= RUN_MASK) {
   627                 int len = (int)litLength-RUN_MASK;
   628                 *token = (RUN_MASK<<ML_BITS);
   629                 for(; len >= 255 ; len-=255) *op++ = 255;
   630                 *op++ = (BYTE)len;
   631             }
   632             else *token = (BYTE)(litLength<<ML_BITS);
   633 
   634             /* Copy Literals */
   635             LZ4_wildCopy(op, anchor, op+litLength);
   636             op+=litLength;
   637         }
   638 
   639 _next_match:
   640         /* Encode Offset */
   641         LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
   642 
   643         /* Encode MatchLength */
   644         {   unsigned matchCode;
   645 
   646             if ((dict==usingExtDict) && (lowLimit==dictionary)) {
   647                 const BYTE* limit;
   648                 match += refDelta;
   649                 limit = ip + (dictEnd-match);
   650                 if (limit > matchlimit) limit = matchlimit;
   651                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
   652                 ip += MINMATCH + matchCode;
   653                 if (ip==limit) {
   654                     unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
   655                     matchCode += more;
   656                     ip += more;
   657                 }
   658             } else {
   659                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
   660                 ip += MINMATCH + matchCode;
   661             }
   662 
   663             if ( outputLimited &&    /* Check output buffer overflow */
   664                 (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
   665                 return 0;
   666             if (matchCode >= ML_MASK) {
   667                 *token += ML_MASK;
   668                 matchCode -= ML_MASK;
   669                 LZ4_write32(op, 0xFFFFFFFF);
   670                 while (matchCode >= 4*255) {
   671                     op+=4;
   672                     LZ4_write32(op, 0xFFFFFFFF);
   673                     matchCode -= 4*255;
   674                 }
   675                 op += matchCode / 255;
   676                 *op++ = (BYTE)(matchCode % 255);
   677             } else
   678                 *token += (BYTE)(matchCode);
   679         }
   680 
   681         anchor = ip;
   682 
   683         /* Test end of chunk */
   684         if (ip >= mflimitPlusOne) break;
   685 
   686         /* Fill table */
   687         LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
   688 
   689         /* Test next position */
   690         match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
   691         if (dict==usingExtDict) {
   692             if (match < (const BYTE*)source) {
   693                 refDelta = dictDelta;
   694                 lowLimit = dictionary;
   695             } else {
   696                 refDelta = 0;
   697                 lowLimit = (const BYTE*)source;
   698         }   }
   699         LZ4_putPosition(ip, cctx->hashTable, tableType, base);
   700         if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
   701             && (match+MAX_DISTANCE>=ip)
   702             && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
   703         { token=op++; *token=0; goto _next_match; }
   704 
   705         /* Prepare next loop */
   706         forwardH = LZ4_hashPosition(++ip, tableType);
   707     }
   708 
   709 _last_literals:
   710     /* Encode Last Literals */
   711     {   size_t const lastRun = (size_t)(iend - anchor);
   712         if ( (outputLimited) &&  /* Check output buffer overflow */
   713             ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
   714             return 0;
   715         if (lastRun >= RUN_MASK) {
   716             size_t accumulator = lastRun - RUN_MASK;
   717             *op++ = RUN_MASK << ML_BITS;
   718             for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
   719             *op++ = (BYTE) accumulator;
   720         } else {
   721             *op++ = (BYTE)(lastRun<<ML_BITS);
   722         }
   723         memcpy(op, anchor, lastRun);
   724         op += lastRun;
   725     }
   726 
   727     /* End */
   728     return (int) (((char*)op)-dest);
   729 }
   730 
   731 
   732 int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
   733 {
   734     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
   735     LZ4_resetStream((LZ4_stream_t*)state);
   736     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
   737 
   738     if (maxOutputSize >= LZ4_compressBound(inputSize)) {
   739         if (inputSize < LZ4_64Klimit)
   740             return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited,                        byU16, noDict, noDictIssue, acceleration);
   741         else
   742             return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
   743     } else {
   744         if (inputSize < LZ4_64Klimit)
   745             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput,                        byU16, noDict, noDictIssue, acceleration);
   746         else
   747             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
   748     }
   749 }
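
Callers that want to avoid any heap allocation can hand in their own state; the function resets it itself, so no separate initialization is needed. A minimal sketch (hypothetical caller):

static int example_extState(const char* src, char* dst, int srcSize, int dstCapacity)
{
    LZ4_stream_t state;   /* caller-owned state: no allocation */
    return LZ4_compress_fast_extState(&state, src, dst, srcSize, dstCapacity, 1);
}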
   750 
   751 
   752 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
   753 {
   754 #if (LZ4_HEAPMODE)
   755     void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
   756 #else
   757     LZ4_stream_t ctx;
   758     void* const ctxPtr = &ctx;
   759 #endif
   760 
   761     int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
   762 
   763 #if (LZ4_HEAPMODE)
   764     FREEMEM(ctxPtr);
   765 #endif
   766     return result;
   767 }
   768 
   769 
   770 int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
   771 {
   772     return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
   773 }
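
A minimal round-trip sketch using the public API (hypothetical caller; LZ4_decompress_safe() is declared in lz4.h):

static int example_roundtrip(const char* src, int srcSize)
{
    int const bound = LZ4_compressBound(srcSize);
    char* const cbuf = (char*)malloc((size_t)bound);
    char* const rbuf = (char*)malloc((size_t)srcSize);
    int ok = 0;
    if ((cbuf != NULL) && (rbuf != NULL)) {
        int const csize = LZ4_compress_default(src, cbuf, srcSize, bound);
        ok = (csize > 0)
          && (LZ4_decompress_safe(cbuf, rbuf, csize, srcSize) == srcSize)
          && (memcmp(rbuf, src, (size_t)srcSize) == 0);
    }
    free(cbuf); free(rbuf);
    return ok;   /* 1 if compress + decompress restored the input */
}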
   774 
   775 
   776 /* hidden debug function */
   777 /* strangely enough, gcc generates faster code when this function is present, even if left unused */
   778 int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
   779 {
   780     LZ4_stream_t ctx;
   781     LZ4_resetStream(&ctx);
   782 
   783     if (inputSize < LZ4_64Klimit)
   784         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16,                        noDict, noDictIssue, acceleration);
   785     else
   786         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
   787 }
   788 
   789 
   790 /*-******************************
   791 *  *_destSize() variant
   792 ********************************/
   793 
   794 static int LZ4_compress_destSize_generic(
   795                        LZ4_stream_t_internal* const ctx,
   796                  const char* const src,
   797                        char* const dst,
   798                        int*  const srcSizePtr,
   799                  const int targetDstSize,
   800                  const tableType_t tableType)
   801 {
   802     const BYTE* ip = (const BYTE*) src;
   803     const BYTE* base = (const BYTE*) src;
   804     const BYTE* lowLimit = (const BYTE*) src;
   805     const BYTE* anchor = ip;
   806     const BYTE* const iend = ip + *srcSizePtr;
   807     const BYTE* const mflimit = iend - MFLIMIT;
   808     const BYTE* const matchlimit = iend - LASTLITERALS;
   809 
   810     BYTE* op = (BYTE*) dst;
   811     BYTE* const oend = op + targetDstSize;
   812     BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
   813     BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
   814     BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
   815 
   816     U32 forwardH;
   817 
   818 
   819     /* Init conditions */
   820     if (targetDstSize < 1) return 0;                                     /* Impossible to store anything */
   821     if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;            /* Unsupported input size, too large (or negative) */
   822     if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
   823     if (*srcSizePtr<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */
   824 
   825     /* First Byte */
   826     *srcSizePtr = 0;
   827     LZ4_putPosition(ip, ctx->hashTable, tableType, base);
   828     ip++; forwardH = LZ4_hashPosition(ip, tableType);
   829 
   830     /* Main Loop */
   831     for ( ; ; ) {
   832         const BYTE* match;
   833         BYTE* token;
   834 
   835         /* Find a match */
   836         {   const BYTE* forwardIp = ip;
   837             unsigned step = 1;
   838             unsigned searchMatchNb = 1 << LZ4_skipTrigger;
   839 
   840             do {
   841                 U32 h = forwardH;
   842                 ip = forwardIp;
   843                 forwardIp += step;
   844                 step = (searchMatchNb++ >> LZ4_skipTrigger);
   845 
   846                 if (unlikely(forwardIp > mflimit)) goto _last_literals;
   847 
   848                 match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
   849                 forwardH = LZ4_hashPosition(forwardIp, tableType);
   850                 LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
   851 
   852             } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
   853                 || (LZ4_read32(match) != LZ4_read32(ip)) );
   854         }
   855 
   856         /* Catch up */
   857         while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
   858 
   859         /* Encode Literal length */
   860         {   unsigned litLength = (unsigned)(ip - anchor);
   861             token = op++;
   862             if (op + ((litLength+240)/255) + litLength > oMaxLit) {
   863                 /* Not enough space for a last match */
   864                 op--;
   865                 goto _last_literals;
   866             }
   867             if (litLength>=RUN_MASK) {
   868                 unsigned len = litLength - RUN_MASK;
   869                 *token=(RUN_MASK<<ML_BITS);
   870                 for(; len >= 255 ; len-=255) *op++ = 255;
   871                 *op++ = (BYTE)len;
   872             }
   873             else *token = (BYTE)(litLength<<ML_BITS);
   874 
   875             /* Copy Literals */
   876             LZ4_wildCopy(op, anchor, op+litLength);
   877             op += litLength;
   878         }
   879 
   880 _next_match:
   881         /* Encode Offset */
   882         LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
   883 
   884         /* Encode MatchLength */
   885         {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
   886 
   887             if (op + ((matchLength+240)/255) > oMaxMatch) {
   888                 /* Match description too long : reduce it */
   889                 matchLength = (15-1) + (oMaxMatch-op) * 255;
   890             }
   891             ip += MINMATCH + matchLength;
   892 
   893             if (matchLength>=ML_MASK) {
   894                 *token += ML_MASK;
   895                 matchLength -= ML_MASK;
   896                 while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
   897                 *op++ = (BYTE)matchLength;
   898             }
   899             else *token += (BYTE)(matchLength);
   900         }
   901 
   902         anchor = ip;
   903 
   904         /* Test end of block */
   905         if (ip > mflimit) break;
   906         if (op > oMaxSeq) break;
   907 
   908         /* Fill table */
   909         LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
   910 
   911         /* Test next position */
   912         match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
   913         LZ4_putPosition(ip, ctx->hashTable, tableType, base);
   914         if ( (match+MAX_DISTANCE>=ip)
   915             && (LZ4_read32(match)==LZ4_read32(ip)) )
   916         { token=op++; *token=0; goto _next_match; }
   917 
   918         /* Prepare next loop */
   919         forwardH = LZ4_hashPosition(++ip, tableType);
   920     }
   921 
   922 _last_literals:
   923     /* Encode Last Literals */
   924     {   size_t lastRunSize = (size_t)(iend - anchor);
   925         if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
   926             /* adapt lastRunSize to fill 'dst' */
   927             lastRunSize  = (oend-op) - 1;
   928             lastRunSize -= (lastRunSize+240)/255;
   929         }
   930         ip = anchor + lastRunSize;
   931 
   932         if (lastRunSize >= RUN_MASK) {
   933             size_t accumulator = lastRunSize - RUN_MASK;
   934             *op++ = RUN_MASK << ML_BITS;
   935             for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
   936             *op++ = (BYTE) accumulator;
   937         } else {
   938             *op++ = (BYTE)(lastRunSize<<ML_BITS);
   939         }
   940         memcpy(op, anchor, lastRunSize);
   941         op += lastRunSize;
   942     }
   943 
   944     /* End */
   945     *srcSizePtr = (int) (((const char*)ip)-src);
   946     return (int) (((char*)op)-dst);
   947 }
   948 
   949 
   950 static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
   951 {
   952     LZ4_resetStream(state);
   953 
   954     if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
   955         return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
   956     } else {
   957         if (*srcSizePtr < LZ4_64Klimit)
   958             return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
   959         else
   960             return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
   961     }
   962 }
   963 
   964 
   965 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
   966 {
   967 #if (LZ4_HEAPMODE)
   968     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
   969 #else
   970     LZ4_stream_t ctxBody;
   971     LZ4_stream_t* ctx = &ctxBody;
   972 #endif
   973 
   974     int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
   975 
   976 #if (LZ4_HEAPMODE)
   977     FREEMEM(ctx);
   978 #endif
   979     return result;
   980 }
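
Note the in/out parameter: on entry *srcSizePtr is the source size available, and on return it is the number of source bytes actually consumed. A hypothetical sketch:

static int example_destSize(const char* src, int srcSize, char* dst, int dstCapacity)
{
    int consumed = srcSize;   /* in : bytes available ; out : bytes consumed */
    int const written = LZ4_compress_destSize(src, dst, &consumed, dstCapacity);
    /* `written` compressed bytes now fill dst, representing `consumed` source bytes */
    (void)consumed;
    return written;
}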
   981 
   982 
   983 
   984 /*-******************************
   985 *  Streaming functions
   986 ********************************/
   987 
   988 LZ4_stream_t* LZ4_createStream(void)
   989 {
   990     LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
   991     LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
   992     LZ4_resetStream(lz4s);
   993     return lz4s;
   994 }
   995 
   996 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
   997 {
   998     DEBUGLOG(4, "LZ4_resetStream");
   999     MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
  1000 }
  1001 
  1002 int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
  1003 {
  1004     if (!LZ4_stream) return 0;   /* support free on NULL */
  1005     FREEMEM(LZ4_stream);
  1006     return (0);
  1007 }
  1008 
  1009 
  1010 #define HASH_UNIT sizeof(reg_t)
  1011 int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
  1012 {
  1013     LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
  1014     const BYTE* p = (const BYTE*)dictionary;
  1015     const BYTE* const dictEnd = p + dictSize;
  1016     const BYTE* base;
  1017 
  1018     if ((dict->initCheck) || (dict->currentOffset > 1 GB))  /* Uninitialized structure, or reuse overflow */
  1019         LZ4_resetStream(LZ4_dict);
  1020 
  1021     if (dictSize < (int)HASH_UNIT) {
  1022         dict->dictionary = NULL;
  1023         dict->dictSize = 0;
  1024         return 0;
  1025     }
  1026 
  1027     if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
  1028     dict->currentOffset += 64 KB;
  1029     base = p - dict->currentOffset;
  1030     dict->dictionary = p;
  1031     dict->dictSize = (U32)(dictEnd - p);
  1032     dict->currentOffset += dict->dictSize;
  1033 
  1034     while (p <= dictEnd-HASH_UNIT) {
  1035         LZ4_putPosition(p, dict->hashTable, byU32, base);
  1036         p+=3;
  1037     }
  1038 
  1039     return dict->dictSize;
  1040 }
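
Typical use pairs LZ4_loadDict() with LZ4_compress_fast_continue() (declared in lz4.h). A hypothetical sketch:

static int example_dictCompress(const char* dictBuf, int dictSize,
                                const char* src, char* dst, int srcSize, int dstCapacity)
{
    LZ4_stream_t stream;
    LZ4_resetStream(&stream);
    LZ4_loadDict(&stream, dictBuf, dictSize);   /* prime the stream with the dictionary */
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCapacity, 1);
}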
  1041 
  1042 
  1043 static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
  1044 {
  1045     if ((LZ4_dict->currentOffset > 0x80000000) ||
  1046         ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
  1047         /* rescale hash table */
  1048         U32 const delta = LZ4_dict->currentOffset - 64 KB;
  1049         const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
  1050         int i;
  1051         for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
  1052             if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
  1053             else LZ4_dict->hashTable[i] -= delta;
  1054         }
  1055         LZ4_dict->currentOffset = 64 KB;
  1056         if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
  1057         LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
  1058     }
  1059 }
  1060 
  1061 
  1062 int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
  1063 {
  1064     LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
  1065     const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
  1066 
  1067     const BYTE* smallest = (const BYTE*) source;
  1068     if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
  1069     if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
  1070     LZ4_renormDictT(streamPtr, smallest);
  1071     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
  1072 
  1073     /* Check overlapping input/dictionary space */
  1074     {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
  1075         if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
  1076             streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
  1077             if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
  1078             if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
  1079             streamPtr->dictionary = dictEnd - streamPtr->dictSize;
  1080         }
  1081     }
  1082 
  1083     /* prefix mode : source data follows dictionary */
  1084     if (dictEnd == (const BYTE*)source) {
  1085         int result;
  1086         if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
  1087             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
  1088         else
  1089             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
  1090         streamPtr->dictSize += (U32)inputSize;
  1091         streamPtr->currentOffset += (U32)inputSize;
  1092         return result;
  1093     }
  1094 
  1095     /* external dictionary mode */
  1096     {   int result;
  1097         if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
  1098             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
  1099         else
  1100             result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
  1101         streamPtr->dictionary = (const BYTE*)source;
  1102         streamPtr->dictSize = (U32)inputSize;
  1103         streamPtr->currentOffset += (U32)inputSize;
  1104         return result;
  1105     }
  1106 }
  1107 
  1108 
  1109 /* Hidden debug function, to force external dictionary mode */
  1110 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
  1111 {
  1112     LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
  1113     int result;
  1114     const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
  1115 
  1116     const BYTE* smallest = dictEnd;
  1117     if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
  1118     LZ4_renormDictT(streamPtr, smallest);
  1119 
  1120     result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
  1121 
  1122     streamPtr->dictionary = (const BYTE*)source;
  1123     streamPtr->dictSize = (U32)inputSize;
  1124     streamPtr->currentOffset += (U32)inputSize;
  1125 
  1126     return result;
  1127 }
  1128 
  1129 
  1130 /*! LZ4_saveDict() :
  1131  *  If the previously compressed data block is not guaranteed to remain available at its memory location,
  1132  *  save it into a safer place (char* safeBuffer).
  1133  *  Note : you don't need to call LZ4_loadDict() afterwards ;
  1134  *         the dictionary is immediately usable, so you can call LZ4_compress_fast_continue() directly.
  1135  *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
  1136  */
  1137 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
  1138 {
  1139     LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
  1140     const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
  1141 
  1142     if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
  1143     if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
  1144 
  1145     memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
  1146 
  1147     dict->dictionary = (const BYTE*)safeBuffer;
  1148     dict->dictSize = (U32)dictSize;
  1149 
  1150     return dictSize;
  1151 }
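
Putting the streaming pieces together: successive LZ4_compress_fast_continue() calls let later blocks reference earlier ones, provided earlier blocks remain at the same address (or are preserved with LZ4_saveDict()). A hypothetical two-block sketch:

static int example_streaming(const char* block1, int size1,
                             const char* block2, int size2,
                             char* dst, int dstCapacity)
{
    LZ4_stream_t* const s = LZ4_createStream();
    int total = 0;
    if (s != NULL) {
        int const c1 = LZ4_compress_fast_continue(s, block1, dst, size1, dstCapacity, 1);
        /* block2 can now reference block1's content as an implicit dictionary */
        int const c2 = LZ4_compress_fast_continue(s, block2, dst + c1, size2, dstCapacity - c1, 1);
        total = ((c1 > 0) && (c2 > 0)) ? c1 + c2 : 0;
        LZ4_freeStream(s);
    }
    return total;
}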
  1152 
  1153 
  1154 
  1155 /*-*****************************
  1156 *  Decompression functions
  1157 *******************************/
  1158 /*! LZ4_decompress_generic() :
  1159  *  This generic decompression function covers all use cases.
  1160  *  It shall be instantiated several times, using different sets of directives.
  1161  *  Note that it is important for performance that this function really gets inlined,
  1162  *  in order to remove useless branches during compilation optimization.
  1163  */
  1164 LZ4_FORCE_O2_GCC_PPC64LE
  1165 LZ4_FORCE_INLINE int LZ4_decompress_generic(
  1166                  const char* const src,
  1167                  char* const dst,
  1168                  int srcSize,
  1169                  int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
  1170 
  1171                  int endOnInput,         /* endOnOutputSize, endOnInputSize */
  1172                  int partialDecoding,    /* full, partial */
  1173                  int targetOutputSize,   /* only used if partialDecoding==partial */
  1174                  int dict,               /* noDict, withPrefix64k, usingExtDict */
  1175                  const BYTE* const lowPrefix,  /* always <= dst, == dst when no prefix */
  1176                  const BYTE* const dictStart,  /* only if dict==usingExtDict */
  1177                  const size_t dictSize         /* note : = 0 if noDict */
  1178                  )
  1179 {
  1180     const BYTE* ip = (const BYTE*) src;
  1181     const BYTE* const iend = ip + srcSize;
  1182 
  1183     BYTE* op = (BYTE*) dst;
  1184     BYTE* const oend = op + outputSize;
  1185     BYTE* cpy;
  1186     BYTE* oexit = op + targetOutputSize;
  1187 
  1188     const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
  1189     const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
  1190     const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};
  1191 
  1192     const int safeDecode = (endOnInput==endOnInputSize);
  1193     const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
  1194 
  1195 
  1196     /* Special cases */
  1197     if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                      /* targetOutputSize too high => just decode everything */
  1198     if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
  1199     if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
  1200 
  1201     /* Main Loop : decode sequences */
  1202     while (1) {
  1203         size_t length;
  1204         const BYTE* match;
  1205         size_t offset;
  1206 
  1207         unsigned const token = *ip++;
  1208 
  1209         /* shortcut for common case :
  1210          * in most circumstances, we expect to decode small matches (<= 18 bytes) separated by a few literals (<= 14 bytes).
  1211          * this shortcut was tested on x86 and x64, where it improves decoding speed.
  1212          * it has not yet been benchmarked on ARM, Power, mips, etc. */
  1213         if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend)
  1214           & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend))
  1215           & ((token < (15<<ML_BITS)) & ((token & ML_MASK) != 15)) ) {
  1216             size_t const ll = token >> ML_BITS;
  1217             size_t const off = LZ4_readLE16(ip+ll);
  1218             const BYTE* const matchPtr = op + ll - off;  /* pointer underflow risk ? */
  1219             if ((off >= 8) /* do not deal with overlapping matches */ & (matchPtr >= lowPrefix)) {
  1220                 size_t const ml = (token & ML_MASK) + MINMATCH;
  1221                 memcpy(op, ip, 16); op += ll; ip += ll + 2 /*offset*/;
  1222                 memcpy(op + 0, matchPtr + 0, 8);
  1223                 memcpy(op + 8, matchPtr + 8, 8);
  1224                 memcpy(op +16, matchPtr +16, 2);
  1225                 op += ml;
  1226                 continue;
  1227             }
  1228         }
  1229 
  1230         /* decode literal length */
  1231         if ((length=(token>>ML_BITS)) == RUN_MASK) {
  1232             unsigned s;
  1233             do {
  1234                 s = *ip++;
  1235                 length += s;
  1236             } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
  1237             if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
  1238             if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
  1239         }
  1240 
  1241         /* copy literals */
  1242         cpy = op+length;
  1243         if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
  1244             || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
  1245         {
  1246             if (partialDecoding) {
  1247                 if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
  1248                 if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
  1249             } else {
  1250                 if ((!endOnInput) && (cpy != oend)) goto _output_error;       /* Error : block decoding must stop exactly there */
  1251                 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
  1252             }
  1253             memcpy(op, ip, length);
  1254             ip += length;
  1255             op += length;
  1256             break;     /* Necessarily EOF, due to parsing restrictions */
  1257         }
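               /* fast path : LZ4_wildCopy() copies 8 bytes at a time and can
                * write up to WILDCOPYLENGTH-1 bytes beyond `cpy` ; the checks
                * above guarantee that this overrun stays inside the output buffer */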
  1258         LZ4_wildCopy(op, ip, cpy);
  1259         ip += length; op = cpy;
  1260 
  1261         /* get offset */
  1262         offset = LZ4_readLE16(ip); ip+=2;
  1263         match = op - offset;
  1264         if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error;   /* Error : offset outside buffers */
  1265         LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */
  1266 
  1267         /* get matchlength */
  1268         length = token & ML_MASK;
  1269         if (length == ML_MASK) {
  1270             unsigned s;
  1271             do {
  1272                 s = *ip++;
  1273                 if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
  1274                 length += s;
  1275             } while (s==255);
  1276             if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
  1277         }
  1278         length += MINMATCH;
  1279 
  1280         /* check external dictionary */
  1281         if ((dict==usingExtDict) && (match < lowPrefix)) {
  1282             if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */
  1283 
  1284             if (length <= (size_t)(lowPrefix-match)) {
  1285                 /* match can be copied as a single segment from the external dictionary */
  1286                 memmove(op, dictEnd - (lowPrefix-match), length);
  1287                 op += length;
  1288             } else {
  1289                 /* match spans both the external dictionary and the current block */
  1290                 size_t const copySize = (size_t)(lowPrefix-match);
  1291                 size_t const restSize = length - copySize;
  1292                 memcpy(op, dictEnd - copySize, copySize);
  1293                 op += copySize;
  1294                 if (restSize > (size_t)(op-lowPrefix)) {  /* overlap copy */
  1295                     BYTE* const endOfMatch = op + restSize;
  1296                     const BYTE* copyFrom = lowPrefix;
  1297                     while (op < endOfMatch) *op++ = *copyFrom++;
  1298                 } else {
  1299                     memcpy(op, lowPrefix, restSize);
  1300                     op += restSize;
  1301             }   }
  1302             continue;
  1303         }
  1304 
  1305         /* copy match within block */
  1306         cpy = op + length;
  1307         if (unlikely(offset<8)) {
  1308             op[0] = match[0];
  1309             op[1] = match[1];
  1310             op[2] = match[2];
  1311             op[3] = match[3];
  1312             match += inc32table[offset];
  1313             memcpy(op+4, match, 4);
  1314             match -= dec64table[offset];
  1315         } else { memcpy(op, match, 8); match+=8; }
  1316         op += 8;
  1317 
  1318         if (unlikely(cpy>oend-12)) {
  1319             BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
  1320             if (cpy > oend-LASTLITERALS) goto _output_error;    /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
  1321             if (op < oCopyLimit) {
  1322                 LZ4_wildCopy(op, match, oCopyLimit);
  1323                 match += oCopyLimit - op;
  1324                 op = oCopyLimit;
  1325             }
  1326             while (op<cpy) *op++ = *match++;
  1327         } else {
  1328             memcpy(op, match, 8);
  1329             if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
  1330         }
  1331         op = cpy;   /* correction */
  1332     }
  1333 
  1334     /* end of decoding */
  1335     if (endOnInput)
  1336        return (int) (((char*)op)-dst);     /* Nb of output bytes decoded */
  1337     else
  1338        return (int) (((const char*)ip)-src);   /* Nb of input bytes read */
  1339 
  1340     /* Error exit point : reached whenever corrupted input or a buffer limit is detected */
  1341 _output_error:
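           /* note : the error code encodes progress : -(number of input bytes consumed) - 1 */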
  1342     return (int) (-(((const char*)ip)-src))-1;
  1343 }
  1344 
  1345 
  1346 LZ4_FORCE_O2_GCC_PPC64LE
  1347 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
  1348 {
  1349     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
  1350 }
  1351 
  1352 LZ4_FORCE_O2_GCC_PPC64LE
  1353 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
  1354 {
  1355     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
  1356 }
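       /* note : in partial mode, decoding stops shortly after targetOutputSize
        * bytes have been produced ; the function may therefore write more than
        * targetOutputSize bytes, though never more than maxDecompressedSize */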
  1357 
  1358 LZ4_FORCE_O2_GCC_PPC64LE
  1359 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
  1360 {
  1361     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
  1362 }
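       /* note : LZ4_decompress_fast() trusts its input : the compressed size is
        * unknown to the decoder, so a malformed block can make it read beyond
        * the input buffer ; use it only on data whose validity is guaranteed */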
  1363 
  1364 
  1365 /*===== streaming decompression functions =====*/
  1366 
  1367 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
  1368 {
  1369     LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
  1370     return lz4s;
  1371 }
  1372 
  1373 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
  1374 {
  1375     if (!LZ4_stream) return 0;   /* support free on NULL */
  1376     FREEMEM(LZ4_stream);
  1377     return 0;
  1378 }
  1379 
  1380 /*!
  1381  * LZ4_setStreamDecode() :
  1382  * Use this function to tell the decoder where to find the dictionary.
  1383  * This function is not necessary if previous data is still available where it was decoded.
  1384  * Loading a size of 0 is allowed (same effect as no dictionary).
  1385  * Return : 1 if OK, 0 if error
  1386  */
  1387 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
  1388 {
  1389     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
  1390     lz4sd->prefixSize = (size_t) dictSize;
  1391     lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
  1392     lz4sd->externalDict = NULL;
  1393     lz4sd->extDictSize  = 0;
  1394     return 1;
  1395 }
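       
       #if 0   /* usage sketch (illustrative, not compiled) : point the decoder at
                * previously decoded data that was moved to a save buffer before
                * continuing ; all names below are hypothetical */
       int example_setStreamDecode(const char* savedHistory, int savedSize,
                                   const char* compressed, int compressedSize,
                                   char* dst, int dstCapacity)
       {
           LZ4_streamDecode_t sd;
           LZ4_setStreamDecode(&sd, savedHistory, savedSize);   /* history now lives in savedHistory */
           return LZ4_decompress_safe_continue(&sd, compressed, dst, compressedSize, dstCapacity);
       }
       #endif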
  1396 
  1397 /*
  1398 *_continue() :
  1399     These decoding functions allow decompression of multiple blocks in "streaming" mode.
  1400     Previously decoded blocks must still be available at the memory position where they were decoded.
  1401     If that's not possible, save the relevant part of the decoded data into a safe buffer,
  1402     and indicate its position using LZ4_setStreamDecode()
  1403 */
  1404 LZ4_FORCE_O2_GCC_PPC64LE
  1405 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
  1406 {
  1407     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
  1408     int result;
  1409 
  1410     if (lz4sd->prefixEnd == (BYTE*)dest) {
  1411         result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
  1412                                         endOnInputSize, full, 0,
  1413                                         usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
  1414         if (result <= 0) return result;
  1415         lz4sd->prefixSize += result;
  1416         lz4sd->prefixEnd  += result;
  1417     } else {
  1418         lz4sd->extDictSize = lz4sd->prefixSize;
  1419         lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
  1420         result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
  1421                                         endOnInputSize, full, 0,
  1422                                         usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
  1423         if (result <= 0) return result;
  1424         lz4sd->prefixSize = result;
  1425         lz4sd->prefixEnd  = (BYTE*)dest + result;
  1426     }
  1427 
  1428     return result;
  1429 }
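       
       #if 0   /* usage sketch (illustrative, not compiled) : streaming decompression
                * with a double buffer, so the previously decoded block stays in
                * memory while the next one is decoded ; readNextBlock() is hypothetical */
       int example_decompress_stream(void)
       {
           LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
           static char dec[2][64 KB];              /* two alternating output slots */
           char cmp[LZ4_COMPRESSBOUND(64 KB)];
           int idx = 0;
           int cmpSize;
           while ((cmpSize = readNextBlock(cmp, (int)sizeof(cmp))) > 0) {
               int const dSize = LZ4_decompress_safe_continue(sd, cmp, dec[idx], cmpSize, 64 KB);
               if (dSize < 0) break;               /* corrupted block */
               /* consume dec[idx][0 .. dSize-1] here, before it gets overwritten */
               idx ^= 1;                           /* keep the last block addressable */
           }
           LZ4_freeStreamDecode(sd);
           return 0;
       }
       #endif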
  1430 
  1431 LZ4_FORCE_O2_GCC_PPC64LE
  1432 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
  1433 {
  1434     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
  1435     int result;
  1436 
  1437     if (lz4sd->prefixEnd == (BYTE*)dest) {
  1438         result = LZ4_decompress_generic(source, dest, 0, originalSize,
  1439                                         endOnOutputSize, full, 0,
  1440                                         usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
  1441         if (result <= 0) return result;
  1442         lz4sd->prefixSize += originalSize;
  1443         lz4sd->prefixEnd  += originalSize;
  1444     } else {
  1445         lz4sd->extDictSize = lz4sd->prefixSize;
  1446         lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
  1447         result = LZ4_decompress_generic(source, dest, 0, originalSize,
  1448                                         endOnOutputSize, full, 0,
  1449                                         usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
  1450         if (result <= 0) return result;
  1451         lz4sd->prefixSize = originalSize;
  1452         lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
  1453     }
  1454 
  1455     return result;
  1456 }
  1457 
  1458 
  1459 /*
  1460 Advanced decoding functions :
  1461 *_usingDict() :
  1462     These decoding functions work the same as the "_continue" ones,
  1463     except that the dictionary must be provided explicitly as a parameter.
  1464 */
  1465 
  1466 LZ4_FORCE_O2_GCC_PPC64LE
  1467 LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
  1468 {
  1469     if (dictSize==0)
  1470         return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
  1471     if (dictStart+dictSize == dest) {
  1472         if (dictSize >= (int)(64 KB - 1))
  1473             return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
  1474         return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
  1475     }
  1476     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
  1477 }
  1478 
  1479 LZ4_FORCE_O2_GCC_PPC64LE
  1480 int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
  1481 {
  1482     return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
  1483 }
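       
       #if 0   /* usage sketch (illustrative, not compiled) : one-shot decompression
                * of a block that was compressed with a dictionary ; names are hypothetical */
       int example_usingDict(const char* compressed, int compressedSize,
                             char* dst, int dstCapacity,
                             const char* dict, int dictSize)
       {
           /* returns the decompressed size, or a negative value on error */
           return LZ4_decompress_safe_usingDict(compressed, dst, compressedSize,
                                                dstCapacity, dict, dictSize);
       }
       #endif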
  1484 
  1485 LZ4_FORCE_O2_GCC_PPC64LE
  1486 int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
  1487 {
  1488     return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
  1489 }
  1490 
  1491 /* debug function : force the external-dictionary decoding path, regardless of dictionary position */
  1492 LZ4_FORCE_O2_GCC_PPC64LE
  1493 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
  1494 {
  1495     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
  1496 }
  1497 
  1498 
  1499 /*=*************************************************
  1500 *  Obsolete Functions
  1501 ***************************************************/
  1502 /* obsolete compression functions */
  1503 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
  1504 int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
  1505 int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
  1506 int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
  1507 int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
  1508 int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
  1509 
  1510 /*
  1511 These function names are deprecated and should no longer be used.
  1512 They are only provided here for compatibility with older user programs.
  1513 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
  1514 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
  1515 */
  1516 int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
  1517 int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
  1518 
  1519 
  1520 /* Obsolete Streaming functions */
  1521 
  1522 int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
  1523 
  1524 static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
  1525 {
  1526     MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
  1527     lz4ds->internal_donotuse.bufferStart = base;
  1528 }
  1529 
  1530 int LZ4_resetStreamState(void* state, char* inputBuffer)
  1531 {
  1532     if ((((uptrval)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on a 4-byte boundary */
  1533     LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
  1534     return 0;
  1535 }
  1536 
  1537 void* LZ4_create (char* inputBuffer)
  1538 {
  1539     LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
           if (lz4ds == NULL) return NULL;   /* allocation failed */
  1540     LZ4_init (lz4ds, (BYTE*)inputBuffer);
  1541     return lz4ds;
  1542 }
  1543 
  1544 char* LZ4_slideInputBuffer (void* LZ4_Data)
  1545 {
  1546     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
  1547     int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
  1548     return (char*)(ctx->bufferStart + dictSize);
  1549 }
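       /* note : in this obsolete API, LZ4_create() binds a stream to `inputBuffer`,
        * and LZ4_slideInputBuffer() copies the last 64 KB of history back to the
        * start of that buffer, returning the position where new data should be written */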
  1550 
  1551 /* Obsolete streaming decompression functions */
  1552 
  1553 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
  1554 {
  1555     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
  1556 }
  1557 
  1558 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
  1559 {
  1560     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
  1561 }
  1562 
  1563 #endif   /* LZ4_COMMONDEFS_ONLY */