From c8e200f2c23da1fbdba9c78b98e3773ce5ca1157 Mon Sep 17 00:00:00 2001 From: Luke Lu Date: Mon, 5 Aug 2013 21:10:54 +0000 Subject: [PATCH] HADOOP-9319. Update bundled LZ4 source to r99. (Binglin Chang via llu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1510734 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../hadoop-common/LICENSE.txt | 16 +- hadoop-common-project/hadoop-common/pom.xml | 5 + .../hadoop-common/src/CMakeLists.txt | 1 + .../hadoop/fs/CommonConfigurationKeys.java | 10 +- .../apache/hadoop/io/compress/Lz4Codec.java | 7 +- .../hadoop/io/compress/lz4/Lz4Compressor.java | 19 +- .../src/main/native/native.vcxproj | 1 + .../src/main/native/native.vcxproj.filters | 3 + .../hadoop/io/compress/lz4/Lz4Compressor.c | 58 +- .../hadoop/io/compress/lz4/Lz4Decompressor.c | 15 +- .../org/apache/hadoop/io/compress/lz4/lz4.c | 1141 +++++++++-------- .../org/apache/hadoop/io/compress/lz4/lz4.h | 179 +++ .../hadoop/io/compress/lz4/lz4_encoder.h | 258 ++++ .../org/apache/hadoop/io/compress/lz4/lz4hc.c | 584 +++++++++ .../org/apache/hadoop/io/compress/lz4/lz4hc.h | 111 ++ .../hadoop/io/compress/lz4/lz4hc_encoder.h | 349 +++++ .../apache/hadoop/io/compress/TestCodec.java | 8 + 18 files changed, 2184 insertions(+), 583 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4_encoder.h create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt index 1cef543f6a..8302f0df2d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -283,6 +283,8 @@ Release 2.3.0 - UNRELEASED IMPROVEMENTS + HADOOP-9319. Update bundled LZ4 source to r99. (Binglin Chang via llu) + HADOOP-9241. DU refresh interval is not configurable (harsh) HADOOP-9417. Support for symlink resolution in LocalFileSystem / diff --git a/hadoop-common-project/hadoop-common/LICENSE.txt b/hadoop-common-project/hadoop-common/LICENSE.txt index 6ccfd09277..946a6df602 100644 --- a/hadoop-common-project/hadoop-common/LICENSE.txt +++ b/hadoop-common-project/hadoop-common/LICENSE.txt @@ -252,24 +252,26 @@ in src/main/native/src/org/apache/hadoop/util: * BSD-style license that can be found in the LICENSE file. */ - For src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c: +For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c, +lz4_encoder.h,lz4hc.h,lz4hc.c,lz4hc_encoder.h}, /* LZ4 - Fast LZ compression algorithm - Copyright (C) 2011, Yann Collet. - BSD License + Header File + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -281,4 +283,8 @@ in src/main/native/src/org/apache/hadoop/util: THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : http://code.google.com/p/lz4/ */ diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 7b3a2f13bf..e29c3a5dfc 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -456,7 +456,12 @@ src/test/empty-file src/test/all-tests src/test/resources/kdc/ldif/users.ldif + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4_encoder.h + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h src/test/java/org/apache/hadoop/fs/test-untar.tgz diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt index bf8ac7be23..4a8f8a1cf1 100644 --- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt +++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt @@ -170,6 +170,7 @@ add_dual_library(hadoop ${D}/io/compress/lz4/Lz4Compressor.c ${D}/io/compress/lz4/Lz4Decompressor.c ${D}/io/compress/lz4/lz4.c + ${D}/io/compress/lz4/lz4hc.c ${SNAPPY_SOURCE_FILES} ${D}/io/compress/zlib/ZlibCompressor.c ${D}/io/compress/zlib/ZlibDecompressor.c diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 68632503e9..dcab369e38 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -96,7 +96,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT = 256 * 1024; - /** Internal buffer size for Snappy compressor/decompressors */ + /** Internal buffer size for Lz4 compressor/decompressors */ public static final String IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY = "io.compression.codec.lz4.buffersize"; @@ -104,6 +104,14 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { public static final int IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT = 256 * 1024; + /** Use lz4hc (slow but with high compression ratio) for lz4 compression */ + public static final String IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY = + "io.compression.codec.lz4.use.lz4hc"; + + /** Default value for IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY */ + public static final boolean IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT = + false; + /** * Service Authorization */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java index e963a95b99..4b0ea796b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java @@ -107,7 +107,7 @@ public CompressionOutputStream createOutputStream(OutputStream out,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT); - int compressionOverhead = Math.max((int)(bufferSize * 0.01), 10); + int compressionOverhead = bufferSize/255 + 16; return new BlockCompressorStream(out, compressor, bufferSize, compressionOverhead); @@ -140,7 +140,10 @@ public Compressor createCompressor() { int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT); - return new Lz4Compressor(bufferSize); + boolean useLz4HC = conf.getBoolean( + CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, + CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT); + return new Lz4Compressor(bufferSize, useLz4HC); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java index 5fefcb76db..b5db99f92d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java @@ -52,6 +52,7 @@ public class Lz4Compressor implements Compressor { private long bytesRead = 0L; private long bytesWritten = 0L; + private final boolean useLz4HC; static { if (NativeCodeLoader.isNativeCodeLoaded()) { @@ -72,8 +73,11 @@ public class Lz4Compressor implements Compressor { * Creates a new compressor. * * @param directBufferSize size of the direct buffer to be used. + * @param useLz4HC use high compression ratio version of lz4, + * which trades CPU for compression ratio. 
*/ - public Lz4Compressor(int directBufferSize) { + public Lz4Compressor(int directBufferSize, boolean useLz4HC) { + this.useLz4HC = useLz4HC; this.directBufferSize = directBufferSize; uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); @@ -81,6 +85,15 @@ public Lz4Compressor(int directBufferSize) { compressedDirectBuf.position(directBufferSize); } + /** + * Creates a new compressor. + * + * @param directBufferSize size of the direct buffer to be used. + */ + public Lz4Compressor(int directBufferSize) { + this(directBufferSize, false); + } + /** * Creates a new compressor with the default buffer size. */ @@ -227,7 +240,7 @@ public synchronized int compress(byte[] b, int off, int len) } // Compress data - n = compressBytesDirect(); + n = useLz4HC ? compressBytesDirectHC() : compressBytesDirect(); compressedDirectBuf.limit(n); uncompressedDirectBuf.clear(); // lz4 consumes all buffer input @@ -297,5 +310,7 @@ public synchronized void end() { private native int compressBytesDirect(); + private native int compressBytesDirectHC(); + public native static String getLibraryName(); } diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj index 73b6cb82a3..312660285a 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj +++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj @@ -72,6 +72,7 @@ + diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters index 0ef3a17bcd..2c94f4311e 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters +++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters @@ -51,6 +51,9 @@ Source Files + + Source Files + Source Files diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c index 10063bb7c2..9f14312cde 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c @@ -23,21 +23,9 @@ #ifdef UNIX #include "config.h" #endif // UNIX +#include "lz4.h" +#include "lz4hc.h" -//**************************** -// Simple Functions -//**************************** - -extern int LZ4_compress (const char* source, char* dest, int isize); - -/* -LZ4_compress() : - return : the number of bytes in compressed buffer dest - note : destination buffer must be already allocated. - To avoid any problem, size it to handle worst cases situations (input data not compressible) - Worst case size is : "inputsize + 0.4%", with "0.4%" being at least 8 bytes. 
- -*/ static jfieldID Lz4Compressor_clazz; static jfieldID Lz4Compressor_uncompressedDirectBuf; @@ -107,5 +95,45 @@ JNIEXPORT jstring JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_getLibraryName( JNIEnv *env, jclass class ) { - return (*env)->NewStringUTF(env, "revision:43"); + return (*env)->NewStringUTF(env, "revision:99"); +} + +JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_compressBytesDirectHC +(JNIEnv *env, jobject thisj){ + const char* uncompressed_bytes = NULL; + char* compressed_bytes = NULL; + + // Get members of Lz4Compressor + jobject clazz = (*env)->GetStaticObjectField(env, thisj, Lz4Compressor_clazz); + jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf); + jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen); + jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_compressedDirectBuf); + jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_directBufferSize); + + // Get the input direct buffer + LOCK_CLASS(env, clazz, "Lz4Compressor"); + uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf); + UNLOCK_CLASS(env, clazz, "Lz4Compressor"); + + if (uncompressed_bytes == 0) { + return (jint)0; + } + + // Get the output direct buffer + LOCK_CLASS(env, clazz, "Lz4Compressor"); + compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf); + UNLOCK_CLASS(env, clazz, "Lz4Compressor"); + + if (compressed_bytes == 0) { + return (jint)0; + } + + compressed_direct_buf_len = LZ4_compressHC(uncompressed_bytes, compressed_bytes, uncompressed_direct_buf_len); + if (compressed_direct_buf_len < 0){ + THROW(env, "java/lang/InternalError", "LZ4_compressHC failed"); + } + + (*env)->SetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen, 0); + + return (jint)compressed_direct_buf_len; } diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c index 08d1b606f8..2b8c91c348 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c @@ -22,18 +22,7 @@ #ifdef UNIX #include "config.h" #endif // UNIX - -int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize); - -/* -LZ4_uncompress_unknownOutputSize() : - isize : is the input size, therefore the compressed size - maxOutputSize : is the size of the destination buffer (which must be already allocated) - return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize) - If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction - This version never writes beyond dest + maxOutputSize, and is therefore protected against malicious data packets - note : This version is a bit slower than LZ4_uncompress -*/ +#include "lz4.h" static jfieldID Lz4Decompressor_clazz; @@ -89,7 +78,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_de return (jint)0; } - uncompressed_direct_buf_len = LZ4_uncompress_unknownOutputSize(compressed_bytes, uncompressed_bytes, compressed_direct_buf_len, uncompressed_direct_buf_len); + uncompressed_direct_buf_len = LZ4_decompress_safe(compressed_bytes, uncompressed_bytes, compressed_direct_buf_len, uncompressed_direct_buf_len); if (uncompressed_direct_buf_len < 0) { THROW(env, "java/lang/InternalError", "LZ4_uncompress_unknownOutputSize failed."); } diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c index be15615a78..8eda7aebd3 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c @@ -1,19 +1,19 @@ /* LZ4 - Fast LZ compression algorithm - Copyright (C) 2011, Yann Collet. - BSD License + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -25,621 +25,672 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : http://code.google.com/p/lz4/ +*/ + +/* +Note : this source file requires "lz4_encoder.h" */ //************************************** -// Copy from: -// URL: http://lz4.googlecode.com/svn/trunk/lz4.c -// Repository Root: http://lz4.googlecode.com/svn -// Repository UUID: 650e7d94-2a16-8b24-b05c-7c0b3f6821cd -// Revision: 43 -// Node Kind: file -// Last Changed Author: yann.collet.73@gmail.com -// Last Changed Rev: 43 -// Last Changed Date: 2011-12-16 15:41:46 -0800 (Fri, 16 Dec 2011) -// Sha1: 9db7b2c57698c528d79572e6bce2e7dc33fa5998 +// Tuning parameters //************************************** +// MEMORY_USAGE : +// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +// Increasing memory usage improves compression ratio +// Reduced memory usage can improve speed, due to cache effect +// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache +#define MEMORY_USAGE 14 + +// HEAPMODE : +// Select how default compression function will allocate memory for its hash table, +// in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)). +// Default allocation strategy is to use stack (HEAPMODE 0) +// Note : explicit functions *_stack* and *_heap* are unaffected by this setting +#define HEAPMODE 0 + //************************************** -// Compilation Directives +// CPU Feature Detection //************************************** -#if __STDC_VERSION__ >= 199901L - /* "restrict" is a known keyword */ +// 32 or 64 bits ? 
+#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \ + || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \ + || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \ + || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode +# define LZ4_ARCH64 1 #else -#define restrict // Disable restrict +# define LZ4_ARCH64 0 #endif +// Little Endian or Big Endian ? +// Overwrite the #define below if you know your architecture endianess +#if defined (__GLIBC__) +# include +# if (__BYTE_ORDER == __BIG_ENDIAN) +# define LZ4_BIG_ENDIAN 1 +# endif +#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN)) +# define LZ4_BIG_ENDIAN 1 +#elif defined(__sparc) || defined(__sparc__) \ + || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \ + || defined(__hpux) || defined(__hppa) \ + || defined(_MIPSEB) || defined(__s390__) +# define LZ4_BIG_ENDIAN 1 +#else +// Little Endian assumed. PDP Endian and other very rare endian format are unsupported. +#endif + +// Unaligned memory access is automatically enabled for "common" CPU, such as x86. +// For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property +// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance +#if defined(__ARM_FEATURE_UNALIGNED) +# define LZ4_FORCE_UNALIGNED_ACCESS 1 +#endif + +// Define this parameter if your target system or compiler does not support hardware bit count +#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count +# define LZ4_FORCE_SW_BITCOUNT +#endif + +// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE : +// This option may provide a small boost to performance for some big endian cpu, although probably modest. 
+// You may set this option to 1 if data will remain within closed environment. +// This option is useless on Little_Endian CPU (such as x86) +//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 + + +//************************************** +// Compiler Options +//************************************** +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) // C99 +/* "restrict" is a known keyword */ +#else +# define restrict // Disable restrict +#endif + +#ifdef _MSC_VER // Visual Studio +# define forceinline static __forceinline +# include // For Visual 2005 +# if LZ4_ARCH64 // 64-bits +# pragma intrinsic(_BitScanForward64) // For Visual 2005 +# pragma intrinsic(_BitScanReverse64) // For Visual 2005 +# else // 32-bits +# pragma intrinsic(_BitScanForward) // For Visual 2005 +# pragma intrinsic(_BitScanReverse) // For Visual 2005 +# endif +# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant +#else +# ifdef __GNUC__ +# define forceinline static inline __attribute__((always_inline)) +# else +# define forceinline static inline +# endif +#endif + +#ifdef _MSC_VER +# define lz4_bswap16(x) _byteswap_ushort(x) +#else +# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) +#endif + +#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__) +# define expect(expr,value) (__builtin_expect ((expr),(value)) ) +#else +# define expect(expr,value) (expr) +#endif + +#define likely(expr) expect((expr) != 0, 1) +#define unlikely(expr) expect((expr) != 0, 0) + //************************************** // Includes //************************************** #include // for malloc #include // for memset - - -//************************************** -// Performance parameter -//************************************** -// Increasing this value improves compression ratio -// Lowering this value reduces memory usage -// Lowering may also improve 
speed, typically on reaching cache size limits (L1 32KB for Intel, 64KB for AMD) -// Memory usage formula for 32 bits systems : N->2^(N+2) Bytes (examples : 17 -> 512KB ; 12 -> 16KB) -#define HASH_LOG 12 +#include "lz4.h" //************************************** // Basic Types //************************************** -#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively -#define BYTE unsigned __int8 -#define U16 unsigned __int16 -#define U32 unsigned __int32 -#define S32 __int32 +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99 +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; #else -#include -#define BYTE uint8_t -#define U16 uint16_t -#define U32 uint32_t -#define S32 int32_t + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; #endif - -//************************************** -// Constants -//************************************** -#define MINMATCH 4 -#define SKIPSTRENGTH 6 -#define STACKLIMIT 13 -#define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()). 
-#define COPYTOKEN 4 -#define COPYLENGTH 8 -#define LASTLITERALS 5 -#define MFLIMIT (COPYLENGTH+MINMATCH) -#define MINLENGTH (MFLIMIT+1) - -#define MAXD_LOG 16 -#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) - -#define HASHTABLESIZE (1 << HASH_LOG) -#define HASH_MASK (HASHTABLESIZE - 1) - -#define ML_BITS 4 -#define ML_MASK ((1U<v) -#define A16(x) (((U16_S *)(x))->v) +#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) +# pragma pack(pop) +#endif + +#define A16(x) (((U16_S *)(x))->v) +#define A32(x) (((U32_S *)(x))->v) +#define A64(x) (((U64_S *)(x))->v) +#define AARCH(x) (((size_t_S *)(x))->v) + + +//************************************** +// Constants +//************************************** +#define HASHTABLESIZE (1 << MEMORY_USAGE) + +#define MINMATCH 4 + +#define COPYLENGTH 8 +#define LASTLITERALS 5 +#define MFLIMIT (COPYLENGTH+MINMATCH) +#define MINLENGTH (MFLIMIT+1) + +#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1)) +#define SKIPSTRENGTH 6 // Increasing this value will make the compression run slower on incompressible data + +#define MAXD_LOG 16 +#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) + +#define ML_BITS 4 +#define ML_MASK ((1U<> ((MINMATCH*8)-HASH_LOG)) -#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p)) -#define LZ4_COPYPACKET(s,d) A32(d) = A32(s); d+=4; s+=4; A32(d) = A32(s); d+=4; s+=4; -#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (dhashTable; - memset((void*)HashTable, 0, sizeof(srt->hashTable)); -#else - (void) ctx; -#endif - - - // First Byte - HashTable[LZ4_HASH_VALUE(ip)] = ip; - ip++; forwardH = LZ4_HASH_VALUE(ip); - - // Main Loop - for ( ; ; ) - { - int findMatchAttempts = (1U << skipStrength) + 3; - const BYTE* forwardIp = ip; - const BYTE* ref; - BYTE* token; - - // Find a match - do { - U32 h = forwardH; - int step = findMatchAttempts++ >> skipStrength; - ip = forwardIp; - forwardIp = ip + step; - - if (forwardIp > mflimit) { goto _last_literals; } - - forwardH = LZ4_HASH_VALUE(forwardIp); - ref = HashTable[h]; - 
HashTable[h] = ip; - - } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); - - // Catch up - while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } - - // Encode Literal length - length = ip - anchor; - token = op++; - if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } - else *token = (length<>8; } -#endif - - // Start Counting - ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified - anchor = ip; - while (ip> 27]; -#else - if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; } - if (A16(ref) == A16(ip)) { ip+=2; ref+=2; } - if (*ref == *ip) ip++; -#endif - goto _endCount; - } - if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; } - if ((ip=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } - else *token += len; - - // Test end of chunk - if (ip > mflimit) { anchor = ip; break; } - - // Fill table - HashTable[LZ4_HASH_VALUE(ip-2)] = ip-2; - - // Test next position - ref = HashTable[LZ4_HASH_VALUE(ip)]; - HashTable[LZ4_HASH_VALUE(ip)] = ip; - if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; } - - // Prepare next loop - anchor = ip++; - forwardH = LZ4_HASH_VALUE(ip); - } - -_last_literals: - // Encode Last Literals - { - int lastRun = iend - anchor; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun<>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clzll(val) >> 3); +# else + int r; + if (!(val>>32)) { r=4; } else { r=0; val>>=32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif +# else +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64( &r, val ); + return (int)(r>>3); +# 
elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctzll(val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif +# endif } - - -// Note : this function is valid only if isize < LZ4_64KLIMIT -#define LZ4_64KLIMIT ((1U<<16) + (MFLIMIT-1)) -#define HASHLOG64K (HASH_LOG+1) -#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K)) -#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p)) -int LZ4_compress64kCtx(void** ctx, - char* source, - char* dest, - int isize) -{ -#if HEAPMODE - struct refTables *srt = (struct refTables *) (*ctx); - U16* HashTable; #else - U16 HashTable[HASHTABLESIZE<<1] = {0}; -#endif - const BYTE* ip = (BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const base = ip; - const BYTE* const iend = ip + isize; - const BYTE* const mflimit = iend - MFLIMIT; -#define matchlimit (iend - LASTLITERALS) - - BYTE* op = (BYTE*) dest; - -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; -#endif - int len, length; - const int skipStrength = SKIPSTRENGTH; - U32 forwardH; - - - // Init - if (isizehashTable); - memset((void*)HashTable, 0, sizeof(srt->hashTable)); -#else - (void) ctx; -#endif - - - // First Byte - ip++; forwardH = LZ4_HASH64K_VALUE(ip); - - // Main Loop - for ( ; ; ) - { - int findMatchAttempts = (1U << skipStrength) + 3; - const BYTE* forwardIp = ip; - const BYTE* ref; - BYTE* token; - - // Find a match - do { - U32 h = forwardH; - int step = findMatchAttempts++ >> skipStrength; - ip = forwardIp; - forwardIp = ip + step; - - if (forwardIp 
> mflimit) { goto _last_literals; } - - forwardH = LZ4_HASH64K_VALUE(forwardIp); - ref = base + HashTable[h]; - HashTable[h] = ip - base; - - } while (A32(ref) != A32(ip)); - - // Catch up - while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; } - - // Encode Literal length - length = ip - anchor; - token = op++; - if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; } - else *token = (length<>8; } -#endif - - // Start Counting - ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified - anchor = ip; - while (ip> 27]; -#else - if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; } - if (A16(ref) == A16(ip)) { ip+=2; ref+=2; } - if (*ref == *ip) ip++; -#endif - goto _endCount; - } - if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; } - if ((ip=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; } - else *token += len; - - // Test end of chunk - if (ip > mflimit) { anchor = ip; break; } - - // Test next position - ref = base + HashTable[LZ4_HASH64K_VALUE(ip)]; - HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base; - if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; } - - // Prepare next loop - anchor = ip++; - forwardH = LZ4_HASH64K_VALUE(ip); - } - -_last_literals: - // Encode Last Literals - { - int lastRun = iend - anchor; - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (lastRun<>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clz(val) >> 3); +# else + int r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif +# else +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION 
>= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctz(val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif +# endif } +#endif -int LZ4_compress(char* source, - char* dest, - int isize) + +//****************************** +// Compression functions +//****************************** + +/* +int LZ4_compress_stack( + const char* source, + char* dest, + int inputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest'. +Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize). +return : the number of bytes written in buffer 'dest' +*/ +#define FUNCTION_NAME LZ4_compress_stack +#include "lz4_encoder.h" + + +/* +int LZ4_compress_stack_limitedOutput( + const char* source, + char* dest, + int inputSize, + int maxOutputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. +If it cannot achieve it, compression will stop, and result of the function will be zero. +return : the number of bytes written in buffer 'dest', or 0 if the compression fails +*/ +#define FUNCTION_NAME LZ4_compress_stack_limitedOutput +#define LIMITED_OUTPUT +#include "lz4_encoder.h" + + +/* +int LZ4_compress64k_stack( + const char* source, + char* dest, + int inputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest'. +This function compresses better than LZ4_compress_stack(), on the condition that +'inputSize' must be < to LZ4_64KLIMIT, or the function will fail. +Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize). 
+return : the number of bytes written in buffer 'dest', or 0 if compression fails +*/ +#define FUNCTION_NAME LZ4_compress64k_stack +#define COMPRESS_64K +#include "lz4_encoder.h" + + +/* +int LZ4_compress64k_stack_limitedOutput( + const char* source, + char* dest, + int inputSize, + int maxOutputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. +This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that +'inputSize' must be < to LZ4_64KLIMIT, or the function will fail. +If it cannot achieve it, compression will stop, and result of the function will be zero. +return : the number of bytes written in buffer 'dest', or 0 if the compression fails +*/ +#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput +#define COMPRESS_64K +#define LIMITED_OUTPUT +#include "lz4_encoder.h" + + +/* +void* LZ4_createHeapMemory(); +int LZ4_freeHeapMemory(void* ctx); + +Used to allocate and free hashTable memory +to be used by the LZ4_compress_heap* family of functions. +LZ4_createHeapMemory() returns NULL is memory allocation fails. +*/ +void* LZ4_create() { return malloc(HASHTABLESIZE); } +int LZ4_free(void* ctx) { free(ctx); return 0; } + + +/* +int LZ4_compress_heap( + void* ctx, + const char* source, + char* dest, + int inputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest'. +The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'. +Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize). 
+return : the number of bytes written in buffer 'dest' +*/ +#define FUNCTION_NAME LZ4_compress_heap +#define USE_HEAPMEMORY +#include "lz4_encoder.h" + + +/* +int LZ4_compress_heap_limitedOutput( + void* ctx, + const char* source, + char* dest, + int inputSize, + int maxOutputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. +If it cannot achieve it, compression will stop, and result of the function will be zero. +The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'. +return : the number of bytes written in buffer 'dest', or 0 if the compression fails +*/ +#define FUNCTION_NAME LZ4_compress_heap_limitedOutput +#define LIMITED_OUTPUT +#define USE_HEAPMEMORY +#include "lz4_encoder.h" + + +/* +int LZ4_compress64k_heap( + void* ctx, + const char* source, + char* dest, + int inputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest'. +The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'. +'inputSize' must be < to LZ4_64KLIMIT, or the function will fail. +Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize). +return : the number of bytes written in buffer 'dest' +*/ +#define FUNCTION_NAME LZ4_compress64k_heap +#define COMPRESS_64K +#define USE_HEAPMEMORY +#include "lz4_encoder.h" + + +/* +int LZ4_compress64k_heap_limitedOutput( + void* ctx, + const char* source, + char* dest, + int inputSize, + int maxOutputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. +If it cannot achieve it, compression will stop, and result of the function will be zero. +The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'. +'inputSize' must be < to LZ4_64KLIMIT, or the function will fail. 
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails +*/ +#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput +#define COMPRESS_64K +#define LIMITED_OUTPUT +#define USE_HEAPMEMORY +#include "lz4_encoder.h" + + +int LZ4_compress(const char* source, char* dest, int inputSize) { #if HEAPMODE - void* ctx = malloc(sizeof(struct refTables)); - int result; - if (isize < LZ4_64KLIMIT) - result = LZ4_compress64kCtx(&ctx, source, dest, isize); - else result = LZ4_compressCtx(&ctx, source, dest, isize); - free(ctx); - return result; + void* ctx = LZ4_create(); + int result; + if (ctx == NULL) return 0; // Failed allocation => compression not done + if (inputSize < LZ4_64KLIMIT) + result = LZ4_compress64k_heap(ctx, source, dest, inputSize); + else result = LZ4_compress_heap(ctx, source, dest, inputSize); + LZ4_free(ctx); + return result; #else - if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize); - return LZ4_compressCtx(NULL, source, dest, isize); + if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize); + return LZ4_compress_stack(source, dest, inputSize); #endif } +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ +#if HEAPMODE + void* ctx = LZ4_create(); + int result; + if (ctx == NULL) return 0; // Failed allocation => compression not done + if (inputSize < LZ4_64KLIMIT) + result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize); + else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize); + LZ4_free(ctx); + return result; +#else + if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize); + return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize); +#endif +} //**************************** -// Decompression CODE +// Decompression functions //**************************** -// 
Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize() -// are safe against "buffer overflow" attack type -// since they will *never* write outside of the provided output buffer : -// they both check this condition *before* writing anything. -// A corrupted packet however can make them *read* within the first 64K before the output buffer. - -int LZ4_uncompress(char* source, - char* dest, - int osize) -{ - // Local Variables - const BYTE* restrict ip = (const BYTE*) source; - const BYTE* restrict ref; - - BYTE* restrict op = (BYTE*) dest; - BYTE* const oend = op + osize; - BYTE* cpy; - - BYTE token; - - U32 dec[4]={0, 3, 2, 3}; - int len, length; +typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive; +typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; +typedef enum { full = 0, partial = 1 } earlyEnd_directive; - // Main Loop - while (1) - { - // get runlength - token = *ip++; - if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } +// This generic decompression function cover all use cases. +// It shall be instanciated several times, using different sets of directives +// Note that it is essential this generic function is really inlined, +// in order to remove useless branches during compilation optimisation. +forceinline int LZ4_decompress_generic( + const char* source, + char* dest, + int inputSize, // + int outputSize, // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer. 
- // copy literals - cpy = op+length; - if (cpy>oend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; - memcpy(op, ip, length); - ip += length; - break; // Necessarily EOF - } - LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy; + int endOnInput, // endOnOutputSize, endOnInputSize + int prefix64k, // noPrefix, withPrefix + int partialDecoding, // full, partial + int targetOutputSize // only used if partialDecoding==partial + ) +{ + // Local Variables + const BYTE* restrict ip = (const BYTE*) source; + const BYTE* ref; + const BYTE* const iend = ip + inputSize; + BYTE* op = (BYTE*) dest; + BYTE* const oend = op + outputSize; + BYTE* cpy; + BYTE* oexit = op + targetOutputSize; - // get offset -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - ref = cpy - A16(ip); ip+=2; -#else - { int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; } + size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; +#if LZ4_ARCH64 + size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3}; #endif - // get matchlength - if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; } - // copy repeated sequence - if (op-ref oend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; - LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH)); - while(op oend-MFLIMIT)) oexit = oend-MFLIMIT; // targetOutputSize too high => decode everything + if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 
0 : -1; // Empty output buffer + if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1); - // end of decoding - return (int) (((char*)ip)-source); - // write overflow error detected + // Main Loop + while (1) + { + unsigned token; + size_t length; + + // get runlength + token = *ip++; + if ((length=(token>>ML_BITS)) == RUN_MASK) + { + unsigned s=255; + while (((endOnInput)?ip(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) ) + || ((!endOnInput) && (cpy>oend-COPYLENGTH))) + { + if (partialDecoding) + { + if (cpy > oend) goto _output_error; // Error : write attempt beyond end of output buffer + if ((endOnInput) && (ip+length > iend)) goto _output_error; // Error : read attempt beyond end of input buffer + } + else + { + if ((!endOnInput) && (cpy != oend)) goto _output_error; // Error : block decoding must stop exactly there + if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; // Error : input must be consumed + } + memcpy(op, ip, length); + ip += length; + op += length; + break; // Necessarily EOF, due to parsing restrictions + } + LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy; + + // get offset + LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2; + if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error; // Error : offset outside destination buffer + + // get matchlength + if ((length=(token&ML_MASK)) == ML_MASK) + { + for ( ; (!endOnInput) || (ipoend-(COPYLENGTH)-(STEPSIZE-4)) + { + if (cpy > oend-LASTLITERALS) goto _output_error; // Error : last 5 bytes must be literals + LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH)); + while(op>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } - - // copy literals - cpy = op+length; - if (cpy>oend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; - memcpy(op, ip, length); - op += length; - break; // Necessarily EOF - } - LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy; - if (ip>=iend) break; // check 
EOF - - // get offset -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - ref = cpy - A16(ip); ip+=2; -#else - { int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; } -#endif - - // get matchlength - if ((length=(token&ML_MASK)) == ML_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; } - - // copy repeated sequence - if (op-refoend-COPYLENGTH) - { - if (cpy > oend) goto _output_error; - LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH)); - while(op> ((MINMATCH*8)-HASHLOG)) +#define LZ4_HASHVALUE(p) LZ4_HASH(A32(p)) + + + +//**************************** +// Function code +//**************************** + +int FUNCTION_NAME( +#ifdef USE_HEAPMEMORY + void* ctx, +#endif + const char* source, + char* dest, + int inputSize +#ifdef LIMITED_OUTPUT + ,int maxOutputSize +#endif + ) +{ +#ifdef USE_HEAPMEMORY + CURRENT_H_TYPE* HashTable = (CURRENT_H_TYPE*)ctx; +#else + CURRENT_H_TYPE HashTable[HASHTABLE_NBCELLS] = {0}; +#endif + + const BYTE* ip = (BYTE*) source; + CURRENTBASE(base); + const BYTE* anchor = ip; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; +#define matchlimit (iend - LASTLITERALS) + + BYTE* op = (BYTE*) dest; +#ifdef LIMITED_OUTPUT + BYTE* const oend = op + maxOutputSize; +#endif + + int length; + const int skipStrength = SKIPSTRENGTH; + U32 forwardH; + + + // Init + if (inputSize=LZ4_64KLIMIT) return 0; // Size too large (not within 64K limit) +#endif +#ifdef USE_HEAPMEMORY + memset((void*)HashTable, 0, HASHTABLESIZE); +#endif + + // First Byte + HashTable[LZ4_HASHVALUE(ip)] = (CURRENT_H_TYPE)(ip - base); + ip++; forwardH = LZ4_HASHVALUE(ip); + + // Main Loop + for ( ; ; ) + { + int findMatchAttempts = (1U << skipStrength) + 3; + const BYTE* forwardIp = ip; + const BYTE* ref; + BYTE* token; + + // Find a match + do { + U32 h = forwardH; + int step = findMatchAttempts++ >> skipStrength; + ip = forwardIp; + forwardIp = ip + step; + + if unlikely(forwardIp > mflimit) { goto _last_literals; } + + forwardH = 
LZ4_HASHVALUE(forwardIp); + ref = base + HashTable[h]; + HashTable[h] = (CURRENT_H_TYPE)(ip - base); + + } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); + + // Catch up + while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; } + + // Encode Literal length + length = (int)(ip - anchor); + token = op++; +#ifdef LIMITED_OUTPUT + if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) > oend) return 0; // Check output limit +#endif + if (length>=(int)RUN_MASK) + { + int len = length-RUN_MASK; + *token=(RUN_MASK<= 255 ; len-=255) *op++ = 255; + *op++ = (BYTE)len; + } + else *token = (BYTE)(length<>8) > oend) return 0; // Check output limit +#endif + if (length>=(int)ML_MASK) + { + *token += ML_MASK; + length -= ML_MASK; + for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; } + if (length >= 255) { length-=255; *op++ = 255; } + *op++ = (BYTE)length; + } + else *token += (BYTE)(length); + + // Test end of chunk + if (ip > mflimit) { anchor = ip; break; } + + // Fill table + HashTable[LZ4_HASHVALUE(ip-2)] = (CURRENT_H_TYPE)(ip - 2 - base); + + // Test next position + ref = base + HashTable[LZ4_HASHVALUE(ip)]; + HashTable[LZ4_HASHVALUE(ip)] = (CURRENT_H_TYPE)(ip - base); + if ((ref >= ip - MAX_DISTANCE) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; } + + // Prepare next loop + anchor = ip++; + forwardH = LZ4_HASHVALUE(ip); + } + +_last_literals: + // Encode Last Literals + { + int lastRun = (int)(iend - anchor); +#ifdef LIMITED_OUTPUT + if (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) return 0; // Check output limit +#endif + if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } + else *op++ = (BYTE)(lastRun< // calloc, free +#define ALLOCATOR(s) calloc(1,s) +#define FREEMEM free +#include // memset, memcpy +#define MEM_INIT memset + + +//************************************** +// CPU Feature 
Detection +//************************************** +// 32 or 64 bits ? +#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \ + || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \ + || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \ + || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode +# define LZ4_ARCH64 1 +#else +# define LZ4_ARCH64 0 +#endif + +// Little Endian or Big Endian ? +// Overwrite the #define below if you know your architecture endianess +#if defined (__GLIBC__) +# include +# if (__BYTE_ORDER == __BIG_ENDIAN) +# define LZ4_BIG_ENDIAN 1 +# endif +#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN)) +# define LZ4_BIG_ENDIAN 1 +#elif defined(__sparc) || defined(__sparc__) \ + || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \ + || defined(__hpux) || defined(__hppa) \ + || defined(_MIPSEB) || defined(__s390__) +# define LZ4_BIG_ENDIAN 1 +#else +// Little Endian assumed. PDP Endian and other very rare endian format are unsupported. +#endif + +// Unaligned memory access is automatically enabled for "common" CPU, such as x86. 
+// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected +// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance +#if defined(__ARM_FEATURE_UNALIGNED) +# define LZ4_FORCE_UNALIGNED_ACCESS 1 +#endif + +// Define this parameter if your target system or compiler does not support hardware bit count +#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count +# define LZ4_FORCE_SW_BITCOUNT +#endif + + +//************************************** +// Compiler Options +//************************************** +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99 + /* "restrict" is a known keyword */ +#else +# define restrict // Disable restrict +#endif + +#ifdef _MSC_VER // Visual Studio +# define forceinline static __forceinline +# include // For Visual 2005 +# if LZ4_ARCH64 // 64-bits +# pragma intrinsic(_BitScanForward64) // For Visual 2005 +# pragma intrinsic(_BitScanReverse64) // For Visual 2005 +# else // 32-bits +# pragma intrinsic(_BitScanForward) // For Visual 2005 +# pragma intrinsic(_BitScanReverse) // For Visual 2005 +# endif +# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant +# pragma warning(disable : 4701) // disable: C4701: potentially uninitialized local variable used +#else +# ifdef __GNUC__ +# define forceinline static inline __attribute__((always_inline)) +# else +# define forceinline static inline +# endif +#endif + +#ifdef _MSC_VER // Visual Studio +# define lz4_bswap16(x) _byteswap_ushort(x) +#else +# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) +#endif + + +//************************************** +// Includes +//************************************** +#include "lz4hc.h" +#include "lz4.h" + + +//************************************** +// Basic Types 
+//************************************** +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99 +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; +#endif + +#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS) +# define _PACKED __attribute__ ((packed)) +#else +# define _PACKED +#endif + +#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) +# ifdef __IBMC__ +# pragma pack(1) +# else +# pragma pack(push, 1) +# endif +#endif + +typedef struct _U16_S { U16 v; } _PACKED U16_S; +typedef struct _U32_S { U32 v; } _PACKED U32_S; +typedef struct _U64_S { U64 v; } _PACKED U64_S; + +#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) +# pragma pack(pop) +#endif + +#define A64(x) (((U64_S *)(x))->v) +#define A32(x) (((U32_S *)(x))->v) +#define A16(x) (((U16_S *)(x))->v) + + +//************************************** +// Constants +//************************************** +#define MINMATCH 4 + +#define DICTIONARY_LOGSIZE 16 +#define MAXD (1<> ((MINMATCH*8)-HASH_LOG)) +#define HASH_VALUE(p) HASH_FUNCTION(A32(p)) +#define HASH_POINTER(p) (HashTable[HASH_VALUE(p)] + base) +#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK] +#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p)) + + +//************************************** +// Private functions +//************************************** +#if LZ4_ARCH64 + +forceinline int LZ4_NbCommonBytes (register U64 val) +{ +#if defined(LZ4_BIG_ENDIAN) +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clzll(val) >> 3); +# else + int r; + if (!(val>>32)) { r=4; } 
else { r=0; val>>=32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif +#else +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctzll(val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58]; +# endif +#endif +} + +#else + +forceinline int LZ4_NbCommonBytes (register U32 val) +{ +#if defined(LZ4_BIG_ENDIAN) +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanReverse( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clz(val) >> 3); +# else + int r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif +#else +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward( &r, val ); + return (int)(r>>3); +# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctz(val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif +#endif +} + +#endif + + +forceinline int LZ4_InitHC (LZ4HC_Data_Structure* hc4, const BYTE* base) +{ + MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); + MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); + hc4->nextToUpdate = base + 1; + 
hc4->base = base; + hc4->inputBuffer = base; + hc4->end = base; + return 1; +} + + +void* LZ4_createHC (const char* slidingInputBuffer) +{ + void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure)); + LZ4_InitHC ((LZ4HC_Data_Structure*)hc4, (const BYTE*)slidingInputBuffer); + return hc4; +} + + +int LZ4_freeHC (void* LZ4HC_Data) +{ + FREEMEM(LZ4HC_Data); + return (0); +} + + +// Update chains up to ip (excluded) +forceinline void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip) +{ + U16* chainTable = hc4->chainTable; + HTYPE* HashTable = hc4->hashTable; + INITBASE(base,hc4->base); + + while(hc4->nextToUpdate < ip) + { + const BYTE* const p = hc4->nextToUpdate; + size_t delta = (p) - HASH_POINTER(p); + if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; + DELTANEXT(p) = (U16)delta; + HashTable[HASH_VALUE(p)] = (HTYPE)((p) - base); + hc4->nextToUpdate++; + } +} + + +char* LZ4_slideInputBufferHC(void* LZ4HC_Data) +{ + LZ4HC_Data_Structure* hc4 = (LZ4HC_Data_Structure*)LZ4HC_Data; + U32 distance = (U32)(hc4->end - hc4->inputBuffer) - 64 KB; + distance = (distance >> 16) << 16; // Must be a multiple of 64 KB + LZ4HC_Insert(hc4, hc4->end - MINMATCH); + memcpy((void*)(hc4->end - 64 KB - distance), (const void*)(hc4->end - 64 KB), 64 KB); + hc4->nextToUpdate -= distance; + hc4->base -= distance; + if ((U32)(hc4->inputBuffer - hc4->base) > 1 GB + 64 KB) // Avoid overflow + { + int i; + hc4->base += 1 GB; + for (i=0; ihashTable[i] -= 1 GB; + } + hc4->end -= distance; + return (char*)(hc4->end); +} + + +forceinline size_t LZ4HC_CommonLength (const BYTE* p1, const BYTE* p2, const BYTE* const matchlimit) +{ + const BYTE* p1t = p1; + + while (p1tchainTable; + HTYPE* const HashTable = hc4->hashTable; + const BYTE* ref; + INITBASE(base,hc4->base); + int nbAttempts=MAX_NB_ATTEMPTS; + size_t repl=0, ml=0; + U16 delta=0; // useless assignment, to remove an uninitialization warning + + // HC4 match finder + LZ4HC_Insert(hc4, ip); + ref = HASH_POINTER(ip); + +#define REPEAT_OPTIMIZATION 
+#ifdef REPEAT_OPTIMIZATION + // Detect repetitive sequences of length <= 4 + if ((U32)(ip-ref) <= 4) // potential repetition + { + if (A32(ref) == A32(ip)) // confirmed + { + delta = (U16)(ip-ref); + repl = ml = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH; + *matchpos = ref; + } + ref = GETNEXT(ref); + } +#endif + + while (((U32)(ip-ref) <= MAX_DISTANCE) && (nbAttempts)) + { + nbAttempts--; + if (*(ref+ml) == *(ip+ml)) + if (A32(ref) == A32(ip)) + { + size_t mlt = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH; + if (mlt > ml) { ml = mlt; *matchpos = ref; } + } + ref = GETNEXT(ref); + } + +#ifdef REPEAT_OPTIMIZATION + // Complete table + if (repl) + { + const BYTE* ptr = ip; + const BYTE* end; + + end = ip + repl - (MINMATCH-1); + while(ptr < end-delta) + { + DELTANEXT(ptr) = delta; // Pre-Load + ptr++; + } + do + { + DELTANEXT(ptr) = delta; + HashTable[HASH_VALUE(ptr)] = (HTYPE)((ptr) - base); // Head of chain + ptr++; + } while(ptr < end); + hc4->nextToUpdate = end; + } +#endif + + return (int)ml; +} + + +forceinline int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos) +{ + U16* const chainTable = hc4->chainTable; + HTYPE* const HashTable = hc4->hashTable; + INITBASE(base,hc4->base); + const BYTE* ref; + int nbAttempts = MAX_NB_ATTEMPTS; + int delta = (int)(ip-startLimit); + + // First Match + LZ4HC_Insert(hc4, ip); + ref = HASH_POINTER(ip); + + while (((U32)(ip-ref) <= MAX_DISTANCE) && (nbAttempts)) + { + nbAttempts--; + if (*(startLimit + longest) == *(ref - delta + longest)) + if (A32(ref) == A32(ip)) + { +#if 1 + const BYTE* reft = ref+MINMATCH; + const BYTE* ipt = ip+MINMATCH; + const BYTE* startt = ip; + + while (iptstartLimit) && (reft > hc4->inputBuffer) && (startt[-1] == reft[-1])) {startt--; reft--;} + + if ((ipt-startt) > longest) + { + longest = (int)(ipt-startt); + *matchpos 
= reft; + *startpos = startt; + } + } + ref = GETNEXT(ref); + } + + return longest; +} + + + +//************************************** +// Compression functions +//************************************** + +/* +int LZ4_compressHC( + const char* source, + char* dest, + int inputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest'. +Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize). +return : the number of bytes written in buffer 'dest' +*/ +#define FUNCTION_NAME LZ4_compressHC +#include "lz4hc_encoder.h" + + +/* +int LZ4_compressHC_limitedOutput( + const char* source, + char* dest, + int inputSize, + int maxOutputSize) + +Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. +If it cannot achieve it, compression will stop, and result of the function will be zero. +return : the number of bytes written in buffer 'dest', or 0 if the compression fails +*/ +#define FUNCTION_NAME LZ4_compressHC_limitedOutput +#define LIMITED_OUTPUT +#include "lz4hc_encoder.h" + diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h new file mode 100644 index 0000000000..2bdbdb0ed6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h @@ -0,0 +1,111 @@ +/* + LZ4 HC - High Compression Mode of LZ4 + Header File + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : http://code.google.com/p/lz4/ +*/ +#pragma once + + +#if defined (__cplusplus) +extern "C" { +#endif + + +int LZ4_compressHC (const char* source, char* dest, int inputSize); +/* +LZ4_compressHC : + return : the number of bytes in compressed buffer dest + or 0 if compression fails. + note : destination buffer must be already allocated. + To avoid any problem, size it to handle worst cases situations (input data not compressible) + Worst case size evaluation is provided by function LZ4_compressBound() (see "lz4.h") +*/ + +int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize); +/* +LZ4_compress_limitedOutput() : + Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. + If it cannot achieve it, compression will stop, and result of the function will be zero. 
+ This function never writes outside of provided output buffer. + + inputSize : Max supported value is 1 GB + maxOutputSize : is maximum allowed size into the destination buffer (which must be already allocated) + return : the number of output bytes written in buffer 'dest' + or 0 if compression fails. +*/ + + +/* Note : +Decompression functions are provided within LZ4 source code (see "lz4.h") (BSD license) +*/ + + +/* Advanced Functions */ + +void* LZ4_createHC (const char* slidingInputBuffer); +int LZ4_compressHC_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize); +int LZ4_compressHC_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize); +char* LZ4_slideInputBufferHC (void* LZ4HC_Data); +int LZ4_freeHC (void* LZ4HC_Data); + +/* +These functions allow the compression of dependent blocks, where each block benefits from prior 64 KB within preceding blocks. +In order to achieve this, it is necessary to start creating the LZ4HC Data Structure, thanks to the function : + +void* LZ4_createHC (const char* slidingInputBuffer); +The result of the function is the (void*) pointer on the LZ4HC Data Structure. +This pointer will be needed in all other functions. +If the pointer returned is NULL, then the allocation has failed, and compression must be aborted. +The only parameter 'const char* slidingInputBuffer' must, obviously, point at the beginning of input buffer. +The input buffer must be already allocated, and size at least 192KB. +'slidingInputBuffer' will also be the 'const char* source' of the first block. + +All blocks are expected to lay next to each other within the input buffer, starting from 'slidingInputBuffer'. +To compress each block, use either LZ4_compressHC_continue() or LZ4_compressHC_limitedOutput_continue(). 
+Their behavior are identical to LZ4_compressHC() or LZ4_compressHC_limitedOutput(), +but require the LZ4HC Data Structure as their first argument, and check that each block starts right after the previous one. +If next block does not begin immediately after the previous one, the compression will fail (return 0). + +When it's no longer possible to lay the next block after the previous one (not enough space left into input buffer), a call to : +char* LZ4_slideInputBufferHC(void* LZ4HC_Data); +must be performed. It will typically copy the latest 64KB of input at the beginning of input buffer. +Note that, for this function to work properly, minimum size of an input buffer must be 192KB. +==> The memory position where the next input data block must start is provided as the result of the function. + +Compression can then resume, using LZ4_compressHC_continue() or LZ4_compressHC_limitedOutput_continue(), as usual. + +When compression is completed, a call to LZ4_freeHC() will release the memory used by the LZ4HC Data Structure. +*/ + + +#if defined (__cplusplus) +} +#endif diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h new file mode 100644 index 0000000000..6c5374348f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h @@ -0,0 +1,349 @@ +/* + LZ4 HC Encoder - Part of LZ4 HC algorithm + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : http://code.google.com/p/lz4/ +*/ + +/* lz4hc_encoder.h must be included into lz4hc.c + The objective of this file is to create a single LZ4 compression function source + which will be instanciated multiple times with minor variations + depending on a set of #define. 
+*/ + +//**************************** +// Check required defines +//**************************** + +#ifndef FUNCTION_NAME +# error "FUNTION_NAME is not defined" +#endif + + +//**************************** +// Local definitions +//**************************** +#define COMBINED_NAME_RAW(n1,n2) n1 ## n2 +#define COMBINED_NAME(n1,n2) COMBINED_NAME_RAW(n1,n2) +#define ENCODE_SEQUENCE_NAME COMBINED_NAME(FUNCTION_NAME,_encodeSequence) +#ifdef LIMITED_OUTPUT +# define ENCODE_SEQUENCE(i,o,a,m,r,d) if (ENCODE_SEQUENCE_NAME(i,o,a,m,r,d)) return 0; +#else +# define ENCODE_SEQUENCE(i,o,a,m,r,d) ENCODE_SEQUENCE_NAME(i,o,a,m,r) +#endif + +//**************************** +// Function code +//**************************** + +forceinline int ENCODE_SEQUENCE_NAME ( + const BYTE** ip, + BYTE** op, + const BYTE** anchor, + int matchLength, + const BYTE* ref +#ifdef LIMITED_OUTPUT + ,BYTE* oend +#endif + ) +{ + int length, len; + BYTE* token; + + // Encode Literal length + length = (int)(*ip - *anchor); + token = (*op)++; +#ifdef LIMITED_OUTPUT + if ((*op + length + (2 + 1 + LASTLITERALS) + (length>>8)) > oend) return 1; // Check output limit +#endif + if (length>=(int)RUN_MASK) { *token=(RUN_MASK< 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; } + else *token = (BYTE)(length<>8) > oend) return 1; // Check output limit +#endif + if (length>=(int)ML_MASK) { *token+=ML_MASK; length-=ML_MASK; for(; length > 509 ; length-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (length > 254) { length-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)length; } + else *token += (BYTE)(length); + + // Prepare next loop + *ip += matchLength; + *anchor = *ip; + + return 0; +} + + +int COMBINED_NAME(FUNCTION_NAME,_continue) ( + void* ctxvoid, + const char* source, + char* dest, + int inputSize +#ifdef LIMITED_OUTPUT + ,int maxOutputSize +#endif + ) +{ + LZ4HC_Data_Structure* ctx = (LZ4HC_Data_Structure*) ctxvoid; + const BYTE* ip = (const BYTE*) source; + const BYTE* anchor = ip; + const BYTE* const iend = ip + 
inputSize; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = (iend - LASTLITERALS); + + BYTE* op = (BYTE*) dest; +#ifdef LIMITED_OUTPUT + BYTE* const oend = op + maxOutputSize; +#endif + + int ml, ml2, ml3, ml0; + const BYTE* ref=NULL; + const BYTE* start2=NULL; + const BYTE* ref2=NULL; + const BYTE* start3=NULL; + const BYTE* ref3=NULL; + const BYTE* start0; + const BYTE* ref0; + + // Ensure blocks follow each other + if (ip != ctx->end) return 0; + ctx->end += inputSize; + + ip++; + + // Main Loop + while (ip < mflimit) + { + ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref)); + if (!ml) { ip++; continue; } + + // saved, in case we would skip too much + start0 = ip; + ref0 = ref; + ml0 = ml; + +_Search2: + if (ip+ml < mflimit) + ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2); + else ml2 = ml; + + if (ml2 == ml) // No better match + { + ENCODE_SEQUENCE(&ip, &op, &anchor, ml, ref, oend); + continue; + } + + if (start0 < ip) + { + if (start2 < ip + ml0) // empirical + { + ip = start0; + ref = ref0; + ml = ml0; + } + } + + // Here, start0==ip + if ((start2 - ip) < 3) // First Match too small : removed + { + ml = ml2; + ip = start2; + ref =ref2; + goto _Search2; + } + +_Search3: + // Currently we have : + // ml2 > ml1, and + // ip1+3 <= ip2 (usually < ip1+ml1) + if ((start2 - ip) < OPTIMAL_ML) + { + int correction; + int new_ml = ml; + if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; + if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH; + correction = new_ml - (int)(start2 - ip); + if (correction > 0) + { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } + // Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) + + if (start2 + ml2 < mflimit) + ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3); + else ml3 = ml2; + + if (ml3 == ml2) // No better match : 2 
sequences to encode + { + // ip & ref are known; Now for ml + if (start2 < ip+ml) ml = (int)(start2 - ip); + // Now, encode 2 sequences + ENCODE_SEQUENCE(&ip, &op, &anchor, ml, ref, oend); + ip = start2; + ENCODE_SEQUENCE(&ip, &op, &anchor, ml2, ref2, oend); + continue; + } + + if (start3 < ip+ml+3) // Not enough space for match 2 : remove it + { + if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 + { + if (start2 < ip+ml) + { + int correction = (int)(ip+ml - start2); + start2 += correction; + ref2 += correction; + ml2 -= correction; + if (ml2 < MINMATCH) + { + start2 = start3; + ref2 = ref3; + ml2 = ml3; + } + } + + ENCODE_SEQUENCE(&ip, &op, &anchor, ml, ref, oend); + ip = start3; + ref = ref3; + ml = ml3; + + start0 = start2; + ref0 = ref2; + ml0 = ml2; + goto _Search2; + } + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + goto _Search3; + } + + // OK, now we have 3 ascending matches; let's write at least the first one + // ip & ref are known; Now for ml + if (start2 < ip+ml) + { + if ((start2 - ip) < (int)ML_MASK) + { + int correction; + if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; + if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; + correction = ml - (int)(start2 - ip); + if (correction > 0) + { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } + else + { + ml = (int)(start2 - ip); + } + } + ENCODE_SEQUENCE(&ip, &op, &anchor, ml, ref, oend); + + ip = start2; + ref = ref2; + ml = ml2; + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + + goto _Search3; + + } + + // Encode Last Literals + { + int lastRun = (int)(iend - anchor); +#ifdef LIMITED_OUTPUT + if (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) return 0; // Check output limit +#endif + if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } + else *op++ = (BYTE)(lastRun<