HADOOP-8686. Fix warnings in native code. Contributed by Colin Patrick McCabe
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1375301 13f79535-47bb-0310-9956-ffa450edef68
parent d5f080825c
commit 357e990c40
@@ -302,6 +302,8 @@ Branch-2 ( Unreleased changes )
     HADOOP-8700. Use enum to define the checksum constants in DataChecksum.
     (szetszwo)
 
+    HADOOP-8686. Fix warnings in native code. (Colin Patrick McCabe via eli)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -24,7 +24,7 @@
 // Simple Functions
 //****************************
 
-extern int LZ4_compress (char* source, char* dest, int isize);
+extern int LZ4_compress (const char* source, char* dest, int isize);
 
 /*
 LZ4_compress() :
@@ -20,7 +20,7 @@
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 
-int LZ4_uncompress_unknownOutputSize (char* source, char* dest, int isize, int maxOutputSize);
+int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize);
 
 /*
 LZ4_uncompress_unknownOutputSize() :
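Note: both lz4 hunks above are const-correctness fixes to the extern prototypes. A minimal compile-only sketch (not part of the diff, hypothetical caller name) of what the fix buys:

/* Sketch only: with the old non-const prototype, the call below drew
 * "passing 'const char *' ... discards qualifiers"; the const parameter
 * added above lets const input buffers be passed without a cast. */
extern int LZ4_compress (const char* source, char* dest, int isize);

int compress_const_input(const char *src, char *dst, int len) {
  return LZ4_compress(src, dst, len);   /* clean under -Wall with the fixed prototype */
}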
@@ -25,6 +25,8 @@
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
+#define JINT_MAX 0x7fffffff
+
 static jfieldID SnappyCompressor_clazz;
 static jfieldID SnappyCompressor_uncompressedDirectBuf;
 static jfieldID SnappyCompressor_uncompressedDirectBufLen;
@@ -39,7 +41,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   // Load libsnappy.so
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
-    char* msg = (char*)malloc(1000);
+    char msg[1000];
     snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror());
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
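Note: the hunk above replaces a heap buffer that was never freed (the THROW macro raises the Java exception without releasing its message argument) with an automatic array. A standalone sketch of the same pattern (not part of the diff, fprintf stands in for THROW):

/* Sketch only: build a bounded error message on the stack; nothing to free
 * on the early-return path. */
#include <stdio.h>
#include <dlfcn.h>

static int load_library(const char *name) {
  void *handle = dlopen(name, RTLD_LAZY | RTLD_GLOBAL);
  if (handle == NULL) {
    char msg[1000];
    snprintf(msg, sizeof(msg), "Cannot load %s (%s)!", name, dlerror());
    fprintf(stderr, "%s\n", msg);   /* the JNI code throws UnsatisfiedLinkError here */
    return -1;
  }
  dlclose(handle);
  return 0;
}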
@@ -71,6 +73,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize);
+  size_t buf_len;
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
@@ -78,7 +81,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (uncompressed_bytes == 0) {
-    return (jint)0;
+    return 0;
   }
 
   // Get the output direct buffer
@@ -87,15 +90,22 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (compressed_bytes == 0) {
-    return (jint)0;
+    return 0;
   }
 
-  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len, compressed_bytes, &compressed_direct_buf_len);
+  /* size_t should always be 4 bytes or larger. */
+  buf_len = (size_t)compressed_direct_buf_len;
+  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes,
+    uncompressed_direct_buf_len, compressed_bytes, &buf_len);
   if (ret != SNAPPY_OK){
     THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
+    return 0;
+  }
+  if (buf_len > JINT_MAX) {
+    THROW(env, "Ljava/lang/InternalError", "Invalid return buffer length.");
+    return 0;
   }
 
   (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
-
-  return (jint)compressed_direct_buf_len;
+  return (jint)buf_len;
 }
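Note: the rework above exists because the snappy C binding reports the compressed length through a size_t pointer while the JNI field is a 32-bit jint; passing &compressed_direct_buf_len was a pointer-type mismatch and, on LP64, a too-narrow output slot. A minimal sketch (not part of the diff) of the widen-then-guard-then-narrow step, assuming the usual snappy-c.h signature snappy_status snappy_compress(const char *input, size_t input_length, char *compressed, size_t *compressed_length):

/* Sketch only: widen to size_t for the library call, then narrow back to a
 * jint-sized value only after proving it fits. */
#include <stddef.h>
#include <stdint.h>

#define JINT_MAX 0x7fffffff

/* returns -1 if the length cannot be represented as a 32-bit jint */
static int32_t narrow_compressed_len(size_t buf_len) {
  if (buf_len > JINT_MAX) {
    return -1;                 /* the JNI code throws InternalError instead */
  }
  return (int32_t)buf_len;     /* safe: value proven to fit */
}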
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 
+#define _GNU_SOURCE
+
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
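Note: feature-test macros only take effect if they are defined before the first system header is read, which is why the new #define _GNU_SOURCE sits at the very top of the file. A minimal sketch (not part of the diff); sync_file_range() is just one example of a glibc prototype guarded by _GNU_SOURCE:

/* Sketch only: defining _GNU_SOURCE after a header has already been
 * included has no effect on that header, so it must come first. */
#define _GNU_SOURCE
#include <fcntl.h>

int probe(int fd) {
  /* declared only when _GNU_SOURCE is set; without it gcc warns about an
   * implicit declaration */
  return sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
}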
@@ -366,23 +368,15 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_chmod(
  */
 static void throw_ioe(JNIEnv* env, int errnum)
 {
-  const char* message;
-  char buffer[80];
+  char message[80];
   jstring jstr_message;
 
-  buffer[0] = 0;
-#ifdef STRERROR_R_CHAR_P
-  // GNU strerror_r
-  message = strerror_r(errnum, buffer, sizeof(buffer));
-  assert (message != NULL);
-#else
-  int ret = strerror_r(errnum, buffer, sizeof(buffer));
-  if (ret == 0) {
-    message = buffer;
+  if ((errnum >= 0) && (errnum < sys_nerr)) {
+    snprintf(message, sizeof(message), "%s", sys_errlist[errnum]);
   } else {
-    message = "Unknown error";
+    snprintf(message, sizeof(message), "Unknown error %d", errnum);
   }
-#endif
+
   jobject errno_obj = errno_to_enum(env, errnum);
 
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL)
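Note: the old throw_ioe() juggled the two incompatible strerror_r() variants (the GNU one returns char *, the POSIX one returns int), whose selection depends on feature-test macros; the rewrite above drops strerror_r entirely and indexes sys_errlist with a bounds check. A standalone sketch of that lookup (not part of the diff), assuming a glibc old enough to still declare sys_nerr/sys_errlist in <stdio.h> (they were removed from the public headers in glibc 2.32):

/* Sketch only: bounded table lookup with a numeric fallback. */
#include <stdio.h>

static void format_errno(char *message, size_t len, int errnum) {
  if (errnum >= 0 && errnum < sys_nerr) {
    snprintf(message, len, "%s", sys_errlist[errnum]);
  } else {
    snprintf(message, len, "Unknown error %d", errnum);
  }
}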
@@ -40,8 +40,8 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupForUser
 (JNIEnv *env, jobject jobj, jstring juser) {
   extern int getGroupIDList(const char *user, int *ngroups, gid_t **groups);
   extern int getGroupDetails(gid_t group, char **grpBuf);
-
-  jobjectArray jgroups;
+  const char *cuser = NULL;
+  jobjectArray jgroups = NULL;
   int error = -1;
 
   if (emptyGroups == NULL) {
@@ -56,7 +56,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupForUser
     }
   }
   char *grpBuf = NULL;
-  const char *cuser = (*env)->GetStringUTFChars(env, juser, NULL);
+  cuser = (*env)->GetStringUTFChars(env, juser, NULL);
   if (cuser == NULL) {
     goto cleanup;
   }
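Note: the two getGroupForUser hunks hoist cuser above the first goto cleanup and NULL-initialize jgroups, so the cleanup path never touches an uninitialized value and gcc's may-be-used-uninitialized warning disappears. A generic sketch of the pattern (not part of the diff, hypothetical names):

/* Sketch only: everything the cleanup label releases is declared and
 * NULL-initialized before the first goto. */
#include <stdlib.h>

static int load_record(int fail_early) {
  char *buf = NULL;            /* plays the role of cuser / jgroups above */
  int error = -1;

  if (fail_early) {
    goto cleanup;              /* jumps over the allocation below */
  }
  buf = malloc(128);
  if (buf == NULL) {
    goto cleanup;
  }
  error = 0;

cleanup:
  free(buf);                   /* free(NULL) is a no-op, so always safe */
  return error;
}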
@@ -45,6 +45,8 @@ typedef struct listElement UserList;
 JNIEXPORT jobjectArray JNICALL
 Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNetgroupJNI
 (JNIEnv *env, jobject jobj, jstring jgroup) {
+  UserList *userListHead = NULL;
+  int userListSize = 0;
 
   // pointers to free at the end
   const char *cgroup = NULL;
@@ -65,9 +67,6 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   // get users
   // see man pages for setnetgrent, getnetgrent and endnetgrent
 
-  UserList *userListHead = NULL;
-  int userListSize = 0;
-
   // set the name of the group for subsequent calls to getnetgrent
   // note that we want to end group lokup regardless whether setnetgrent
   // was successful or not (as long as it was called we need to call
@@ -18,6 +18,7 @@
 
 #include <arpa/inet.h>
 #include <assert.h>
+#include <inttypes.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
@@ -50,7 +51,7 @@ static void throw_checksum_exception(JNIEnv *env,
 
   // Format error message
   snprintf(message, sizeof(message),
-    "Checksum error: %s at %ld exp: %d got: %d",
+    "Checksum error: %s at %"PRId64" exp: %"PRId32" got: %"PRId32,
     filename, pos, expected_crc, got_crc);
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL) {
     goto cleanup;
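Note: the format-string fix above relies on the <inttypes.h> include added in the previous hunk; %ld only matches a 64-bit offset on LP64 targets, while the PRId64/PRId32 macros expand to the correct length modifier on every platform, which is what -Wformat asks for. A standalone sketch (not part of the diff):

/* Sketch only: exact-width format macros instead of %ld / %d. */
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t pos = 1234567890123LL;
  int32_t expected_crc = 42, got_crc = 43;
  printf("Checksum error at %" PRId64 " exp: %" PRId32 " got: %" PRId32 "\n",
         pos, expected_crc, got_crc);
  return 0;
}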
@@ -41,7 +41,7 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);
 
 #ifdef USE_PIPELINED
 static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks);
-#endif USE_PIPELINED
+#endif
 static int cached_cpu_supports_crc32; // initialized by constructor below
 static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length);
 
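Note: ISO C allows no tokens after #endif; gcc reports them with -Wendif-labels ("extra tokens at end of #endif directive"). If a label is wanted, the conventional spelling is a trailing comment, as in this sketch (not part of the diff):

/* Sketch only. */
#ifdef USE_PIPELINED
/* ... pipelined CRC32C declarations ... */
#endif /* USE_PIPELINED */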