HDFS-14304: High lock contention on hdfsHashMutex in libhdfs

This closes #595

Signed-off-by: Todd Lipcon <todd@apache.org>

commit 18c57cf046 (parent f426b7ce8f)
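
Note (a paraphrase of what the diff below shows, not part of the original commit message): libhdfs previously resolved Java classes by JNI class-name string on every invokeMethod / constructNewObjectOfClass call, going through the common/htable hash table, which the commit title suggests was the source of the hdfsHashMutex contention. The hunks below delete that hash table (common/htable.c, common/htable.h and its test) and instead pass pre-cached class references (the JC_* constants from the new jclasses.c/jclasses.h) to invokeMethod, constructNewObjectOfCachedClass, and findClassAndInvokeMethod. The illustrative before/after is copied from the DomainSocket hunk further down:

    /* before: class resolved by name on every call */
    jthr = invokeMethod(env, NULL, STATIC, NULL,
        "org/apache/hadoop/net/unix/DomainSocket",
        "disableBindPathValidation", "()V");

    /* after: pre-cached class reference, no per-call lookup */
    jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
        "disableBindPathValidation", "()V");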
@@ -29,8 +29,8 @@ include_directories(

 add_library(native_mini_dfs
     native_mini_dfs.c
-    ../libhdfs/common/htable.c
     ../libhdfs/exception.c
+    ../libhdfs/jclasses.c
     ../libhdfs/jni_helper.c
     ${OS_DIR}/mutexes.c
     ${OS_DIR}/thread_local_storage.c
@@ -39,6 +39,3 @@ add_library(native_mini_dfs
 add_executable(test_native_mini_dfs test_native_mini_dfs.c)
 target_link_libraries(test_native_mini_dfs native_mini_dfs ${JAVA_JVM_LIBRARY})
 add_test(test_test_native_mini_dfs test_native_mini_dfs)
-
-add_executable(test_htable ../libhdfs/common/htable.c test_htable.c)
-target_link_libraries(test_htable ${OS_LINK_LIBRARIES})
@@ -17,6 +17,7 @@
 */

 #include "exception.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "native_mini_dfs.h"
 #include "platform.h"
@@ -36,9 +37,7 @@

 #define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
 #define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
-#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
 #define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
-#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"

 struct NativeMiniDfsCluster {
     /**
@@ -60,8 +59,7 @@ static int hdfsDisableDomainSocketSecurity(void)
         errno = EINTERNAL;
         return -1;
     }
-    jthr = invokeMethod(env, NULL, STATIC, NULL,
-        "org/apache/hadoop/net/unix/DomainSocket",
+    jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
         "disableBindPathValidation", "()V");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -126,11 +124,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
             "nmdCreate: new Configuration");
         goto error;
     }
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdCreate: Configuration::setBoolean");
-        goto error;
-    }
     // Disable 'minimum block size' -- it's annoying in tests.
     (*env)->DeleteLocalRef(env, jconfStr);
     jconfStr = NULL;
@@ -140,8 +133,9 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
             "nmdCreate: new String");
         goto error;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-        "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
+    jthr = invokeMethod(env, NULL, INSTANCE, cobj,
+        JC_CONFIGURATION, "setLong", "(Ljava/lang/String;J)V", jconfStr,
+        0LL);
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "nmdCreate: Configuration::setLong");
@@ -163,7 +157,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
             goto error;
         }
     }
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+    jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
         "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
@@ -172,7 +166,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
     }
     (*env)->DeleteLocalRef(env, val.l);
     if (conf->webhdfsEnabled) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+        jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
             conf->namenodeHttpPort);
         if (jthr) {
@@ -183,7 +177,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
         (*env)->DeleteLocalRef(env, val.l);
     }
     if (conf->numDataNodes) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+        jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "numDataNodes", "(I)L" MINIDFS_CLUSTER_BUILDER ";", conf->numDataNodes);
         if (jthr) {
             printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
@@ -192,7 +186,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
         }
     }
     (*env)->DeleteLocalRef(env, val.l);
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+    jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
         "build", "()L" MINIDFS_CLUSTER ";");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -242,7 +236,7 @@ int nmdShutdown(struct NativeMiniDfsCluster* cl)
         fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
         return -EIO;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+    jthr = findClassAndInvokeMethod(env, NULL, INSTANCE, cl->obj,
         MINIDFS_CLUSTER, "shutdown", "()V");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -260,7 +254,7 @@ int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
         fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
         return -EIO;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+    jthr = findClassAndInvokeMethod(env, NULL, INSTANCE, cl->obj,
         MINIDFS_CLUSTER, "waitClusterUp", "()V");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -282,7 +276,7 @@ int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
     }
     // Note: this will have to be updated when HA nativeMiniDfs clusters are
     // supported
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, cl->obj,
         MINIDFS_CLUSTER, "getNameNodePort", "()I");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -307,7 +301,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
         return -EIO;
     }
     // First get the (first) NameNode of the cluster
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
         "getNameNode", "()L" HADOOP_NAMENODE ";");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -318,8 +312,8 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     jNameNode = jVal.l;

     // Then get the http address (InetSocketAddress) of the NameNode
-    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
-        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
+        "getHttpAddress", "()L" JAVA_NET_ISA ";");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "nmdGetNameNodeHttpAddress: "
@@ -328,8 +322,8 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     }
     jAddress = jVal.l;

-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jAddress,
+        JAVA_NET_ISA, "getPort", "()I");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "nmdGetNameNodeHttpAddress: "
@@ -338,7 +332,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     }
     *port = jVal.i;

-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_NET_ISA,
         "getHostName", "()Ljava/lang/String;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-#include "expect.h"
-#include "hdfs_test.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-// Disable type cast and loss of precision warnings, because the test
-// manipulates void* values manually on purpose.
-#ifdef WIN32
-#pragma warning(disable: 4244 4306)
-#endif
-
-static uint32_t simple_hash(const void *key, uint32_t size)
-{
-    uintptr_t k = (uintptr_t)key;
-    return ((13 + k) * 6367) % size;
-}
-
-static int simple_compare(const void *a, const void *b)
-{
-    return a == b;
-}
-
-static void expect_102(void *f, void *k, void *v)
-{
-    int *found_102 = f;
-    uintptr_t key = (uintptr_t)k;
-    uintptr_t val = (uintptr_t)v;
-
-    if ((key == 2) && (val == 102)) {
-        *found_102 = 1;
-    } else {
-        abort();
-    }
-}
-
-static void *htable_pop_val(struct htable *ht, void *key)
-{
-    void *old_key, *old_val;
-
-    htable_pop(ht, key, &old_key, &old_val);
-    return old_val;
-}
-
-int main(void)
-{
-    struct htable *ht;
-    int found_102 = 0;
-
-    ht = htable_alloc(4, simple_hash, simple_compare);
-    EXPECT_INT_EQ(0, htable_used(ht));
-    EXPECT_INT_EQ(4, htable_capacity(ht));
-    EXPECT_NULL(htable_get(ht, (void*)123));
-    EXPECT_NULL(htable_pop_val(ht, (void*)123));
-    EXPECT_ZERO(htable_put(ht, (void*)123, (void*)456));
-    EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
-    EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
-    EXPECT_NULL(htable_pop_val(ht, (void*)123));
-
-    // Enlarge the hash table
-    EXPECT_ZERO(htable_put(ht, (void*)1, (void*)101));
-    EXPECT_ZERO(htable_put(ht, (void*)2, (void*)102));
-    EXPECT_ZERO(htable_put(ht, (void*)3, (void*)103));
-    EXPECT_INT_EQ(3, htable_used(ht));
-    EXPECT_INT_EQ(8, htable_capacity(ht));
-    EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
-    EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
-    EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
-    EXPECT_INT_EQ(1, htable_used(ht));
-    htable_visit(ht, expect_102, &found_102);
-    EXPECT_INT_EQ(1, found_102);
-    htable_free(ht);
-
-    fprintf(stderr, "SUCCESS.\n");
-    return EXIT_SUCCESS;
-}
-
-// vim: ts=4:sw=4:tw=79:et
@@ -35,7 +35,7 @@ hadoop_add_dual_library(hdfs
     exception.c
     jni_helper.c
     hdfs.c
-    common/htable.c
+    jclasses.c
     ${OS_DIR}/mutexes.c
     ${OS_DIR}/thread_local_storage.c
 )
@@ -1,287 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-struct htable_pair {
-    void *key;
-    void *val;
-};
-
-/**
- * A hash table which uses linear probing.
- */
-struct htable {
-    uint32_t capacity;
-    uint32_t used;
-    htable_hash_fn_t hash_fun;
-    htable_eq_fn_t eq_fun;
-    struct htable_pair *elem;
-};
-
-/**
- * An internal function for inserting a value into the hash table.
- *
- * Note: this function assumes that you have made enough space in the table.
- *
- * @param nelem The new element to insert.
- * @param capacity The capacity of the hash table.
- * @param hash_fun The hash function to use.
- * @param key The key to insert.
- * @param val The value to insert.
- */
-static void htable_insert_internal(struct htable_pair *nelem,
-        uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
-        void *val)
-{
-    uint32_t i;
-
-    i = hash_fun(key, capacity);
-    while (1) {
-        if (!nelem[i].key) {
-            nelem[i].key = key;
-            nelem[i].val = val;
-            return;
-        }
-        i++;
-        if (i == capacity) {
-            i = 0;
-        }
-    }
-}
-
-static int htable_realloc(struct htable *htable, uint32_t new_capacity)
-{
-    struct htable_pair *nelem;
-    uint32_t i, old_capacity = htable->capacity;
-    htable_hash_fn_t hash_fun = htable->hash_fun;
-
-    nelem = calloc(new_capacity, sizeof(struct htable_pair));
-    if (!nelem) {
-        return ENOMEM;
-    }
-    for (i = 0; i < old_capacity; i++) {
-        struct htable_pair *pair = htable->elem + i;
-        if (pair->key) {
-            htable_insert_internal(nelem, new_capacity, hash_fun,
-                pair->key, pair->val);
-        }
-    }
-    free(htable->elem);
-    htable->elem = nelem;
-    htable->capacity = new_capacity;
-    return 0;
-}
-
-static uint32_t round_up_to_power_of_2(uint32_t i)
-{
-    if (i == 0) {
-        return 1;
-    }
-    i--;
-    i |= i >> 1;
-    i |= i >> 2;
-    i |= i >> 4;
-    i |= i >> 8;
-    i |= i >> 16;
-    i++;
-    return i;
-}
-
-struct htable *htable_alloc(uint32_t size,
-        htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
-{
-    struct htable *htable;
-
-    htable = calloc(1, sizeof(*htable));
-    if (!htable) {
-        return NULL;
-    }
-    size = round_up_to_power_of_2(size);
-    if (size < HTABLE_MIN_SIZE) {
-        size = HTABLE_MIN_SIZE;
-    }
-    htable->hash_fun = hash_fun;
-    htable->eq_fun = eq_fun;
-    htable->used = 0;
-    if (htable_realloc(htable, size)) {
-        free(htable);
-        return NULL;
-    }
-    return htable;
-}
-
-void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
-{
-    uint32_t i;
-
-    for (i = 0; i != htable->capacity; ++i) {
-        struct htable_pair *elem = htable->elem + i;
-        if (elem->key) {
-            fun(ctx, elem->key, elem->val);
-        }
-    }
-}
-
-void htable_free(struct htable *htable)
-{
-    if (htable) {
-        free(htable->elem);
-        free(htable);
-    }
-}
-
-int htable_put(struct htable *htable, void *key, void *val)
-{
-    int ret;
-    uint32_t nused;
-
-    // NULL is not a valid key value.
-    // This helps us implement htable_get_internal efficiently, since we know
-    // that we can stop when we encounter the first NULL key.
-    if (!key) {
-        return EINVAL;
-    }
-    // NULL is not a valid value. Otherwise the results of htable_get would
-    // be confusing (does a NULL return mean entry not found, or that the
-    // entry was found and was NULL?)
-    if (!val) {
-        return EINVAL;
-    }
-    // Re-hash if we have used more than half of the hash table
-    nused = htable->used + 1;
-    if (nused >= (htable->capacity / 2)) {
-        ret = htable_realloc(htable, htable->capacity * 2);
-        if (ret)
-            return ret;
-    }
-    htable_insert_internal(htable->elem, htable->capacity,
-        htable->hash_fun, key, val);
-    htable->used++;
-    return 0;
-}
-
-static int htable_get_internal(const struct htable *htable,
-        const void *key, uint32_t *out)
-{
-    uint32_t start_idx, idx;
-
-    start_idx = htable->hash_fun(key, htable->capacity);
-    idx = start_idx;
-    while (1) {
-        struct htable_pair *pair = htable->elem + idx;
-        if (!pair->key) {
-            // We always maintain the invariant that the entries corresponding
-            // to a given key are stored in a contiguous block, not separated
-            // by any NULLs. So if we encounter a NULL, our search is over.
-            return ENOENT;
-        } else if (htable->eq_fun(pair->key, key)) {
-            *out = idx;
-            return 0;
-        }
-        idx++;
-        if (idx == htable->capacity) {
-            idx = 0;
-        }
-        if (idx == start_idx) {
-            return ENOENT;
-        }
-    }
-}
-
-void *htable_get(const struct htable *htable, const void *key)
-{
-    uint32_t idx;
-
-    if (htable_get_internal(htable, key, &idx)) {
-        return NULL;
-    }
-    return htable->elem[idx].val;
-}
-
-void htable_pop(struct htable *htable, const void *key,
-        void **found_key, void **found_val)
-{
-    uint32_t hole, i;
-    const void *nkey;
-
-    if (htable_get_internal(htable, key, &hole)) {
-        *found_key = NULL;
-        *found_val = NULL;
-        return;
-    }
-    i = hole;
-    htable->used--;
-    // We need to maintain the compactness invariant used in
-    // htable_get_internal. This invariant specifies that the entries for any
-    // given key are never separated by NULLs (although they may be separated
-    // by entries for other keys.)
-    while (1) {
-        i++;
-        if (i == htable->capacity) {
-            i = 0;
-        }
-        nkey = htable->elem[i].key;
-        if (!nkey) {
-            *found_key = htable->elem[hole].key;
-            *found_val = htable->elem[hole].val;
-            htable->elem[hole].key = NULL;
-            htable->elem[hole].val = NULL;
-            return;
-        } else if (htable->eq_fun(key, nkey)) {
-            htable->elem[hole].key = htable->elem[i].key;
-            htable->elem[hole].val = htable->elem[i].val;
-            hole = i;
-        }
-    }
-}
-
-uint32_t htable_used(const struct htable *htable)
-{
-    return htable->used;
-}
-
-uint32_t htable_capacity(const struct htable *htable)
-{
-    return htable->capacity;
-}
-
-uint32_t ht_hash_string(const void *str, uint32_t max)
-{
-    const char *s = str;
-    uint32_t hash = 0;
-
-    while (*s) {
-        hash = (hash * 31) + *s;
-        s++;
-    }
-    return hash % max;
-}
-
-int ht_compare_string(const void *a, const void *b)
-{
-    return strcmp(a, b) == 0;
-}
-
-// vim: ts=4:sw=4:tw=79:et
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HADOOP_CORE_COMMON_HASH_TABLE
-#define HADOOP_CORE_COMMON_HASH_TABLE
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdint.h>
-
-#define HTABLE_MIN_SIZE 4
-
-struct htable;
-
-/**
- * An HTable hash function.
- *
- * @param key The key.
- * @param capacity The total capacity.
- *
- * @return The hash slot. Must be less than the capacity.
- */
-typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
-
-/**
- * An HTable equality function. Compares two keys.
- *
- * @param a First key.
- * @param b Second key.
- *
- * @return nonzero if the keys are equal.
- */
-typedef int (*htable_eq_fn_t)(const void *a, const void *b);
-
-/**
- * Allocate a new hash table.
- *
- * @param capacity The minimum suggested starting capacity.
- * @param hash_fun The hash function to use in this hash table.
- * @param eq_fun The equals function to use in this hash table.
- *
- * @return The new hash table on success; NULL on OOM.
- */
-struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
-        htable_eq_fn_t eq_fun);
-
-typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
-
-/**
- * Visit all of the entries in the hash table.
- *
- * @param htable The hash table.
- * @param fun The callback function to invoke on each key and value.
- * @param ctx Context pointer to pass to the callback.
- */
-void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
-
-/**
- * Free the hash table.
- *
- * It is up the calling code to ensure that the keys and values inside the
- * table are de-allocated, if that is necessary.
- *
- * @param htable The hash table.
- */
-void htable_free(struct htable *htable);
-
-/**
- * Add an entry to the hash table.
- *
- * @param htable The hash table.
- * @param key The key to add. This cannot be NULL.
- * @param fun The value to add. This cannot be NULL.
- *
- * @return 0 on success;
- *         EEXIST if the value already exists in the table;
- *         ENOMEM if there is not enough memory to add the element.
- *         EFBIG if the hash table has too many entries to fit in 32
- *         bits.
- */
-int htable_put(struct htable *htable, void *key, void *val);
-
-/**
- * Get an entry from the hash table.
- *
- * @param htable The hash table.
- * @param key The key to find.
- *
- * @return NULL if there is no such entry; the entry otherwise.
- */
-void *htable_get(const struct htable *htable, const void *key);
-
-/**
- * Get an entry from the hash table and remove it.
- *
- * @param htable The hash table.
- * @param key The key for the entry find and remove.
- * @param found_key (out param) NULL if the entry was not found; the found key
- *                  otherwise.
- * @param found_val (out param) NULL if the entry was not found; the found
- *                  value otherwise.
- */
-void htable_pop(struct htable *htable, const void *key,
-        void **found_key, void **found_val);
-
-/**
- * Get the number of entries used in the hash table.
- *
- * @param htable The hash table.
- *
- * @return The number of entries used in the hash table.
- */
-uint32_t htable_used(const struct htable *htable);
-
-/**
- * Get the capacity of the hash table.
- *
- * @param htable The hash table.
- *
- * @return The capacity of the hash table.
- */
-uint32_t htable_capacity(const struct htable *htable);
-
-/**
- * Hash a string.
- *
- * @param str The string.
- * @param max Maximum hash value
- *
- * @return A number less than max.
- */
-uint32_t ht_hash_string(const void *str, uint32_t max);
-
-/**
- * Compare two strings.
- *
- * @param a The first string.
- * @param b The second string.
- *
- * @return 1 if the strings are identical; 0 otherwise.
- */
-int ht_compare_string(const void *a, const void *b);
-
-#endif
-
-// vim: ts=4:sw=4:tw=79:et
@@ -18,6 +18,7 @@

 #include "exception.h"
 #include "hdfs/hdfs.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "platform.h"

@@ -129,8 +130,7 @@ static char* getExceptionUtilString(JNIEnv *env, jthrowable exc, char *methodName
     jvalue jVal;
     jstring jStr = NULL;
     char *excString = NULL;
-    jthr = invokeMethod(env, &jVal, STATIC, NULL,
-        "org/apache/commons/lang3/exception/ExceptionUtils",
+    jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_EXCEPTION_UTILS,
         methodName, "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
     if (jthr) {
         destroyLocalReference(env, jthr);
@ -18,6 +18,7 @@
|
|||||||
|
|
||||||
#include "exception.h"
|
#include "exception.h"
|
||||||
#include "hdfs/hdfs.h"
|
#include "hdfs/hdfs.h"
|
||||||
|
#include "jclasses.h"
|
||||||
#include "jni_helper.h"
|
#include "jni_helper.h"
|
||||||
#include "platform.h"
|
#include "platform.h"
|
||||||
|
|
||||||
@ -26,23 +27,6 @@
|
|||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
|
||||||
/* Some frequently used Java paths */
|
|
||||||
#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
|
|
||||||
#define HADOOP_PATH "org/apache/hadoop/fs/Path"
|
|
||||||
#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
|
|
||||||
#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
|
|
||||||
#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
|
|
||||||
#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
|
|
||||||
#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
|
|
||||||
#define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
|
|
||||||
#define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
|
|
||||||
#define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
|
|
||||||
#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
|
|
||||||
#define JAVA_NET_ISA "java/net/InetSocketAddress"
|
|
||||||
#define JAVA_NET_URI "java/net/URI"
|
|
||||||
#define JAVA_STRING "java/lang/String"
|
|
||||||
#define READ_OPTION "org/apache/hadoop/fs/ReadOption"
|
|
||||||
|
|
||||||
#define JAVA_VOID "V"
|
#define JAVA_VOID "V"
|
||||||
|
|
||||||
/* Macros for constructing method signatures */
|
/* Macros for constructing method signatures */
|
||||||
@ -109,8 +93,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
|
|||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
|
||||||
HADOOP_DFS,
|
JC_DISTRIBUTED_FILE_SYSTEM, "getHedgedReadMetrics",
|
||||||
"getHedgedReadMetrics",
|
|
||||||
"()Lorg/apache/hadoop/hdfs/DFSHedgedReadMetrics;");
|
"()Lorg/apache/hadoop/hdfs/DFSHedgedReadMetrics;");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -126,8 +109,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
|
|||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
|
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
|
||||||
"org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
|
JC_DFS_HEDGED_READ_METRICS, "getHedgedReadOps", "()J");
|
||||||
"getHedgedReadOps", "()J");
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsGetHedgedReadStatistics: getHedgedReadOps failed");
|
"hdfsGetHedgedReadStatistics: getHedgedReadOps failed");
|
||||||
@ -136,8 +118,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
|
|||||||
m->hedgedReadOps = jVal.j;
|
m->hedgedReadOps = jVal.j;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
|
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
|
||||||
"org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
|
JC_DFS_HEDGED_READ_METRICS, "getHedgedReadWins", "()J");
|
||||||
"getHedgedReadWins", "()J");
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsGetHedgedReadStatistics: getHedgedReadWins failed");
|
"hdfsGetHedgedReadStatistics: getHedgedReadWins failed");
|
||||||
@ -146,8 +127,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
|
|||||||
m->hedgedReadOpsWin = jVal.j;
|
m->hedgedReadOpsWin = jVal.j;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
|
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
|
||||||
"org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
|
JC_DFS_HEDGED_READ_METRICS, "getHedgedReadOpsInCurThread", "()J");
|
||||||
"getHedgedReadOpsInCurThread", "()J");
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsGetHedgedReadStatistics: getHedgedReadOpsInCurThread failed");
|
"hdfsGetHedgedReadStatistics: getHedgedReadOpsInCurThread failed");
|
||||||
@ -193,8 +173,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
|
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
|
||||||
"org/apache/hadoop/hdfs/client/HdfsDataInputStream",
|
JC_HDFS_DATA_INPUT_STREAM, "getReadStatistics",
|
||||||
"getReadStatistics",
|
|
||||||
"()Lorg/apache/hadoop/hdfs/ReadStatistics;");
|
"()Lorg/apache/hadoop/hdfs/ReadStatistics;");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -208,8 +187,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
JC_READ_STATISTICS, "getTotalBytesRead", "()J");
|
||||||
"getTotalBytesRead", "()J");
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsFileGetReadStatistics: getTotalBytesRead failed");
|
"hdfsFileGetReadStatistics: getTotalBytesRead failed");
|
||||||
@ -218,8 +196,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||||||
s->totalBytesRead = jVal.j;
|
s->totalBytesRead = jVal.j;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
JC_READ_STATISTICS, "getTotalLocalBytesRead", "()J");
|
||||||
"getTotalLocalBytesRead", "()J");
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsFileGetReadStatistics: getTotalLocalBytesRead failed");
|
"hdfsFileGetReadStatistics: getTotalLocalBytesRead failed");
|
||||||
@ -228,8 +205,8 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||||||
s->totalLocalBytesRead = jVal.j;
|
s->totalLocalBytesRead = jVal.j;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
JC_READ_STATISTICS, "getTotalShortCircuitBytesRead",
|
||||||
"getTotalShortCircuitBytesRead", "()J");
|
"()J");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsFileGetReadStatistics: getTotalShortCircuitBytesRead failed");
|
"hdfsFileGetReadStatistics: getTotalShortCircuitBytesRead failed");
|
||||||
@ -237,8 +214,8 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||||||
}
|
}
|
||||||
s->totalShortCircuitBytesRead = jVal.j;
|
s->totalShortCircuitBytesRead = jVal.j;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
JC_READ_STATISTICS, "getTotalZeroCopyBytesRead",
|
||||||
"getTotalZeroCopyBytesRead", "()J");
|
"()J");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsFileGetReadStatistics: getTotalZeroCopyBytesRead failed");
|
"hdfsFileGetReadStatistics: getTotalZeroCopyBytesRead failed");
|
||||||
@ -280,8 +257,8 @@ int hdfsFileClearReadStatistics(hdfsFile file)
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, file->file,
|
jthr = invokeMethod(env, NULL, INSTANCE, file->file,
|
||||||
"org/apache/hadoop/hdfs/client/HdfsDataInputStream",
|
JC_HDFS_DATA_INPUT_STREAM, "clearReadStatistics",
|
||||||
"clearReadStatistics", "()V");
|
"()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsFileClearReadStatistics: clearReadStatistics failed");
|
"hdfsFileClearReadStatistics: clearReadStatistics failed");
|
||||||
@ -324,8 +301,7 @@ int hdfsDisableDomainSocketSecurity(void)
|
|||||||
errno = EINTERNAL;
|
errno = EINTERNAL;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, NULL, STATIC, NULL,
|
jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
|
||||||
"org/apache/hadoop/net/unix/DomainSocket",
|
|
||||||
"disableBindPathValidation", "()V");
|
"disableBindPathValidation", "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -363,7 +339,7 @@ static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path,
|
|||||||
if (jthr)
|
if (jthr)
|
||||||
return jthr;
|
return jthr;
|
||||||
//Construct the org.apache.hadoop.fs.Path object
|
//Construct the org.apache.hadoop.fs.Path object
|
||||||
jthr = constructNewObjectOfClass(env, &jPath, "org/apache/hadoop/fs/Path",
|
jthr = constructNewObjectOfCachedClass(env, &jPath, JC_PATH,
|
||||||
"(Ljava/lang/String;)V", jPathString);
|
"(Ljava/lang/String;)V", jPathString);
|
||||||
destroyLocalReference(env, jPathString);
|
destroyLocalReference(env, jPathString);
|
||||||
if (jthr)
|
if (jthr)
|
||||||
@ -383,7 +359,7 @@ static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
|
|||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
||||||
HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
|
JC_CONFIGURATION, "get", JMETHOD1(JPARAM(JAVA_STRING),
|
||||||
JPARAM(JAVA_STRING)), jkey);
|
JPARAM(JAVA_STRING)), jkey);
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
@ -407,7 +383,8 @@ int hdfsConfGetStr(const char *key, char **val)
|
|||||||
ret = EINTERNAL;
|
ret = EINTERNAL;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
|
jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
|
||||||
|
JC_CONFIGURATION, "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsConfGetStr(%s): new Configuration", key);
|
"hdfsConfGetStr(%s): new Configuration", key);
|
||||||
@ -443,8 +420,8 @@ static jthrowable hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
|
|||||||
if (jthr)
|
if (jthr)
|
||||||
return jthr;
|
return jthr;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
||||||
HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
|
JC_CONFIGURATION, "getInt",
|
||||||
jkey, (jint)(*val));
|
JMETHOD2(JPARAM(JAVA_STRING), "I", "I"), jkey, (jint)(*val));
|
||||||
destroyLocalReference(env, jkey);
|
destroyLocalReference(env, jkey);
|
||||||
if (jthr)
|
if (jthr)
|
||||||
return jthr;
|
return jthr;
|
||||||
@ -464,7 +441,8 @@ int hdfsConfGetInt(const char *key, int32_t *val)
|
|||||||
ret = EINTERNAL;
|
ret = EINTERNAL;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
|
jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
|
||||||
|
JC_CONFIGURATION, "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsConfGetInt(%s): new Configuration", key);
|
"hdfsConfGetInt(%s): new Configuration", key);
|
||||||
@ -697,7 +675,8 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// jConfiguration = new Configuration();
|
// jConfiguration = new Configuration();
|
||||||
jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
|
jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
|
||||||
|
JC_CONFIGURATION, "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||||
@ -719,9 +698,10 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
// Get a local filesystem.
|
// Get a local filesystem.
|
||||||
if (bld->forceNewInstance) {
|
if (bld->forceNewInstance) {
|
||||||
// fs = FileSytem#newInstanceLocal(conf);
|
// fs = FileSytem#newInstanceLocal(conf);
|
||||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||||
"newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
|
JC_FILE_SYSTEM, "newInstanceLocal",
|
||||||
JPARAM(HADOOP_LOCALFS)), jConfiguration);
|
JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
|
||||||
|
jConfiguration);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsBuilderConnect(%s)",
|
"hdfsBuilderConnect(%s)",
|
||||||
@ -731,9 +711,9 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
jFS = jVal.l;
|
jFS = jVal.l;
|
||||||
} else {
|
} else {
|
||||||
// fs = FileSytem#getLocal(conf);
|
// fs = FileSytem#getLocal(conf);
|
||||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
|
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||||
JMETHOD1(JPARAM(HADOOP_CONF),
|
JC_FILE_SYSTEM, "getLocal",
|
||||||
JPARAM(HADOOP_LOCALFS)),
|
JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
|
||||||
jConfiguration);
|
jConfiguration);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -746,8 +726,8 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
} else {
|
} else {
|
||||||
if (!strcmp(bld->nn, "default")) {
|
if (!strcmp(bld->nn, "default")) {
|
||||||
// jURI = FileSystem.getDefaultUri(conf)
|
// jURI = FileSystem.getDefaultUri(conf)
|
||||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||||
"getDefaultUri",
|
JC_FILE_SYSTEM, "getDefaultUri",
|
||||||
"(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
|
"(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
|
||||||
jConfiguration);
|
jConfiguration);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
@ -769,9 +749,9 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
|
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||||
"create", "(Ljava/lang/String;)Ljava/net/URI;",
|
JC_URI, "create",
|
||||||
jURIString);
|
"(Ljava/lang/String;)Ljava/net/URI;", jURIString);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsBuilderConnect(%s)",
|
"hdfsBuilderConnect(%s)",
|
||||||
@ -799,11 +779,11 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
if (bld->forceNewInstance) {
|
if (bld->forceNewInstance) {
|
||||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||||
"newInstance", JMETHOD3(JPARAM(JAVA_NET_URI),
|
JC_FILE_SYSTEM, "newInstance",
|
||||||
JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
|
JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
|
||||||
JPARAM(HADOOP_FS)),
|
JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
|
||||||
jURI, jConfiguration, jUserString);
|
jConfiguration, jUserString);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsBuilderConnect(%s)",
|
"hdfsBuilderConnect(%s)",
|
||||||
@ -812,10 +792,11 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
|
|||||||
}
|
}
|
||||||
jFS = jVal.l;
|
jFS = jVal.l;
|
||||||
} else {
|
} else {
|
||||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
|
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||||
|
JC_FILE_SYSTEM, "get",
|
||||||
JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
|
JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
|
||||||
JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
|
JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
|
||||||
jURI, jConfiguration, jUserString);
|
jConfiguration, jUserString);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsBuilderConnect(%s)",
|
"hdfsBuilderConnect(%s)",
|
||||||
@ -877,7 +858,7 @@ int hdfsDisconnect(hdfsFS fs)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"close", "()V");
|
"close", "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -909,8 +890,9 @@ static jthrowable getDefaultBlockSize(JNIEnv *env, jobject jFS,
|
|||||||
jthrowable jthr;
|
jthrowable jthr;
|
||||||
jvalue jVal;
|
jvalue jVal;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH), "J"), jPath);
|
"getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH),
|
||||||
|
"J"), jPath);
|
||||||
if (jthr)
|
if (jthr)
|
||||||
return jthr;
|
return jthr;
|
||||||
*out = jVal.j;
|
*out = jVal.j;
|
||||||
@ -1053,13 +1035,13 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
|
|
||||||
if (accmode == O_RDONLY) {
|
if (accmode == O_RDONLY) {
|
||||||
method = "open";
|
method = "open";
|
||||||
signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM));
|
signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_FSDISTRM));
|
||||||
} else if (flags & O_APPEND) {
|
} else if (flags & O_APPEND) {
|
||||||
method = "append";
|
method = "append";
|
||||||
signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM));
|
signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSDOSTRM));
|
||||||
} else {
|
} else {
|
||||||
method = "create";
|
method = "create";
|
||||||
signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
|
signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_FSDOSTRM));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Create an object of org.apache.hadoop.fs.Path */
|
/* Create an object of org.apache.hadoop.fs.Path */
|
||||||
@ -1071,7 +1053,7 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Get the Configuration object from the FileSystem object */
|
/* Get the Configuration object from the FileSystem object */
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
|
"getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -1093,8 +1075,8 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
|
|
||||||
if (!bufferSize) {
|
if (!bufferSize) {
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
||||||
HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
|
JC_CONFIGURATION, "getInt",
|
||||||
jStrBufferSize, 4096);
|
"(Ljava/lang/String;I)I", jStrBufferSize, 4096);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND |
|
ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND |
|
||||||
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_UNRESOLVED_LINK,
|
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_UNRESOLVED_LINK,
|
||||||
@ -1108,8 +1090,8 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
if ((accmode == O_WRONLY) && (flags & O_APPEND) == 0) {
|
if ((accmode == O_WRONLY) && (flags & O_APPEND) == 0) {
|
||||||
if (!replication) {
|
if (!replication) {
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
||||||
HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
|
JC_CONFIGURATION, "getInt",
|
||||||
jStrReplication, 1);
|
"(Ljava/lang/String;I)I", jStrReplication, 1);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsOpenFile(%s): Configuration#getInt(dfs.replication)",
|
"hdfsOpenFile(%s): Configuration#getInt(dfs.replication)",
|
||||||
@ -1125,11 +1107,11 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
|
|
||||||
// READ?
|
// READ?
|
||||||
if (accmode == O_RDONLY) {
|
if (accmode == O_RDONLY) {
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
method, signature, jPath, jBufferSize);
|
method, signature, jPath, jBufferSize);
|
||||||
} else if ((accmode == O_WRONLY) && (flags & O_APPEND)) {
|
} else if ((accmode == O_WRONLY) && (flags & O_APPEND)) {
|
||||||
// WRITE/APPEND?
|
// WRITE/APPEND?
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
method, signature, jPath);
|
method, signature, jPath);
|
||||||
} else {
|
} else {
|
||||||
// WRITE/CREATE
|
// WRITE/CREATE
|
||||||
@ -1143,9 +1125,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
method, signature, jPath, jOverWrite,
|
method, signature, jPath, jOverWrite, jBufferSize,
|
||||||
jBufferSize, jReplication, jBlockSize);
|
jReplication, jBlockSize);
|
||||||
}
|
}
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -1178,8 +1160,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
|
|||||||
"hdfsOpenFile(%s): newJavaStr", path);
|
"hdfsOpenFile(%s): newJavaStr", path);
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFile, HADOOP_ISTRM,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFile,
|
||||||
"hasCapability", "(Ljava/lang/String;)Z", jCapabilityString);
|
JC_FS_DATA_INPUT_STREAM, "hasCapability",
|
||||||
|
"(Ljava/lang/String;)Z", jCapabilityString);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsOpenFile(%s): FSDataInputStream#hasCapability", path);
|
"hdfsOpenFile(%s): FSDataInputStream#hasCapability", path);
|
||||||
@@ -1243,7 +1226,7 @@ int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
 return -1;
 }
 
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
 "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
 jPath, newlength);
 destroyLocalReference(env, jPath);
@@ -1272,11 +1255,11 @@ int hdfsUnbufferFile(hdfsFile file)
 ret = ENOTSUP;
 goto done;
 }
-jthr = invokeMethod(env, NULL, INSTANCE, file->file, HADOOP_ISTRM,
-"unbuffer", "()V");
+jthr = invokeMethod(env, NULL, INSTANCE, file->file,
+JC_FS_DATA_INPUT_STREAM, "unbuffer", "()V");
 if (jthr) {
 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-HADOOP_ISTRM "#unbuffer failed:");
+HADOOP_FSDISTRM "#unbuffer failed:");
 goto done;
 }
 ret = 0;
@@ -1293,7 +1276,7 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
 // file.close
 
 //The interface whose 'close' method to be called
-const char *interface;
+CachedJavaClass cachedJavaClass;
 const char *interfaceShortName;
 
 //Caught exception
@@ -1312,11 +1295,14 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
 return -1;
 }
 
-interface = (file->type == HDFS_STREAM_INPUT) ?
-HADOOP_ISTRM : HADOOP_OSTRM;
+if (file->type == HDFS_STREAM_INPUT) {
+cachedJavaClass = JC_FS_DATA_INPUT_STREAM;
+} else {
+cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;
+}
 
-jthr = invokeMethod(env, NULL, INSTANCE, file->file, interface,
-"close", "()V");
+jthr = invokeMethod(env, NULL, INSTANCE, file->file,
+cachedJavaClass, "close", "()V");
 if (jthr) {
 interfaceShortName = (file->type == HDFS_STREAM_INPUT) ?
 "FSDataInputStream" : "FSDataOutputStream";
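The hdfsCloseFile hunk above is the template the rest of the patch repeats: pick the CachedJavaClass that matches the stream type, then hand the enum (rather than a class-name string) to invokeMethod. A minimal sketch of that pattern in isolation; the helper name is hypothetical, and it assumes it sits inside hdfs.c where the hdfsFile internals (file->type, file->file) and the HDFS_STREAM_* constants are visible.

#include <errno.h>
#include "exception.h"
#include "jclasses.h"
#include "jni_helper.h"

// Sketch only: close an hdfsFile's underlying Java stream through the cached
// jclass table instead of a per-call FindClass lookup. Error handling is
// reduced to reporting the exception and returning -1.
static int closeStreamSketch(JNIEnv *env, hdfsFile file)
{
    CachedJavaClass cachedJavaClass;
    jthrowable jthr;

    if (file->type == HDFS_STREAM_INPUT) {
        cachedJavaClass = JC_FS_DATA_INPUT_STREAM;   /* FSDataInputStream */
    } else {
        cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;  /* FSDataOutputStream */
    }
    /* No FindClass here: invokeMethod resolves the jclass from the cache. */
    jthr = invokeMethod(env, NULL, INSTANCE, file->file,
            cachedJavaClass, "close", "()V");
    if (jthr) {
        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                "closeStreamSketch: close");
        return -1;
    }
    return 0;
}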
@ -1360,7 +1346,7 @@ int hdfsExists(hdfsFS fs, const char *path)
|
|||||||
"hdfsExists: constructNewObjectOfPath");
|
"hdfsExists: constructNewObjectOfPath");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
|
"exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
|
||||||
destroyLocalReference(env, jPath);
|
destroyLocalReference(env, jPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
@ -1441,8 +1427,8 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream, HADOOP_ISTRM,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
|
||||||
"read", "([B)I", jbRarray);
|
JC_FS_DATA_INPUT_STREAM, "read", "([B)I", jbRarray);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
destroyLocalReference(env, jbRarray);
|
destroyLocalReference(env, jbRarray);
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -1505,7 +1491,8 @@ tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
|
|||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
|
||||||
HADOOP_ISTRM, "read", "(Ljava/nio/ByteBuffer;)I", bb);
|
JC_FS_DATA_INPUT_STREAM, "read",
|
||||||
|
"(Ljava/nio/ByteBuffer;)I", bb);
|
||||||
destroyLocalReference(env, bb);
|
destroyLocalReference(env, bb);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -1556,8 +1543,9 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
|
|||||||
"hdfsPread: NewByteArray");
|
"hdfsPread: NewByteArray");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, f->file, HADOOP_ISTRM,
|
jthr = invokeMethod(env, &jVal, INSTANCE, f->file,
|
||||||
"read", "(J[BII)I", position, jbRarray, 0, length);
|
JC_FS_DATA_INPUT_STREAM, "read", "(J[BII)I", position,
|
||||||
|
jbRarray, 0, length);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
destroyLocalReference(env, jbRarray);
|
destroyLocalReference(env, jbRarray);
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -1642,7 +1630,8 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
|
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
|
||||||
HADOOP_OSTRM, "write", "([B)V", jbWarray);
|
JC_FS_DATA_OUTPUT_STREAM, "write", "([B)V",
|
||||||
|
jbWarray);
|
||||||
destroyLocalReference(env, jbWarray);
|
destroyLocalReference(env, jbWarray);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -1677,7 +1666,7 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
|
|||||||
|
|
||||||
jInputStream = f->file;
|
jInputStream = f->file;
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
|
jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
|
||||||
HADOOP_ISTRM, "seek", "(J)V", desiredPos);
|
JC_FS_DATA_INPUT_STREAM, "seek", "(J)V", desiredPos);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsSeek(desiredPos=%" PRId64 ")"
|
"hdfsSeek(desiredPos=%" PRId64 ")"
|
||||||
@ -1687,15 +1676,13 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
tOffset hdfsTell(hdfsFS fs, hdfsFile f)
|
tOffset hdfsTell(hdfsFS fs, hdfsFile f)
|
||||||
{
|
{
|
||||||
// JAVA EQUIVALENT
|
// JAVA EQUIVALENT
|
||||||
// pos = f.getPos();
|
// pos = f.getPos();
|
||||||
|
|
||||||
jobject jStream;
|
jobject jStream;
|
||||||
const char *interface;
|
CachedJavaClass cachedJavaClass;
|
||||||
jvalue jVal;
|
jvalue jVal;
|
||||||
jthrowable jthr;
|
jthrowable jthr;
|
||||||
|
|
||||||
@ -1714,10 +1701,13 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
|
|||||||
|
|
||||||
//Parameters
|
//Parameters
|
||||||
jStream = f->file;
|
jStream = f->file;
|
||||||
interface = (f->type == HDFS_STREAM_INPUT) ?
|
if (f->type == HDFS_STREAM_INPUT) {
|
||||||
HADOOP_ISTRM : HADOOP_OSTRM;
|
cachedJavaClass = JC_FS_DATA_INPUT_STREAM;
|
||||||
|
} else {
|
||||||
|
cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;
|
||||||
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
|
||||||
interface, "getPos", "()J");
|
cachedJavaClass, "getPos", "()J");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsTell: %s#getPos",
|
"hdfsTell: %s#getPos",
|
||||||
@ -1748,7 +1738,7 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, f->file,
|
jthr = invokeMethod(env, NULL, INSTANCE, f->file,
|
||||||
HADOOP_OSTRM, "flush", "()V");
|
JC_FS_DATA_OUTPUT_STREAM, "flush", "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsFlush: FSDataInputStream#flush");
|
"hdfsFlush: FSDataInputStream#flush");
|
||||||
@ -1777,7 +1767,7 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
|
|||||||
|
|
||||||
jOutputStream = f->file;
|
jOutputStream = f->file;
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
|
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
|
||||||
HADOOP_OSTRM, "hflush", "()V");
|
JC_FS_DATA_OUTPUT_STREAM, "hflush", "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsHFlush: FSDataOutputStream#hflush");
|
"hdfsHFlush: FSDataOutputStream#hflush");
|
||||||
@ -1806,7 +1796,7 @@ int hdfsHSync(hdfsFS fs, hdfsFile f)
|
|||||||
|
|
||||||
jOutputStream = f->file;
|
jOutputStream = f->file;
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
|
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
|
||||||
HADOOP_OSTRM, "hsync", "()V");
|
JC_FS_DATA_OUTPUT_STREAM, "hsync", "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsHSync: FSDataOutputStream#hsync");
|
"hdfsHSync: FSDataOutputStream#hsync");
|
||||||
@ -1840,7 +1830,7 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
|
|||||||
//Parameters
|
//Parameters
|
||||||
jInputStream = f->file;
|
jInputStream = f->file;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
|
||||||
HADOOP_ISTRM, "available", "()I");
|
JC_FS_DATA_INPUT_STREAM, "available", "()I");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsAvailable: FSDataInputStream#available");
|
"hdfsAvailable: FSDataInputStream#available");
|
||||||
@ -1885,8 +1875,8 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Create the org.apache.hadoop.conf.Configuration object
|
//Create the org.apache.hadoop.conf.Configuration object
|
||||||
jthr = constructNewObjectOfClass(env, &jConfiguration,
|
jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
|
||||||
HADOOP_CONF, "()V");
|
JC_CONFIGURATION, "()V");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsCopyImpl: Configuration constructor");
|
"hdfsCopyImpl: Configuration constructor");
|
||||||
@ -1894,8 +1884,8 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
|
|||||||
}
|
}
|
||||||
|
|
||||||
//FileUtil#copy
|
//FileUtil#copy
|
||||||
jthr = invokeMethod(env, &jVal, STATIC,
|
jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_FILE_UTIL,
|
||||||
NULL, "org/apache/hadoop/fs/FileUtil", "copy",
|
"copy",
|
||||||
"(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
|
"(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
|
||||||
"Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
|
"Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
|
||||||
"ZLorg/apache/hadoop/conf/Configuration;)Z",
|
"ZLorg/apache/hadoop/conf/Configuration;)Z",
|
||||||
@ -1961,9 +1951,9 @@ int hdfsDelete(hdfsFS fs, const char *path, int recursive)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
|
jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
|
"delete", "(Lorg/apache/hadoop/fs/Path;Z)Z", jPath,
|
||||||
jPath, jRecursive);
|
jRecursive);
|
||||||
destroyLocalReference(env, jPath);
|
destroyLocalReference(env, jPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -2015,9 +2005,9 @@ int hdfsRename(hdfsFS fs, const char *oldPath, const char *newPath)
|
|||||||
|
|
||||||
// Rename the file
|
// Rename the file
|
||||||
// TODO: use rename2 here? (See HDFS-3592)
|
// TODO: use rename2 here? (See HDFS-3592)
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS, "rename",
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
|
"rename", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM
|
||||||
jOldPath, jNewPath);
|
(HADOOP_PATH), "Z"), jOldPath, jNewPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsRename(oldPath=%s, newPath=%s): FileSystem#rename",
|
"hdfsRename(oldPath=%s, newPath=%s): FileSystem#rename",
|
||||||
@ -2060,9 +2050,8 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//FileSystem#getWorkingDirectory()
|
//FileSystem#getWorkingDirectory()
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
HADOOP_FS, "getWorkingDirectory",
|
"getWorkingDirectory", "()Lorg/apache/hadoop/fs/Path;");
|
||||||
"()Lorg/apache/hadoop/fs/Path;");
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsGetWorkingDirectory: FileSystem#getWorkingDirectory");
|
"hdfsGetWorkingDirectory: FileSystem#getWorkingDirectory");
|
||||||
@ -2077,8 +2066,7 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Path#toString()
|
//Path#toString()
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jPath,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jPath, JC_PATH, "toString",
|
||||||
"org/apache/hadoop/fs/Path", "toString",
|
|
||||||
"()Ljava/lang/String;");
|
"()Ljava/lang/String;");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -2143,9 +2131,9 @@ int hdfsSetWorkingDirectory(hdfsFS fs, const char *path)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//FileSystem#setWorkingDirectory()
|
//FileSystem#setWorkingDirectory()
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"setWorkingDirectory",
|
"setWorkingDirectory", "(Lorg/apache/hadoop/fs/Path;)V",
|
||||||
"(Lorg/apache/hadoop/fs/Path;)V", jPath);
|
jPath);
|
||||||
destroyLocalReference(env, jPath);
|
destroyLocalReference(env, jPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr, NOPRINT_EXC_ILLEGAL_ARGUMENT,
|
errno = printExceptionAndFree(env, jthr, NOPRINT_EXC_ILLEGAL_ARGUMENT,
|
||||||
@ -2185,9 +2173,8 @@ int hdfsCreateDirectory(hdfsFS fs, const char *path)
|
|||||||
|
|
||||||
//Create the directory
|
//Create the directory
|
||||||
jVal.z = 0;
|
jVal.z = 0;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
|
"mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z", jPath);
|
||||||
jPath);
|
|
||||||
destroyLocalReference(env, jPath);
|
destroyLocalReference(env, jPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr,
|
errno = printExceptionAndFree(env, jthr,
|
||||||
@ -2235,7 +2222,7 @@ int hdfsSetReplication(hdfsFS fs, const char *path, int16_t replication)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Create the directory
|
//Create the directory
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
|
"setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
|
||||||
jPath, replication);
|
jPath, replication);
|
||||||
destroyLocalReference(env, jPath);
|
destroyLocalReference(env, jPath);
|
||||||
@ -2298,7 +2285,7 @@ int hdfsChown(hdfsFS fs, const char *path, const char *owner, const char *group)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Create the directory
|
//Create the directory
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
|
"setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
|
||||||
JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
|
JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
|
||||||
jPath, jOwner, jGroup);
|
jPath, jOwner, jGroup);
|
||||||
@ -2343,12 +2330,12 @@ int hdfsChmod(hdfsFS fs, const char *path, short mode)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// construct jPerm = FsPermission.createImmutable(short mode);
|
// construct jPerm = FsPermission.createImmutable(short mode);
|
||||||
jthr = constructNewObjectOfClass(env, &jPermObj,
|
jthr = constructNewObjectOfCachedClass(env, &jPermObj, JC_FS_PERMISSION,
|
||||||
HADOOP_FSPERM,"(S)V",jmode);
|
"(S)V",jmode);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"constructNewObjectOfClass(%s)", HADOOP_FSPERM);
|
"constructNewObjectOfCachedClass(%s)", HADOOP_FSPERM);
|
||||||
return -1;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
//Create an object of org.apache.hadoop.fs.Path
|
//Create an object of org.apache.hadoop.fs.Path
|
||||||
@ -2360,10 +2347,9 @@ int hdfsChmod(hdfsFS fs, const char *path, short mode)
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Create the directory
|
//Create the directory
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"setPermission",
|
"setPermission", JMETHOD2(JPARAM(HADOOP_PATH),
|
||||||
JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
|
JPARAM(HADOOP_FSPERM), JAVA_VOID), jPath, jPermObj);
|
||||||
jPath, jPermObj);
|
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr,
|
ret = printExceptionAndFree(env, jthr,
|
||||||
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
|
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
|
||||||
@ -2413,9 +2399,9 @@ int hdfsUtime(hdfsFS fs, const char *path, tTime mtime, tTime atime)
|
|||||||
jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
|
jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
|
||||||
jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
|
jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
|
||||||
|
|
||||||
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
|
jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
"setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
|
"setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J",
|
||||||
jPath, jmtime, jatime);
|
JAVA_VOID), jPath, jmtime, jatime);
|
||||||
destroyLocalReference(env, jPath);
|
destroyLocalReference(env, jPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
errno = printExceptionAndFree(env, jthr,
|
errno = printExceptionAndFree(env, jthr,
|
||||||
@@ -2579,27 +2565,28 @@ static jthrowable hadoopRzOptionsGetEnumSet(JNIEnv *env,
 goto done;
 }
 if (opts->skipChecksums) {
-jthr = fetchEnumInstance(env, READ_OPTION,
+jthr = fetchEnumInstance(env, HADOOP_RO,
 "SKIP_CHECKSUMS", &enumInst);
 if (jthr) {
 goto done;
 }
-jthr = invokeMethod(env, &jVal, STATIC, NULL,
-"java/util/EnumSet", "of",
-"(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
+jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
+"of", "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
 if (jthr) {
 goto done;
 }
 enumSetObj = jVal.l;
 } else {
-jclass clazz = (*env)->FindClass(env, READ_OPTION);
+jclass clazz = (*env)->FindClass(env, HADOOP_RO);
 if (!clazz) {
 jthr = getPendingExceptionAndClear(env);
 goto done;
 }
-jthr = invokeMethod(env, &jVal, STATIC, NULL,
-"java/util/EnumSet", "noneOf",
-"(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz);
+jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
+"noneOf", "(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz);
+if (jthr) {
+goto done;
+}
 enumSetObj = jVal.l;
 }
 // create global ref
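For reference, both branches above boil down to a single static call on the cached java/util/EnumSet class. A hedged sketch of the skip-checksums branch in isolation; the helper name is illustrative, and enumInst is assumed to already hold the ReadOption.SKIP_CHECKSUMS constant obtained via fetchEnumInstance.

#include "jclasses.h"
#include "jni_helper.h"

// Sketch only: wrap EnumSet.of(enumInst) using the cached JC_ENUM_SET class.
// On success *out receives a local reference that the caller still has to
// promote to a global reference, exactly as hadoopRzOptionsGetEnumSet does.
static jthrowable enumSetOfSketch(JNIEnv *env, jobject enumInst, jobject *out)
{
    jvalue jVal;
    jthrowable jthr;

    jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
            "of", "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
    if (jthr) {
        return jthr;
    }
    *out = jVal.l;
    return NULL;
}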
@ -2628,7 +2615,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
|
|||||||
jarray array = NULL;
|
jarray array = NULL;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
|
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
|
||||||
"java/nio/ByteBuffer", "remaining", "()I");
|
JC_BYTE_BUFFER, "remaining", "()I");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hadoopReadZeroExtractBuffer: ByteBuffer#remaining failed: ");
|
"hadoopReadZeroExtractBuffer: ByteBuffer#remaining failed: ");
|
||||||
@ -2636,7 +2623,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
|
|||||||
}
|
}
|
||||||
buffer->length = jVal.i;
|
buffer->length = jVal.i;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
|
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
|
||||||
"java/nio/ByteBuffer", "position", "()I");
|
JC_BYTE_BUFFER, "position", "()I");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hadoopReadZeroExtractBuffer: ByteBuffer#position failed: ");
|
"hadoopReadZeroExtractBuffer: ByteBuffer#position failed: ");
|
||||||
@ -2667,7 +2654,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
|
|||||||
}
|
}
|
||||||
// Get the backing array object of this buffer.
|
// Get the backing array object of this buffer.
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
|
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
|
||||||
"java/nio/ByteBuffer", "array", "()[B");
|
JC_BYTE_BUFFER, "array", "()[B");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hadoopReadZeroExtractBuffer: ByteBuffer#array failed: ");
|
"hadoopReadZeroExtractBuffer: ByteBuffer#array failed: ");
|
||||||
@ -2761,7 +2748,8 @@ struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
|
|||||||
"hadoopReadZero: hadoopRzOptionsGetEnumSet failed: ");
|
"hadoopReadZero: hadoopRzOptionsGetEnumSet failed: ");
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, file->file, HADOOP_ISTRM, "read",
|
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
|
||||||
|
JC_FS_DATA_INPUT_STREAM, "read",
|
||||||
"(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)"
|
"(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)"
|
||||||
"Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet);
|
"Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
@ -2826,7 +2814,7 @@ void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer)
|
|||||||
}
|
}
|
||||||
if (buffer->byteBuffer) {
|
if (buffer->byteBuffer) {
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
|
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
|
||||||
HADOOP_ISTRM, "releaseBuffer",
|
JC_FS_DATA_INPUT_STREAM, "releaseBuffer",
|
||||||
"(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer);
|
"(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
@ -2876,8 +2864,8 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
|
|||||||
"hdfsGetHosts(path=%s): constructNewObjectOfPath", path);
|
"hdfsGetHosts(path=%s): constructNewObjectOfPath", path);
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS,
|
jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
HADOOP_FS, "getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
|
"getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
|
||||||
"Lorg/apache/hadoop/fs/FileStatus;", jPath);
|
"Lorg/apache/hadoop/fs/FileStatus;", jPath);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND,
|
ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND,
|
||||||
@ -2889,11 +2877,11 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
|
|||||||
jFileStatus = jFSVal.l;
|
jFileStatus = jFSVal.l;
|
||||||
|
|
||||||
//org.apache.hadoop.fs.FileSystem#getFileBlockLocations
|
//org.apache.hadoop.fs.FileSystem#getFileBlockLocations
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
|
||||||
HADOOP_FS, "getFileBlockLocations",
|
"getFileBlockLocations",
|
||||||
"(Lorg/apache/hadoop/fs/FileStatus;JJ)"
|
"(Lorg/apache/hadoop/fs/FileStatus;JJ)"
|
||||||
"[Lorg/apache/hadoop/fs/BlockLocation;",
|
"[Lorg/apache/hadoop/fs/BlockLocation;", jFileStatus, start,
|
||||||
jFileStatus, start, length);
|
length);
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
|
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
|
||||||
@ -2928,8 +2916,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock,
|
||||||
"getHosts", "()[Ljava/lang/String;");
|
JC_BLOCK_LOCATION, "getHosts",
|
||||||
|
"()[Ljava/lang/String;");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||||
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
|
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
|
||||||
@@ -3034,7 +3023,7 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
 }
 
 //FileSystem#getDefaultBlockSize()
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
 "getDefaultBlockSize", "()J");
 if (jthr) {
 errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3098,7 +3087,7 @@ tOffset hdfsGetCapacity(hdfsFS fs)
 }
 
 //FileSystem#getStatus
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
 "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
 if (jthr) {
 errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3106,8 +3095,8 @@ tOffset hdfsGetCapacity(hdfsFS fs)
 return -1;
 }
 fss = (jobject)jVal.l;
-jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
-"getCapacity", "()J");
+jthr = invokeMethod(env, &jVal, INSTANCE, fss,
+JC_FS_STATUS, "getCapacity", "()J");
 destroyLocalReference(env, fss);
 if (jthr) {
 errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3138,7 +3127,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
 }
 
 //FileSystem#getStatus
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
 "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
 if (jthr) {
 errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3146,8 +3135,8 @@ tOffset hdfsGetUsed(hdfsFS fs)
 return -1;
 }
 fss = (jobject)jVal.l;
-jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
-"getUsed", "()J");
+jthr = invokeMethod(env, &jVal, INSTANCE, fss, JC_FS_STATUS,
+"getUsed", "()J");
 destroyLocalReference(env, fss);
 if (jthr) {
 errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@ -3205,45 +3194,45 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
|
|||||||
struct hdfsExtendedFileInfo *extInfo;
|
struct hdfsExtendedFileInfo *extInfo;
|
||||||
size_t extOffset;
|
size_t extOffset;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS, "isDir",
|
||||||
HADOOP_STAT, "isDir", "()Z");
|
"()Z");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
|
fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
HADOOP_STAT, "getReplication", "()S");
|
"getReplication", "()S");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mReplication = jVal.s;
|
fileInfo->mReplication = jVal.s;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
HADOOP_STAT, "getBlockSize", "()J");
|
"getBlockSize", "()J");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mBlockSize = jVal.j;
|
fileInfo->mBlockSize = jVal.j;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
HADOOP_STAT, "getModificationTime", "()J");
|
"getModificationTime", "()J");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mLastMod = jVal.j / 1000;
|
fileInfo->mLastMod = jVal.j / 1000;
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
HADOOP_STAT, "getAccessTime", "()J");
|
"getAccessTime", "()J");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
|
fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
|
||||||
|
|
||||||
if (fileInfo->mKind == kObjectKindFile) {
|
if (fileInfo->mKind == kObjectKindFile) {
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
HADOOP_STAT, "getLen", "()J");
|
"getLen", "()J");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mSize = jVal.j;
|
fileInfo->mSize = jVal.j;
|
||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
"getPath", "()Lorg/apache/hadoop/fs/Path;");
|
"getPath", "()Lorg/apache/hadoop/fs/Path;");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
@ -3254,8 +3243,8 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jPath, JC_PATH, "toString",
|
||||||
"toString", "()Ljava/lang/String;");
|
"()Ljava/lang/String;");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
jPathName = jVal.l;
|
jPathName = jVal.l;
|
||||||
@ -3267,8 +3256,8 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
|
|||||||
}
|
}
|
||||||
fileInfo->mName = strdup(cPathName);
|
fileInfo->mName = strdup(cPathName);
|
||||||
(*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
|
(*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS, "getOwner",
|
||||||
"getOwner", "()Ljava/lang/String;");
|
"()Ljava/lang/String;");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
jUserName = jVal.l;
|
jUserName = jVal.l;
|
||||||
@ -3288,15 +3277,15 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
|
|||||||
(*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
|
(*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
|
||||||
extInfo = getExtendedFileInfo(fileInfo);
|
extInfo = getExtendedFileInfo(fileInfo);
|
||||||
memset(extInfo, 0, sizeof(*extInfo));
|
memset(extInfo, 0, sizeof(*extInfo));
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
HADOOP_STAT, "isEncrypted", "()Z");
|
"isEncrypted", "()Z");
|
||||||
if (jthr) {
|
if (jthr) {
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
if (jVal.z == JNI_TRUE) {
|
if (jVal.z == JNI_TRUE) {
|
||||||
extInfo->flags |= HDFS_EXTENDED_FILE_INFO_ENCRYPTED;
|
extInfo->flags |= HDFS_EXTENDED_FILE_INFO_ENCRYPTED;
|
||||||
}
|
}
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
"getGroup", "()Ljava/lang/String;");
|
"getGroup", "()Ljava/lang/String;");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
@ -3309,19 +3298,19 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
|
|||||||
fileInfo->mGroup = strdup(cGroupName);
|
fileInfo->mGroup = strdup(cGroupName);
|
||||||
(*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
|
(*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
|
||||||
|
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
|
||||||
"getPermission",
|
"getPermission",
|
||||||
"()Lorg/apache/hadoop/fs/permission/FsPermission;");
|
"()Lorg/apache/hadoop/fs/permission/FsPermission;");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
if (jVal.l == NULL) {
|
if (jVal.l == NULL) {
|
||||||
jthr = newRuntimeError(env, "%s#getPermission returned NULL!",
|
jthr = newRuntimeError(env, "%s#getPermission returned NULL!",
|
||||||
HADOOP_STAT);
|
HADOOP_FILESTAT);
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
jPermission = jVal.l;
|
jPermission = jVal.l;
|
||||||
jthr = invokeMethod(env, &jVal, INSTANCE, jPermission, HADOOP_FSPERM,
|
jthr = invokeMethod(env, &jVal, INSTANCE, jPermission,
|
||||||
"toShort", "()S");
|
JC_FS_PERMISSION, "toShort", "()S");
|
||||||
if (jthr)
|
if (jthr)
|
||||||
goto done;
|
goto done;
|
||||||
fileInfo->mPermissions = jVal.s;
|
fileInfo->mPermissions = jVal.s;
|
||||||
@@ -3355,18 +3344,17 @@ getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo **fileInfo)
 jvalue jVal;
 jthrowable jthr;
 
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-"exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
-jPath);
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM, "exists",
+JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
 if (jthr)
 return jthr;
 if (jVal.z == 0) {
 *fileInfo = NULL;
 return NULL;
 }
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
-HADOOP_FS, "getFileStatus",
-JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)), jPath);
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+"getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM
+(HADOOP_FILESTAT)), jPath);
 if (jthr)
 return jthr;
 jStat = jVal.l;
@@ -3416,9 +3404,9 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
 goto done;
 }
 
-jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS, "listStatus",
-JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
-jPath);
+jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
+JC_DISTRIBUTED_FILE_SYSTEM, "listStatus",
+JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_FILESTAT)), jPath);
 if (jthr) {
 ret = printExceptionAndFree(env, jthr,
 NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
@@ -0,0 +1,136 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "exception.h"
#include "jclasses.h"
#include "jni_helper.h"
#include "os/mutexes.h"

#include <assert.h>

/**
 * Whether initCachedClasses has been called or not. Protected by the mutex
 * jclassInitMutex.
 */
static int jclassesInitialized = 0;

typedef struct {
    jclass javaClass;
    const char *className;
} javaClassAndName;

/**
 * A collection of commonly used jclass objects that are used throughout
 * libhdfs. The jclasses are loaded immediately after the JVM is created (see
 * initCachedClasses). The array is indexed using CachedJavaClass.
 */
javaClassAndName cachedJavaClasses[NUM_CACHED_CLASSES];

/**
 * Helper method that creates and sets a jclass object given a class name.
 * Returns a jthrowable on error, NULL otherwise.
 */
static jthrowable initCachedClass(JNIEnv *env, const char *className,
        jclass *cachedJclass) {
    assert(className != NULL && "Found a CachedJavaClass without a class "
            "name");
    jthrowable jthr = NULL;
    jclass tempLocalClassRef;
    tempLocalClassRef = (*env)->FindClass(env, className);
    if (!tempLocalClassRef) {
        jthr = getPendingExceptionAndClear(env);
        goto done;
    }
    *cachedJclass = (jclass) (*env)->NewGlobalRef(env, tempLocalClassRef);
    if (!*cachedJclass) {
        jthr = getPendingExceptionAndClear(env);
        goto done;
    }
done:
    destroyLocalReference(env, tempLocalClassRef);
    return jthr;
}

jthrowable initCachedClasses(JNIEnv* env) {
    mutexLock(&jclassInitMutex);
    if (!jclassesInitialized) {
        // Set all the class names
        cachedJavaClasses[JC_CONFIGURATION].className =
                "org/apache/hadoop/conf/Configuration";
        cachedJavaClasses[JC_PATH].className =
                "org/apache/hadoop/fs/Path";
        cachedJavaClasses[JC_FILE_SYSTEM].className =
                "org/apache/hadoop/fs/FileSystem";
        cachedJavaClasses[JC_FS_STATUS].className =
                "org/apache/hadoop/fs/FsStatus";
        cachedJavaClasses[JC_FILE_UTIL].className =
                "org/apache/hadoop/fs/FileUtil";
        cachedJavaClasses[JC_BLOCK_LOCATION].className =
                "org/apache/hadoop/fs/BlockLocation";
        cachedJavaClasses[JC_DFS_HEDGED_READ_METRICS].className =
                "org/apache/hadoop/hdfs/DFSHedgedReadMetrics";
        cachedJavaClasses[JC_DISTRIBUTED_FILE_SYSTEM].className =
                "org/apache/hadoop/hdfs/DistributedFileSystem";
        cachedJavaClasses[JC_FS_DATA_INPUT_STREAM].className =
                "org/apache/hadoop/fs/FSDataInputStream";
        cachedJavaClasses[JC_FS_DATA_OUTPUT_STREAM].className =
                "org/apache/hadoop/fs/FSDataOutputStream";
        cachedJavaClasses[JC_FILE_STATUS].className =
                "org/apache/hadoop/fs/FileStatus";
        cachedJavaClasses[JC_FS_PERMISSION].className =
                "org/apache/hadoop/fs/permission/FsPermission";
        cachedJavaClasses[JC_READ_STATISTICS].className =
                "org/apache/hadoop/hdfs/ReadStatistics";
        cachedJavaClasses[JC_HDFS_DATA_INPUT_STREAM].className =
                "org/apache/hadoop/hdfs/client/HdfsDataInputStream";
        cachedJavaClasses[JC_DOMAIN_SOCKET].className =
                "org/apache/hadoop/net/unix/DomainSocket";
        cachedJavaClasses[JC_URI].className =
                "java/net/URI";
        cachedJavaClasses[JC_BYTE_BUFFER].className =
                "java/nio/ByteBuffer";
        cachedJavaClasses[JC_ENUM_SET].className =
                "java/util/EnumSet";
        cachedJavaClasses[JC_EXCEPTION_UTILS].className =
                "org/apache/commons/lang3/exception/ExceptionUtils";

        // Create and set the jclass objects based on the class names set above
        jthrowable jthr;
        int numCachedClasses =
                sizeof(cachedJavaClasses) / sizeof(javaClassAndName);
        for (int i = 0; i < numCachedClasses; i++) {
            jthr = initCachedClass(env, cachedJavaClasses[i].className,
                    &cachedJavaClasses[i].javaClass);
            if (jthr) {
                mutexUnlock(&jclassInitMutex);
                return jthr;
            }
        }
        jclassesInitialized = 1;
    }
    mutexUnlock(&jclassInitMutex);
    return NULL;
}

jclass getJclass(CachedJavaClass cachedJavaClass) {
    return cachedJavaClasses[cachedJavaClass].javaClass;
}

const char *getClassName(CachedJavaClass cachedJavaClass) {
    return cachedJavaClasses[cachedJavaClass].className;
}
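As the comments above state, initCachedClasses is idempotent and is meant to run once, right after the JVM comes up and before any JC_* value is used. A hedged sketch of a call site; the wrapper name is hypothetical, and the actual call is presumably wired into libhdfs's JNI environment setup (the getGlobalJNIEnv changes near the end of this diff).

#include "exception.h"
#include "jclasses.h"

// Sketch only: populate the jclass cache for a freshly created JVM. Any
// failure here means later invokeMethod calls on JC_* classes cannot work.
static int setUpCachedClassesSketch(JNIEnv *env)
{
    jthrowable jthr = initCachedClasses(env);
    if (jthr) {
        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                "initCachedClasses failed");
        return -1;
    }
    return 0;
}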
@@ -0,0 +1,112 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFS_JCLASSES_H
#define LIBHDFS_JCLASSES_H

#include <jni.h>

/**
 * Encapsulates logic to cache jclass objects so they can be re-used across
 * calls to FindClass. Creating jclass objects every time libhdfs has to
 * invoke a method can hurt performance. By caching jclass objects we avoid
 * this overhead.
 *
 * We use the term "cached" here loosely; jclasses are not truly cached,
 * instead they are created once during JVM load and are kept alive until the
 * process shuts down. There is no eviction of jclass objects.
 *
 * @see https://www.ibm.com/developerworks/library/j-jni/index.html#notc
 */

/**
 * Each enum value represents one jclass that is cached. Enum values should
 * be passed to getJclass or getClassName to get the jclass object or class
 * name represented by the enum value.
 */
typedef enum {
    JC_CONFIGURATION,
    JC_PATH,
    JC_FILE_SYSTEM,
    JC_FS_STATUS,
    JC_FILE_UTIL,
    JC_BLOCK_LOCATION,
    JC_DFS_HEDGED_READ_METRICS,
    JC_DISTRIBUTED_FILE_SYSTEM,
    JC_FS_DATA_INPUT_STREAM,
    JC_FS_DATA_OUTPUT_STREAM,
    JC_FILE_STATUS,
    JC_FS_PERMISSION,
    JC_READ_STATISTICS,
    JC_HDFS_DATA_INPUT_STREAM,
    JC_DOMAIN_SOCKET,
    JC_URI,
    JC_BYTE_BUFFER,
    JC_ENUM_SET,
    JC_EXCEPTION_UTILS,
    // A special marker enum that counts the number of cached jclasses
    NUM_CACHED_CLASSES
} CachedJavaClass;

/**
 * Internally initializes all jclass objects listed in the CachedJavaClass
 * enum. This method is idempotent and thread-safe.
 */
jthrowable initCachedClasses(JNIEnv* env);

/**
 * Return the jclass object represented by the given CachedJavaClass.
 */
jclass getJclass(CachedJavaClass cachedJavaClass);

/**
 * Return the class name represented by the given CachedJavaClass.
 */
const char *getClassName(CachedJavaClass cachedJavaClass);

/* Some frequently used HDFS class names */
#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
#define HADOOP_PATH "org/apache/hadoop/fs/Path"
#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
#define HADOOP_FILEUTIL "org/apache/hadoop/fs/FileUtil"
#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
#define HADOOP_DFS_HRM "org/apache/hadoop/hdfs/DFSHedgedReadMetrics"
#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
#define HADOOP_FSDISTRM "org/apache/hadoop/fs/FSDataInputStream"
#define HADOOP_FSDOSTRM "org/apache/hadoop/fs/FSDataOutputStream"
#define HADOOP_FILESTAT "org/apache/hadoop/fs/FileStatus"
#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
#define HADOOP_RSTAT "org/apache/hadoop/hdfs/ReadStatistics"
#define HADOOP_HDISTRM "org/apache/hadoop/hdfs/client/HdfsDataInputStream"
#define HADOOP_RO "org/apache/hadoop/fs/ReadOption"
#define HADOOP_DS "org/apache/hadoop/net/unix/DomainSocket"

/* Some frequently used Java class names */
#define JAVA_NET_ISA "java/net/InetSocketAddress"
#define JAVA_NET_URI "java/net/URI"
#define JAVA_BYTEBUFFER "java/nio/ByteBuffer"
#define JAVA_STRING "java/lang/String"
#define JAVA_ENUMSET "java/util/EnumSet"

/* Some frequently used third-party class names */
#define EXCEPTION_UTILS "org/apache/commons/lang3/exception/ExceptionUtils"

#endif /*LIBHDFS_JCLASSES_H*/
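The two accessors above are the only way the rest of libhdfs touches the cache: getJclass for the jclass itself, getClassName for the string used in method signatures and error messages. A small hedged example of calling them directly; most call sites go through invokeMethod instead, and the helper below is purely illustrative.

#include "jclasses.h"

// Sketch only: check whether an arbitrary jobject is an
// org.apache.hadoop.fs.FileSystem using the cached global jclass reference.
static jboolean isFileSystemSketch(JNIEnv *env, jobject obj)
{
    jclass fsClass = getJclass(JC_FILE_SYSTEM);
    // getClassName(JC_FILE_SYSTEM) returns "org/apache/hadoop/fs/FileSystem",
    // i.e. the same string as the HADOOP_FS define above.
    return (*env)->IsInstanceOf(env, obj, fsClass);
}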
@@ -18,17 +18,15 @@
 
 #include "config.h"
 #include "exception.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "platform.h"
-#include "common/htable.h"
 #include "os/mutexes.h"
 #include "os/thread_local_storage.h"
 
 #include <stdio.h>
 #include <string.h>
 
-static struct htable *gClassRefHTable = NULL;
-
 /** The Native return types that methods could return */
 #define JVOID 'V'
 #define JOBJECT 'L'
@@ -42,13 +40,6 @@ static struct htable *gClassRefHTable = NULL;
 #define JFLOAT 'F'
 #define JDOUBLE 'D'
 
-
-/**
- * MAX_HASH_TABLE_ELEM: The maximum no. of entries in the hashtable.
- * It's set to 4096 to account for (classNames + No. of threads)
- */
-#define MAX_HASH_TABLE_ELEM 4096
-
 /**
  * Length of buffer for retrieving created JVMs. (We only ever create one.)
  */
@@ -106,32 +97,27 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
 return NULL;
 }
 
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-jobject instObj, const char *className,
-const char *methName, const char *methSignature, ...)
+/**
+ * Does the work to actually execute a Java method. Takes in an existing jclass
+ * object and a va_list of arguments for the Java method to be invoked.
+ */
+static jthrowable invokeMethodOnJclass(JNIEnv *env, jvalue *retval,
+MethType methType, jobject instObj, jclass cls, const char *className,
+const char *methName, const char *methSignature, va_list args)
 {
-va_list args;
-jclass cls;
 jmethodID mid;
 jthrowable jthr;
 const char *str;
 char returnType;
 
-jthr = validateMethodType(env, methType);
-if (jthr)
-return jthr;
-jthr = globalClassReference(className, env, &cls);
-if (jthr)
-return jthr;
-jthr = methodIdFromClass(className, methName, methSignature,
-methType, env, &mid);
+jthr = methodIdFromClass(cls, className, methName, methSignature, methType,
+env, &mid);
 if (jthr)
 return jthr;
 str = methSignature;
 while (*str != ')') str++;
 str++;
 returnType = *str;
-va_start(args, methSignature);
 if (returnType == JOBJECT || returnType == JARRAYOBJECT) {
 jobject jobj = NULL;
 if (methType == STATIC) {
@@ -190,7 +176,6 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
 }
 retval->i = ji;
 }
-va_end(args);
 
 jthr = (*env)->ExceptionOccurred(env);
 if (jthr) {
@@ -200,43 +185,115 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
     return NULL;
 }
 
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
-                                     const char *ctorSignature, ...)
+jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
+        MethType methType, jobject instObj, const char *className,
+        const char *methName, const char *methSignature, ...)
 {
+    jclass cls = NULL;
+    jthrowable jthr = NULL;
+
     va_list args;
-    jclass cls;
+    va_start(args, methSignature);
+
+    jthr = validateMethodType(env, methType);
+    if (jthr) {
+        goto done;
+    }
+
+    cls = (*env)->FindClass(env, className);
+    if (!cls) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+
+    jthr = invokeMethodOnJclass(env, retval, methType, instObj, cls,
+            className, methName, methSignature, args);
+
+done:
+    va_end(args);
+    destroyLocalReference(env, cls);
+    return jthr;
+}
+
+jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
+        jobject instObj, CachedJavaClass class,
+        const char *methName, const char *methSignature, ...)
+{
+    jthrowable jthr;
+
+    va_list args;
+    va_start(args, methSignature);
+
+    jthr = invokeMethodOnJclass(env, retval, methType, instObj,
+            getJclass(class), getClassName(class), methName, methSignature,
+            args);
+
+    va_end(args);
+    return jthr;
+}
+
+static jthrowable constructNewObjectOfJclass(JNIEnv *env,
+        jobject *out, jclass cls, const char *className,
+        const char *ctorSignature, va_list args) {
     jmethodID mid;
     jobject jobj;
     jthrowable jthr;
 
-    jthr = globalClassReference(className, env, &cls);
+    jthr = methodIdFromClass(cls, className, "<init>", ctorSignature, INSTANCE,
+            env, &mid);
     if (jthr)
         return jthr;
-    jthr = methodIdFromClass(className, "<init>", ctorSignature,
-                             INSTANCE, env, &mid);
-    if (jthr)
-        return jthr;
-    va_start(args, ctorSignature);
     jobj = (*env)->NewObjectV(env, cls, mid, args);
-    va_end(args);
     if (!jobj)
         return getPendingExceptionAndClear(env);
     *out = jobj;
     return NULL;
 }
 
-jthrowable methodIdFromClass(const char *className, const char *methName,
-                             const char *methSignature, MethType methType,
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out,
+        const char *className, const char *ctorSignature, ...)
+{
+    va_list args;
+    jclass cls;
+    jthrowable jthr = NULL;
+
+    cls = (*env)->FindClass(env, className);
+    if (!cls) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+
+    va_start(args, ctorSignature);
+    jthr = constructNewObjectOfJclass(env, out, cls, className,
+            ctorSignature, args);
+    va_end(args);
+done:
+    destroyLocalReference(env, cls);
+    return jthr;
+}
+
+jthrowable constructNewObjectOfCachedClass(JNIEnv *env, jobject *out,
+        CachedJavaClass cachedJavaClass, const char *ctorSignature, ...)
+{
+    jthrowable jthr = NULL;
+    va_list args;
+    va_start(args, ctorSignature);
+
+    jthr = constructNewObjectOfJclass(env, out,
+            getJclass(cachedJavaClass), getClassName(cachedJavaClass),
+            ctorSignature, args);
+
+    va_end(args);
+    return jthr;
+}
+
+jthrowable methodIdFromClass(jclass cls, const char *className,
+        const char *methName, const char *methSignature, MethType methType,
         JNIEnv *env, jmethodID *out)
 {
-    jclass cls;
     jthrowable jthr;
     jmethodID mid = 0;
 
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
     jthr = validateMethodType(env, methType);
     if (jthr)
         return jthr;
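Not part of the patch: a brief sketch of how a call site chooses between the two entry points above. invokeMethod now takes a CachedJavaClass handle (JC_CONFIGURATION is one such handle, used later in this change), while findClassAndInvokeMethod keeps the old resolve-by-name behaviour for code that runs before the cache exists. The variables env and conf, and the method being invoked, are illustrative only.

// Cached path: the jclass was resolved once at startup, so this call does
// no FindClass and takes no lock.
jvalue jVal;
jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, conf, JC_CONFIGURATION,
        "toString", "()Ljava/lang/String;");

// By-name path: resolves the class on every call; meant for bootstrap or
// test code that runs before the cached classes are initialized.
jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, conf,
        "org/apache/hadoop/conf/Configuration",
        "toString", "()Ljava/lang/String;");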
@@ -255,54 +312,6 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
     return NULL;
 }
 
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
-{
-    jthrowable jthr = NULL;
-    jclass local_clazz = NULL;
-    jclass clazz = NULL;
-    int ret;
-
-    mutexLock(&hdfsHashMutex);
-    if (!gClassRefHTable) {
-        gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
-            ht_compare_string);
-        if (!gClassRefHTable) {
-            jthr = newRuntimeError(env, "htable_alloc failed\n");
-            goto done;
-        }
-    }
-    clazz = htable_get(gClassRefHTable, className);
-    if (clazz) {
-        *out = clazz;
-        goto done;
-    }
-    local_clazz = (*env)->FindClass(env,className);
-    if (!local_clazz) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    clazz = (*env)->NewGlobalRef(env, local_clazz);
-    if (!clazz) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    ret = htable_put(gClassRefHTable, (void*)className, clazz);
-    if (ret) {
-        jthr = newRuntimeError(env, "htable_put failed with error "
-                               "code %d\n", ret);
-        goto done;
-    }
-    *out = clazz;
-    jthr = NULL;
-done:
-    mutexUnlock(&hdfsHashMutex);
-    (*env)->DeleteLocalRef(env, local_clazz);
-    if (jthr && clazz) {
-        (*env)->DeleteGlobalRef(env, clazz);
-    }
-    return jthr;
-}
-
 jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
 {
     jthrowable jthr;
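The block removed above is the contention point named in the commit title: every invokeMethod, constructNewObjectOfClass and methodIdFromClass call went through globalClassReference(), which held the process-wide hdfsHashMutex while consulting the hash table, so concurrent threads serialized on that single lock even when the class was already cached. The replacement resolves each class once and afterwards reads a plain global reference. jclasses.c itself is not part of this excerpt; the one-time caching step presumably looks roughly like the sketch below, where only jclassInitMutex, the mutex helpers and the standard JNI/libhdfs calls come from the patch and everything else is illustrative.

// Rough shape of the one-time class caching behind initCachedClasses().
static jclass cachedConfiguration;  // hypothetical: written once, then read lock-free

static jthrowable cacheConfigurationClass(JNIEnv *env)
{
    jthrowable jthr = NULL;
    mutexLock(&jclassInitMutex);    // taken only during initialization
    if (!cachedConfiguration) {
        jclass local = (*env)->FindClass(env,
                "org/apache/hadoop/conf/Configuration");
        if (!local) {
            jthr = getPendingExceptionAndClear(env);
        } else {
            cachedConfiguration = (*env)->NewGlobalRef(env, local);
            (*env)->DeleteLocalRef(env, local);
            if (!cachedConfiguration) {
                jthr = getPendingExceptionAndClear(env);
            }
        }
    }
    mutexUnlock(&jclassInitMutex);
    return jthr;
}

Once that has run, lookups of the cached jclasses no longer touch any mutex, which is what removes the contention reported in HDFS-14304.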
@@ -361,7 +370,6 @@ done:
     return jthr;
 }
 
-
 /**
  * Get the global JNI environemnt.
  *
@@ -461,14 +469,17 @@ static JNIEnv* getGlobalJNIEnv(void)
                     "with error: %d\n", rv);
             return NULL;
         }
-        jthr = invokeMethod(env, NULL, STATIC, NULL,
-                         "org/apache/hadoop/fs/FileSystem",
+        // We use findClassAndInvokeMethod here because the jclasses in
+        // jclasses.h have not loaded yet
+        jthr = findClassAndInvokeMethod(env, NULL, STATIC, NULL, HADOOP_FS,
                 "loadFileSystems", "()V");
         if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                    "FileSystem: loadFileSystems failed");
+            return NULL;
         }
-    }
-    else {
+    } else {
         //Attach this thread to the VM
         vm = vmBuf[0];
         rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
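The removed literal shows what the new HADOOP_FS macro stands for; it is presumably defined alongside the other class names in jclasses.h, along the lines of the assumed definition below. The by-name helper is used here because, per the added comment, the cached jclasses have not been initialized yet at the point where the JVM is first created.

/* Assumed definition, matching the string literal removed above. */
#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"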
@@ -539,6 +550,15 @@ JNIEnv* getJNIEnv(void)
 
     state->env = getGlobalJNIEnv();
     mutexUnlock(&jvmMutex);
+
+    jthrowable jthr = NULL;
+    jthr = initCachedClasses(state->env);
+    if (jthr) {
+        printExceptionAndFree(state->env, jthr, PRINT_EXC_ALL,
+                "initCachedClasses failed");
+        goto fail;
+    }
+
     if (!state->env) {
         goto fail;
     }
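With this hunk, the class cache is populated inside the same getJNIEnv() call that creates or attaches to the JVM, and a cache failure is reported through the same failure path as a missing environment. From the caller's side nothing changes; a hedged sketch of the usual call-site pattern (the enclosing function is hypothetical):

// Illustrative caller: a NULL return now also covers a failed
// initCachedClasses(), not just a JVM startup failure.
JNIEnv *env = getJNIEnv();
if (!env) {
    errno = EINTERNAL;
    return -1;
}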
@@ -628,8 +648,7 @@ jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
     if (jthr)
         goto done;
     jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
-            "org/apache/hadoop/conf/Configuration", "set",
-            "(Ljava/lang/String;Ljava/lang/String;)V",
+            JC_CONFIGURATION, "set", "(Ljava/lang/String;Ljava/lang/String;)V",
             jkey, jvalue);
     if (jthr)
         goto done;
@@ -19,6 +19,8 @@
 #ifndef LIBHDFS_JNI_HELPER_H
 #define LIBHDFS_JNI_HELPER_H
 
+#include "jclasses.h"
+
 #include <jni.h>
 #include <stdio.h>
 
@@ -28,7 +30,6 @@
 
 #define PATH_SEPARATOR ':'
 
-
 /** Denote the method we want to invoke as STATIC or INSTANCE */
 typedef enum {
     STATIC,
@@ -66,12 +67,12 @@ jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);
 void destroyLocalReference(JNIEnv *env, jobject jObject);
 
 /** invokeMethod: Invoke a Static or Instance method.
- * className: Name of the class where the method can be found
  * methName: Name of the method
  * methSignature: the signature of the method "(arg-types)ret-type"
  * methType: The type of the method (STATIC or INSTANCE)
  * instObj: Required if the methType is INSTANCE. The object to invoke
    the method on.
+ * class: The CachedJavaClass to call the method on.
  * env: The JNIEnv pointer
  * retval: The pointer to a union type which will contain the result of the
    method invocation, e.g. if the method returns an Object, retval will be
@@ -83,18 +84,34 @@ void destroyLocalReference(JNIEnv *env, jobject jObject);
   a valid exception reference, and the result stored at retval is undefined.
  */
 jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                jobject instObj, const char *className, const char *methName,
-                const char *methSignature, ...);
+        jobject instObj, CachedJavaClass class,
+        const char *methName, const char *methSignature, ...);
 
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
-                                     const char *ctorSignature, ...);
+/**
+ * findClassAndInvokeMethod: Same as invokeMethod, but it calls FindClass on
+ * the given className first and then calls invokeMethod. This method exists
+ * mainly for test infrastructure, any production code should use
+ * invokeMethod. Calling FindClass repeatedly can introduce performance
+ * overhead, so users should prefer invokeMethod and supply a CachedJavaClass.
+ */
+jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
+        MethType methType, jobject instObj, const char *className,
+        const char *methName, const char *methSignature, ...);
 
-jthrowable methodIdFromClass(const char *className, const char *methName,
-                             const char *methSignature, MethType methType,
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out,
+        const char *className, const char *ctorSignature, ...);
+
+/**
+ * Same as constructNewObjectOfClass but it takes in a CachedJavaClass
+ * rather than a className. This avoids an extra call to FindClass.
+ */
+jthrowable constructNewObjectOfCachedClass(JNIEnv *env, jobject *out,
+        CachedJavaClass cachedJavaClass, const char *ctorSignature, ...);
+
+jthrowable methodIdFromClass(jclass cls, const char *className,
+        const char *methName, const char *methSignature, MethType methType,
         JNIEnv *env, jmethodID *out);
 
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);
-
 /** classNameOfObject: Get an object's class name.
  * @param jobj: The object.
  * @param env: The JNIEnv pointer.
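Not from the patch: a short sketch of the constructor-side counterpart declared above, assuming JC_CONFIGURATION is one of the cached handles (the hadoopConfSetStr hunk earlier uses it) and that env is a valid JNIEnv pointer.

// Build a new Configuration through the cached class handle.
jobject jConf = NULL;
jthrowable jthr = constructNewObjectOfCachedClass(env, &jConf,
        JC_CONFIGURATION, "()V");
if (jthr) {
    errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "example: new Configuration");  // error message text is illustrative
}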
@@ -30,12 +30,12 @@
 
 #include "platform.h"
 
-/** Mutex protecting the class reference hash table. */
-extern mutex hdfsHashMutex;
-
 /** Mutex protecting singleton JVM instance. */
 extern mutex jvmMutex;
 
+/** Mutex protecting initialization of jclasses in jclasses.h. */
+extern mutex jclassInitMutex;
+
 /**
  * Locks a mutex.
  *
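The mutex swap is the visible part of the fix: hdfsHashMutex was taken on every class lookup, whereas jclassInitMutex only guards the one-time initialization of the cached jclasses. Steady-state readers are expected to go through getJclass()/getClassName() with no locking at all, roughly as below; jclasses.c is not shown in this excerpt, so the lock-free fast path is an assumption, while the methodIdFromClass signature is the new one from jni_helper.h.

// Assumed lock-free fast path once initialization has completed.
jclass cls = getJclass(JC_CONFIGURATION);
jmethodID mid = NULL;
jthrowable jthr = methodIdFromClass(cls, getClassName(JC_CONFIGURATION),
        "set", "(Ljava/lang/String;Ljava/lang/String;)V", INSTANCE, env, &mid);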
@@ -21,8 +21,8 @@
 #include <pthread.h>
 #include <stdio.h>
 
-mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
 mutex jvmMutex;
+mutex jclassInitMutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_mutexattr_t jvmMutexAttr;
 
 __attribute__((constructor)) static void init() {
@@ -97,7 +97,7 @@ static void get_current_thread_id(JNIEnv* env, char* id, int max) {
   jthrowable jthr = NULL;
   const char *thr_name_str;
 
-  jthr = invokeMethod(env, &jVal, STATIC, NULL, "java/lang/Thread",
+  jthr = findClassAndInvokeMethod(env, &jVal, STATIC, NULL, "java/lang/Thread",
           "currentThread", "()Ljava/lang/Thread;");
   if (jthr) {
     snprintf(id, max, "%s", UNKNOWN);
@@ -107,8 +107,8 @@ static void get_current_thread_id(JNIEnv* env, char* id, int max) {
   }
   thr = jVal.l;
 
-  jthr = invokeMethod(env, &jVal, INSTANCE, thr, "java/lang/Thread",
-          "getId", "()J");
+  jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, thr,
+          "java/lang/Thread", "getId", "()J");
   if (jthr) {
     snprintf(id, max, "%s", UNKNOWN);
     printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -117,8 +117,8 @@ static void get_current_thread_id(JNIEnv* env, char* id, int max) {
   }
   thr_id = jVal.j;
 
-  jthr = invokeMethod(env, &jVal, INSTANCE, thr, "java/lang/Thread",
-          "toString", "()Ljava/lang/String;");
+  jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, thr,
+          "java/lang/Thread", "toString", "()Ljava/lang/String;");
   if (jthr) {
     snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
     printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -20,8 +20,8 @@
 
 #include <windows.h>
 
-mutex hdfsHashMutex;
 mutex jvmMutex;
+mutex jclassInitMutex;
 
 /**
  * Unfortunately, there is no simple static initializer for a critical section.
@@ -34,8 +34,8 @@ mutex jvmMutex;
  * http://msdn.microsoft.com/en-us/library/bb918180.aspx
  */
 static void __cdecl initializeMutexes(void) {
-  InitializeCriticalSection(&hdfsHashMutex);
   InitializeCriticalSection(&jvmMutex);
+  InitializeCriticalSection(&jclassInitMutex);
 }
 #pragma section(".CRT$XCU", read)
 __declspec(allocate(".CRT$XCU"))