Compare commits: f931ede86b ... eb1e30395b

4 commits:
- eb1e30395b
- 0b3755347c
- d1ce965645
- 09b348753f
@@ -32,13 +32,23 @@ include_directories(
     ../libhdfspp/lib
 )
 
-hadoop_add_dual_library(hdfs
+set(HDFS_SOURCES
     exception.c
     jni_helper.c
     hdfs.c
     jclasses.c
     ${OS_DIR}/mutexes.c
     ${OS_DIR}/thread_local_storage.c
+)
+# We want to create an object library for hdfs
+# so that we can reuse it for the targets
+# (like get_jni_test), where we don't wish to
+# link to hdfs's publicly linked libraries
+# (like jvm)
+add_library(hdfs_obj OBJECT ${HDFS_SOURCES})
+set_target_properties(hdfs_obj PROPERTIES POSITION_INDEPENDENT_CODE ON)
+hadoop_add_dual_library(hdfs
+    $<TARGET_OBJECTS:hdfs_obj>
     $<TARGET_OBJECTS:x_platform_obj>
     $<TARGET_OBJECTS:x_platform_obj_c_api>
 )
@@ -74,8 +74,19 @@ add_executable(uri_test uri_test.cc)
 target_link_libraries(uri_test common gmock_main ${CMAKE_THREAD_LIBS_INIT})
 add_memcheck_test(uri uri_test)
 
+# We want to link to all the libraries of hdfs_static library,
+# except jvm.lib since we want to override some of the functions
+# provided by jvm.lib.
+get_target_property(HDFS_STATIC_LIBS_NO_JVM hdfs_static LINK_LIBRARIES)
+list(REMOVE_ITEM HDFS_STATIC_LIBS_NO_JVM ${JAVA_JVM_LIBRARY})
 add_executable(get_jni_test libhdfs_getjni_test.cc)
-target_link_libraries(get_jni_test gmock_main hdfs_static ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(get_jni_test
+    gmock_main
+    $<TARGET_OBJECTS:hdfs_obj>
+    $<TARGET_OBJECTS:x_platform_obj>
+    $<TARGET_OBJECTS:x_platform_obj_c_api>
+    ${HDFS_STATIC_LIBS_NO_JVM}
+    ${CMAKE_THREAD_LIBS_INIT})
 add_memcheck_test(get_jni get_jni_test)
 
 add_executable(remote_block_reader_test remote_block_reader_test.cc)
@@ -20,13 +20,26 @@
 #include <hdfs/hdfs.h>
 #include <jni.h>
 
+#ifdef WIN32
+#define DECLSPEC
+#else
+// Windows cribs when this is declared in the function definition,
+// However, Linux needs it.
+#define DECLSPEC _JNI_IMPORT_OR_EXPORT_
+#endif
+
 // hook the jvm runtime function. expect always failure
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void*) {
+DECLSPEC jint JNICALL JNI_GetDefaultJavaVMInitArgs(void*) {
   return 1;
 }
 
 // hook the jvm runtime function. expect always failure
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM**, void**, void*) {
+DECLSPEC jint JNICALL JNI_CreateJavaVM(JavaVM**, void**, void*) {
+  return 1;
+}
+
+// hook the jvm runtime function. expect always failure
+DECLSPEC jint JNICALL JNI_GetCreatedJavaVMs(JavaVM**, jsize, jsize*) {
   return 1;
 }
 
@@ -25,6 +25,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
@@ -476,6 +477,7 @@ private String getNodesImpl(final DatanodeReportType type) {
       innerinfo.put("infoSecureAddr", node.getInfoSecureAddr());
       innerinfo.put("xferaddr", node.getXferAddr());
       innerinfo.put("location", node.getNetworkLocation());
+      innerinfo.put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""));
      innerinfo.put("lastContact", getLastContact(node));
      innerinfo.put("usedSpace", node.getDfsUsed());
      innerinfo.put("adminState", node.getAdminState().toString());
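The new "uuid" entry is wrapped in Optional.ofNullable(...).orElse("") rather than stored directly, presumably so a DataNode that reports no UUID cannot put a null value into the unmodifiable map that is serialized to JSON. A minimal, self-contained sketch of that guard (the class name and the nullable uuid variable are illustrative stand-ins for node.getDatanodeUuid()):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class NullSafePut {
  public static void main(String[] args) {
    Map<String, Object> innerinfo = new HashMap<>();
    String uuid = null; // stand-in for node.getDatanodeUuid(), which may be null
    // Same guard as the patch: a null collapses to the empty string,
    // so the serialized report never carries a null value.
    innerinfo.put("uuid", Optional.ofNullable(uuid).orElse(""));
    System.out.println(innerinfo); // prints {uuid=}
  }
}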
@@ -492,6 +494,7 @@ private String getNodesImpl(final DatanodeReportType type) {
       innerinfo.put("volfails", -1); // node.getVolumeFailures()
       innerinfo.put("blockPoolUsedPercentStdDev",
           Util.getBlockPoolUsedPercentStdDev(storageReports));
+      innerinfo.put("lastBlockReport", getLastBlockReport(node));
       info.put(node.getXferAddrWithHostname(),
           Collections.unmodifiableMap(innerinfo));
     }
@@ -795,6 +798,10 @@ private long getLastContact(DatanodeInfo node) {
     return (now() - node.getLastUpdate()) / 1000;
   }
 
+  private long getLastBlockReport(DatanodeInfo node) {
+    return (now() - node.getLastBlockReportTime()) / 60000;
+  }
+
   /////////////////////////////////////////////////////////
   // NameNodeStatusMXBean
   /////////////////////////////////////////////////////////
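Note the units: getLastContact (just above) divides elapsed milliseconds by 1000 and so reports seconds, while the new getLastBlockReport divides by 60000 and reports minutes. The router-versus-NameNode comparison test added later in this changeset only passes if both sides use the same units for each key. A small standalone sketch of the arithmetic, with System.currentTimeMillis() standing in for the now() helper:

public class ElapsedUnits {
  // Mirrors getLastContact: elapsed time in seconds.
  static long lastContactSeconds(long nowMs, long lastUpdateMs) {
    return (nowMs - lastUpdateMs) / 1000;
  }

  // Mirrors getLastBlockReport: elapsed time in minutes.
  static long lastBlockReportMinutes(long nowMs, long lastReportMs) {
    return (nowMs - lastReportMs) / 60000;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long threeMinutesAgo = now - 3 * 60 * 1000;
    System.out.println(lastContactSeconds(now, threeMinutesAgo));     // 180
    System.out.println(lastBlockReportMinutes(now, threeMinutesAgo)); // 3
  }
}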
@@ -135,6 +135,8 @@
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+
+import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
@@ -1880,6 +1882,22 @@ public void testNamenodeMetrics() throws Exception {
     JSONObject jsonObject = new JSONObject(jsonString0);
     assertEquals(NUM_SUBCLUSTERS * NUM_DNS, jsonObject.names().length());
 
+    JSONObject jsonObjectNn =
+        new JSONObject(cluster.getRandomNamenode().getNamenode().getNamesystem().getLiveNodes());
+    // DN report by NN and router should be the same
+    String randomDn = (String) jsonObjectNn.names().get(0);
+    JSONObject randomReportNn = jsonObjectNn.getJSONObject(randomDn);
+    JSONObject randomReportRouter = jsonObject.getJSONObject(randomDn);
+    JSONArray keys = randomReportNn.names();
+    for (int i = 0; i < keys.length(); i++) {
+      String key = keys.getString(i);
+      // Skip the 2 keys that always return -1
+      if (key.equals("blockScheduled") || key.equals("volfails")) {
+        continue;
+      }
+      assertEquals(randomReportRouter.get(key), randomReportNn.get(key));
+    }
+
     // We should be caching this information
     String jsonString1 = metrics.getLiveNodes();
     assertEquals(jsonString0, jsonString1);
@@ -80,7 +80,7 @@ protected StringBuilder initialValue() {
   private static final BlockPlacementStatus ONE_RACK_PLACEMENT =
       new BlockPlacementStatusDefault(1, 1, 1);
 
-  private enum NodeNotChosenReason {
+  protected enum NodeNotChosenReason {
     NOT_IN_SERVICE("the node is not in service"),
     NODE_STALE("the node is stale"),
     NODE_TOO_BUSY("the node is too busy"),
@@ -88,7 +88,8 @@ private enum NodeNotChosenReason {
     TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
     NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
     NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable"),
-    NODE_SLOW("the node is too slow");
+    NODE_SLOW("the node is too slow"),
+    NODE_NOT_CONFORM_TO_UD("the node doesn't conform to upgrade domain policy");
 
     private final String text;
 
@@ -980,7 +981,7 @@ private static void logNodeIsNotChosen(DatanodeDescriptor node,
     logNodeIsNotChosen(node, reason, null);
   }
 
-  private static void logNodeIsNotChosen(DatanodeDescriptor node,
+  protected static void logNodeIsNotChosen(DatanodeDescriptor node,
       NodeNotChosenReason reason, String reasonDetails) {
     assert reason != null;
     if (LOG.isDebugEnabled()) {
@@ -74,6 +74,9 @@ protected boolean isGoodDatanode(DatanodeDescriptor node,
       Set<String> upgradeDomains = getUpgradeDomains(results);
       if (upgradeDomains.contains(node.getUpgradeDomain())) {
         isGoodTarget = false;
+        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_NOT_CONFORM_TO_UD,
+            "(The node's upgrade domain: " + node.getUpgradeDomain() +
+            " is already chosen)");
       }
     }
   }
@@ -130,7 +130,7 @@ public CopyFromLocalOperation(
     this.callbacks = callbacks;
     this.deleteSource = deleteSource;
     this.overwrite = overwrite;
-    this.source = source;
+    this.source = source.toUri().getScheme() == null ? new Path("file://", source) : source;
     this.destination = destination;
 
     // Capacity of 1 is a safe default for now since transfer manager can also
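The single changed line is the substance of this commit: a Path built from a bare local path has a null URI scheme, and the ternary prefixes "file://" so the source is explicitly a local-filesystem path before the copy begins. A hedged sketch of the same check outside the operation class (requires only hadoop-common on the classpath; the class name is illustrative):

import org.apache.hadoop.fs.Path;

public class SchemeDefaulting {
  public static void main(String[] args) {
    Path source = new Path("/tmp/data.txt");
    System.out.println(source.toUri().getScheme()); // null

    // Same ternary as CopyFromLocalOperation: default to the local
    // filesystem when no scheme was given, otherwise leave the path alone.
    Path resolved = source.toUri().getScheme() == null
        ? new Path("file://", source)
        : source;
    System.out.println(resolved.toUri().getScheme()); // file
  }
}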
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3a;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 
@@ -107,4 +108,15 @@ public void testOnlyFromLocal() throws Throwable {
     intercept(IllegalArgumentException.class,
         () -> getFileSystem().copyFromLocalFile(true, true, dest, dest));
   }
+
+  @Test
+  public void testCopyFromLocalWithNoFileScheme() throws IOException {
+    describe("Copying from local file with no file scheme to remote s3 destination");
+    File source = createTempFile("tempData");
+    Path dest = path(getMethodName());
+
+    Path sourcePathWithOutScheme = new Path(source.toURI().getPath());
+    assertNull(sourcePathWithOutScheme.toUri().getScheme());
+    getFileSystem().copyFromLocalFile(true, true, sourcePathWithOutScheme, dest);
+  }
 }