HDFS-14267. Add test_libhdfs_ops to libhdfs tests, mark libhdfs_read/write.c as examples. Contributed by Sahil Takiar.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Sahil Takiar 2019-02-20 11:36:37 -08:00 committed by Wei-Chiu Chuang
parent 1374f8f548
commit a30059bb61
8 changed files with 167 additions and 53 deletions


@@ -146,6 +146,7 @@ endif()
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
+add_subdirectory(main/native/libhdfs-examples)
 # Temporary fix to disable Libhdfs++ build on older systems that do not support thread_local
 include(CheckCXXSourceCompiles)


@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
${GENERATED_JAVAH}
${CMAKE_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs
${JNI_INCLUDE_DIRS}
${OS_DIR}
)
add_executable(hdfs_read libhdfs_read.c)
target_link_libraries(hdfs_read hdfs)
add_executable(hdfs_write libhdfs_write.c)
target_link_libraries(hdfs_write hdfs)


@@ -0,0 +1,24 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
The files in this directory are meant purely to provide additional examples of how to use libhdfs. They are compiled as
part of the build and are thus guaranteed to compile against the associated version of libhdfs. However, no tests exist
for these examples, so their functionality is not guaranteed.
The examples are written to run against a mini DFS cluster. The script `test-libhdfs.sh` can set up a mini DFS cluster
that the examples can run against. Again, none of this is tested, so it is not guaranteed to work.
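
For orientation only, a minimal libhdfs read program in the spirit of these examples looks roughly like the sketch below. This is not the committed libhdfs_read.c: the two-argument usage and the hdfsConnect("default", 0) call (which picks up fs.defaultFS from the loaded configuration) are simplifications for illustration; the committed examples take <filename> <filesize> <buffersize>.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "hdfs/hdfs.h"

/* Illustrative sketch of a libhdfs read loop; not the committed example. */
int main(int argc, char **argv) {
    if (argc != 3) {
        fprintf(stderr, "Usage: %s <filename> <buffersize>\n", argv[0]);
        return 1;
    }
    tSize bufferSize = (tSize) atoi(argv[2]);
    hdfsFS fs = hdfsConnect("default", 0);   /* "default" resolves fs.defaultFS from the config */
    if (!fs) {
        fprintf(stderr, "Failed to connect to HDFS\n");
        return 1;
    }
    hdfsFile file = hdfsOpenFile(fs, argv[1], O_RDONLY, bufferSize, 0, 0);
    if (!file) {
        fprintf(stderr, "Failed to open %s\n", argv[1]);
        return 1;
    }
    char *buffer = malloc(bufferSize);
    tSize n;
    while ((n = hdfsRead(fs, file, buffer, bufferSize)) > 0) {
        fwrite(buffer, 1, n, stdout);        /* echo the file contents to stdout */
    }
    free(buffer);
    hdfsCloseFile(fs, file);
    hdfsDisconnect(fs);
    return 0;
}

Built against libhdfs and pointed at a reachable cluster, such a program streams the file to stdout in buffer-sized chunks, which is essentially what the examples below exercise.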


@@ -21,6 +21,11 @@
 #include <stdio.h>
 #include <stdlib.h>
+/**
+ * An example of using libhdfs to read files. The usage of this file is as follows:
+ *
+ * Usage: hdfs_read <filename> <filesize> <buffersize>
+ */
 int main(int argc, char **argv) {
 hdfsFS fs;
 const char *rfile = argv[1];


@@ -23,6 +23,11 @@
 #include <stdlib.h>
 #include <sys/types.h>
+/**
+ * An example of using libhdfs to write files. The usage of this file is as follows:
+ *
+ * Usage: hdfs_write <filename> <filesize> <buffersize>
+ */
 int main(int argc, char **argv) {
 hdfsFS fs;
 const char *writeFileName = argv[1];


@@ -16,8 +16,10 @@
  * limitations under the License.
  */
+#include "expect.h"
 #include "hdfs/hdfs.h"
 #include "hdfs_test.h"
+#include "native_mini_dfs.h"
 #include "platform.h"
 #include <inttypes.h>
@@ -61,6 +63,17 @@ void permission_disp(short permissions, char *rtr) {
 }
 }
+/**
+ * Shutdown and free the given mini cluster, and then exit with the provided exit_code. This method is meant to be
+ * called with a non-zero exit code, which is why we ignore the return status of calling MiniDFSCluster#shutdown since
+ * the process is going to fail anyway.
+ */
+void shutdown_and_exit(struct NativeMiniDfsCluster* cl, int exit_code) {
+nmdShutdown(cl);
+nmdFree(cl);
+exit(exit_code);
+}
 int main(int argc, char **argv) {
 const char *writePath = "/tmp/testfile.txt";
 const char *fileContents = "Hello, World!";
@@ -88,16 +101,47 @@ int main(int argc, char **argv) {
 short newPerm = 0666;
 tTime newMtime, newAtime;
-fs = hdfsConnectNewInstance("default", 0);
+// Create and start the mini cluster
+struct NativeMiniDfsCluster* cl;
+struct NativeMiniDfsConf conf = {
+1, /* doFormat */
+};
+cl = nmdCreate(&conf);
+EXPECT_NONNULL(cl);
+EXPECT_ZERO(nmdWaitClusterUp(cl));
+tPort port;
+port = (tPort) nmdGetNameNodePort(cl);
+// Create a hdfs connection to the mini cluster
+struct hdfsBuilder *bld;
+bld = hdfsNewBuilder();
+EXPECT_NONNULL(bld);
+hdfsBuilderSetForceNewInstance(bld);
+hdfsBuilderSetNameNode(bld, "localhost");
+hdfsBuilderSetNameNodePort(bld, port);
+// The HDFS append tests require setting this property otherwise the tests fail with:
+//
+// IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being
+// available to try. The current failed datanode replacement policy is DEFAULT, and a client may configure this
+// via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
+//
+// It seems that when operating against a mini DFS cluster, some HDFS append tests require setting this property
+// (for example, see TestFileAppend#testMultipleAppends)
+hdfsBuilderConfSetStr(bld, "dfs.client.block.write.replace-datanode-on-failure.enable", "false");
+fs = hdfsBuilderConnect(bld);
 if(!fs) {
 fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 lfs = hdfsConnectNewInstance(NULL, 0);
 if(!lfs) {
 fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 {
@@ -106,7 +150,7 @@ int main(int argc, char **argv) {
 writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
 if(!writeFile) {
 fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
 num_written_bytes =
@@ -115,7 +159,7 @@ int main(int argc, char **argv) {
 if (num_written_bytes != strlen(fileContents) + 1) {
 fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
 (int)(strlen(fileContents) + 1), (int)num_written_bytes);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
@@ -124,19 +168,19 @@ int main(int argc, char **argv) {
 fprintf(stderr,
 "Failed to get current file position correctly! Got %" PRId64 "!\n",
 currentPos);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 if (hdfsFlush(fs, writeFile)) {
 fprintf(stderr, "Failed to 'flush' %s\n", writePath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Flushed %s successfully!\n", writePath);
 if (hdfsHFlush(fs, writeFile)) {
 fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "HFlushed %s successfully!\n", writePath);
@@ -150,20 +194,20 @@ int main(int argc, char **argv) {
 if (exists) {
 fprintf(stderr, "Failed to validate existence of %s\n", readPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
 if (!readFile) {
 fprintf(stderr, "Failed to open %s for reading!\n", readPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 if (!hdfsFileIsOpenForRead(readFile)) {
 fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
 "with O_RDONLY, and it did not show up as 'open for "
 "read'\n");
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
@@ -171,7 +215,7 @@ int main(int argc, char **argv) {
 seekPos = 1;
 if(hdfsSeek(fs, readFile, seekPos)) {
 fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 currentPos = -1;
@@ -179,14 +223,14 @@ int main(int argc, char **argv) {
 fprintf(stderr,
 "Failed to get current file position correctly! Got %" PRId64 "!\n",
 currentPos);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 if (!hdfsFileUsesDirectRead(readFile)) {
 fprintf(stderr, "Direct read support incorrectly not detected "
 "for HDFS filesystem\n");
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Direct read support detected for HDFS\n");
@@ -194,7 +238,7 @@ int main(int argc, char **argv) {
 // Test the direct read path
 if(hdfsSeek(fs, readFile, 0)) {
 fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 memset(buffer, 0, sizeof(buffer));
 num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
@@ -202,13 +246,13 @@ int main(int argc, char **argv) {
 if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
 fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
 fileContents, buffer, num_read_bytes);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
 num_read_bytes, buffer);
 if (hdfsSeek(fs, readFile, 0L)) {
 fprintf(stderr, "Failed to seek to file start!\n");
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 // Disable the direct read path so that we really go through the slow
@@ -233,7 +277,7 @@ int main(int argc, char **argv) {
 localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
 if(!localFile) {
 fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
@@ -245,7 +289,7 @@ int main(int argc, char **argv) {
 if (hdfsFileUsesDirectRead(localFile)) {
 fprintf(stderr, "Direct read support incorrectly detected for local "
 "filesystem\n");
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 hdfsCloseFile(lfs, localFile);
@@ -425,7 +469,7 @@ int main(int argc, char **argv) {
 appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
 if(!appendFile) {
 fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
@@ -436,7 +480,7 @@ int main(int argc, char **argv) {
 if (hdfsFlush(fs, appendFile)) {
 fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Flushed %s successfully!\n", appendPath);
@@ -446,7 +490,7 @@ int main(int argc, char **argv) {
 appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
 if(!appendFile) {
 fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
@@ -457,7 +501,7 @@ int main(int argc, char **argv) {
 if (hdfsFlush(fs, appendFile)) {
 fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Flushed %s successfully!\n", appendPath);
@@ -472,7 +516,7 @@ int main(int argc, char **argv) {
 readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
 if (!readFile) {
 fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
@@ -496,16 +540,16 @@ int main(int argc, char **argv) {
 // the actual fs user capabilities. Thus just create a file and read
 // the owner is correct.
-fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
+fs = hdfsConnectAsUserNewInstance("localhost", port, tuser);
 if(!fs) {
 fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
 if(!userFile) {
 fprintf(stderr, "Failed to open %s for writing!\n", userPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
@@ -515,7 +559,7 @@ int main(int argc, char **argv) {
 if (hdfsFlush(fs, userFile)) {
 fprintf(stderr, "Failed to 'flush' %s\n", userPath);
-exit(-1);
+shutdown_and_exit(cl, -1);
 }
 fprintf(stderr, "Flushed %s successfully!\n", userPath);
@@ -528,6 +572,9 @@ int main(int argc, char **argv) {
 totalResult += (hdfsDisconnect(fs) != 0);
+EXPECT_ZERO(nmdShutdown(cl));
+nmdFree(cl);
 if (totalResult != 0) {
 return -1;
 } else {
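
Taken together, the changes above turn test_libhdfs_ops into a self-contained test: it boots a native mini DFS cluster, connects through an hdfsBuilder pointed at the cluster's NameNode port, and tears the cluster down on every failure path via shutdown_and_exit. A condensed sketch of that setup/teardown skeleton follows; it is illustrative only, with the actual file-system assertions omitted, and is not the full test.

#include <stdlib.h>
#include "expect.h"
#include "hdfs/hdfs.h"
#include "native_mini_dfs.h"

/* Stop the mini cluster before exiting with a failure code; the nmdShutdown
 * return value is ignored because the process is failing anyway. */
static void shutdown_and_exit(struct NativeMiniDfsCluster *cl, int exit_code) {
    nmdShutdown(cl);
    nmdFree(cl);
    exit(exit_code);
}

int main(void) {
    /* Boot an in-process mini DFS cluster (formats the NameNode on startup). */
    struct NativeMiniDfsConf conf = { 1 /* doFormat */ };
    struct NativeMiniDfsCluster *cl = nmdCreate(&conf);
    EXPECT_NONNULL(cl);
    EXPECT_ZERO(nmdWaitClusterUp(cl));

    /* Point a builder at the cluster and relax the datanode-replacement
     * policy so the append tests pass against a single-datanode cluster. */
    struct hdfsBuilder *bld = hdfsNewBuilder();
    EXPECT_NONNULL(bld);
    hdfsBuilderSetForceNewInstance(bld);
    hdfsBuilderSetNameNode(bld, "localhost");
    hdfsBuilderSetNameNodePort(bld, (tPort) nmdGetNameNodePort(cl));
    hdfsBuilderConfSetStr(bld, "dfs.client.block.write.replace-datanode-on-failure.enable", "false");

    hdfsFS fs = hdfsBuilderConnect(bld);
    if (!fs) {
        shutdown_and_exit(cl, -1);   /* every early exit still stops the cluster */
    }

    /* ... file-system operations under test go here ... */

    hdfsDisconnect(fs);
    EXPECT_ZERO(nmdShutdown(cl));
    nmdFree(cl);
    return 0;
}

Routing every early exit through shutdown_and_exit keeps the JVM-backed mini cluster from leaking when an assertion fails partway through the test, which is exactly why the diff replaces each bare exit(-1) call.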


@@ -55,11 +55,9 @@ set_target_properties(hdfs PROPERTIES
 SOVERSION ${LIBHDFS_VERSION})
 build_libhdfs_test(test_libhdfs_ops hdfs_static test_libhdfs_ops.c)
-link_libhdfs_test(test_libhdfs_ops hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_reads hdfs_static test_libhdfs_read.c)
-link_libhdfs_test(test_libhdfs_reads hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_write hdfs_static test_libhdfs_write.c)
-link_libhdfs_test(test_libhdfs_write hdfs_static ${JAVA_JVM_LIBRARY})
+link_libhdfs_test(test_libhdfs_ops hdfs_static native_mini_dfs ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_ops hdfs_static)
 build_libhdfs_test(test_libhdfs_threaded hdfs_static expect.c test_libhdfs_threaded.c ${OS_DIR}/thread.c)
 link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs)
 add_libhdfs_test(test_libhdfs_threaded hdfs_static)