From cb74f39697b57ec1189073fe128ce5ed3e7d73f0 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Wed, 24 Sep 2014 08:22:02 -0700
Subject: [PATCH] HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hadoop-hdfs/src/CMakeLists.txt          | 17 +++++++---
 .../src/main/native/libhdfs/hdfs.c          |  2 +-
 .../native/libhdfs/test/test_libhdfs_ops.c  | 22 ++++++-------
 .../src/main/native/libhdfs/test/vecsum.c   | 33 +++++++++++++++++--
 .../native/libhdfs/test_libhdfs_threaded.c  |  2 +-
 6 files changed, 58 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index af6c135be8..4a52d448ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -935,6 +935,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7130. TestDataTransferKeepalive fails intermittently on Windows.
     (cnauroth)
 
+    HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index 854988b9c5..227be45da5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -211,11 +211,18 @@ if (NOT WIN32)
     add_executable(test_libhdfs_vecsum
         main/native/libhdfs/test/vecsum.c
     )
-    target_link_libraries(test_libhdfs_vecsum
-        hdfs
-        pthread
-        rt
-    )
+    if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+        )
+    else (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+            rt
+        )
+    endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 endif(NOT WIN32)
 
 IF(REQUIRE_LIBWEBHDFS)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index ebdcad3e38..dc8f39d5ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -3215,7 +3215,7 @@ static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo)
     free(hdfsFileInfo->mName);
     free(hdfsFileInfo->mOwner);
     free(hdfsFileInfo->mGroup);
-    memset(hdfsFileInfo, 0, sizeof(hdfsFileInfo));
+    memset(hdfsFileInfo, 0, sizeof(*hdfsFileInfo));
 }
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
index a6e1a13abb..aa9441a0ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
@@ -122,11 +122,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr,
-                    "Failed to get current file position correctly! Got %ld!\n",
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
Got %" PRId64 "!\n", currentPos); exit(-1); } - fprintf(stderr, "Current position: %ld\n", currentPos); + fprintf(stderr, "Current position: %" PRId64 "\n", currentPos); if (hdfsFlush(fs, writeFile)) { fprintf(stderr, "Failed to 'flush' %s\n", writePath); @@ -177,11 +177,11 @@ int main(int argc, char **argv) { currentPos = -1; if((currentPos = hdfsTell(fs, readFile)) != seekPos) { fprintf(stderr, - "Failed to get current file position correctly! Got %ld!\n", + "Failed to get current file position correctly! Got %" PRId64 "!\n", currentPos); exit(-1); } - fprintf(stderr, "Current position: %ld\n", currentPos); + fprintf(stderr, "Current position: %" PRId64 "\n", currentPos); if (!hdfsFileUsesDirectRead(readFile)) { fprintf(stderr, "Direct read support incorrectly not detected " @@ -283,9 +283,9 @@ int main(int argc, char **argv) { fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!")); totalResult += (resp ? 0 : 1); - fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs)); - fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs)); - fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs)); + fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs)); + fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs)); + fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs)); fileInfo = NULL; if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) { @@ -293,8 +293,8 @@ int main(int argc, char **argv) { fprintf(stderr, "Name: %s, ", fileInfo->mName); fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind)); fprintf(stderr, "Replication: %d, ", fileInfo->mReplication); - fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize); - fprintf(stderr, "Size: %ld, ", fileInfo->mSize); + fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize); + fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize); fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); fprintf(stderr, "Owner: %s, ", fileInfo->mOwner); fprintf(stderr, "Group: %s, ", fileInfo->mGroup); @@ -312,8 +312,8 @@ int main(int argc, char **argv) { fprintf(stderr, "Name: %s, ", fileList[i].mName); fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind); fprintf(stderr, "Replication: %d, ", fileList[i].mReplication); - fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize); - fprintf(stderr, "Size: %ld, ", fileList[i].mSize); + fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize); + fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize); fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod)); fprintf(stderr, "Owner: %s, ", fileList[i].mOwner); fprintf(stderr, "Group: %s, ", fileList[i].mGroup); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c index fd18c9db5e..80a64b4f73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c @@ -29,6 +29,12 @@ #include #include +#ifdef __MACH__ // OS X does not have clock_gettime +#include +#include +#include +#endif + #include "config.h" #include "hdfs.h" @@ -49,6 +55,29 @@ struct stopwatch { struct timespec stop; }; + +#ifdef __MACH__ +static int clock_gettime_mono(struct timespec * ts) { + static mach_timebase_info_data_t tb; + static uint64_t timestart = 0; + uint64_t t = 0; + if (timestart == 0) { + 
+        mach_timebase_info(&tb);
+        timestart = mach_absolute_time();
+    }
+    t = mach_absolute_time() - timestart;
+    t *= tb.numer;
+    t /= tb.denom;
+    ts->tv_sec = t / 1000000000ULL;
+    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
+    return 0;
+}
+#else
+static int clock_gettime_mono(struct timespec * ts) {
+    return clock_gettime(CLOCK_MONOTONIC, ts);
+}
+#endif
+
 
 static struct stopwatch *stopwatch_create(void)
 {
     struct stopwatch *watch;
@@ -58,7 +87,7 @@ static struct stopwatch *stopwatch_create(void)
         fprintf(stderr, "failed to allocate memory for stopwatch\n");
         goto error;
     }
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->start)) {
+    if (clock_gettime_mono(&watch->start)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));
@@ -76,7 +105,7 @@ static void stopwatch_stop(struct stopwatch *watch,
 {
     double elapsed, rate;
 
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->stop)) {
+    if (clock_gettime_mono(&watch->stop)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
index 64c1a8f863..016f0b19dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
@@ -84,7 +84,7 @@ static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs
 
 static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
 {
-    uint64_t blockSize;
+    int64_t blockSize;
     int ret;
 
     blockSize = hdfsGetDefaultBlockSize(fs);
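
A note on the hdfs.c hunk: the one-character change from sizeof(hdfsFileInfo) to sizeof(*hdfsFileInfo) matters because the argument is a pointer, so the old memset zeroed only the first sizeof-pointer bytes (8 on LP64) and left the rest of the struct stale. A minimal standalone sketch of the difference, illustrative only, with a made-up struct standing in for hdfsFileInfo:

    #include <stdio.h>
    #include <string.h>

    /* hypothetical stand-in for hdfsFileInfo, not the real type */
    struct info {
        char *name;        /* like mName */
        long  size;        /* like mSize */
        long  blockSize;   /* like mBlockSize */
    };

    int main(void)
    {
        struct info fi = { "x", 42, 128 };
        struct info *p = &fi;

        memset(p, 0, sizeof(p));   /* bug: zeroes only the pointer's size */
        printf("buggy: size=%ld blockSize=%ld\n", fi.size, fi.blockSize);

        memset(p, 0, sizeof(*p));  /* fix: zeroes the whole struct */
        printf("fixed: size=%ld blockSize=%ld\n", fi.size, fi.blockSize);
        return 0;
    }

On LP64 the first printf typically still shows 42 and 128; only the fixed form clears every field.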
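
The %ld to PRId64 conversions in test_libhdfs_ops.c are a portability fix, not cosmetics: hdfsTell(), hdfsGetDefaultBlockSize(), mSize, and mBlockSize are all int64_t (tOffset in hdfs.h), and on OS X int64_t is long long, so %ld is a format mismatch that -Wformat flags and that is formally undefined behaviour. A minimal sketch of the portable form (illustrative, not part of the patch):

    #include <inttypes.h>   /* PRId64 */
    #include <stdio.h>

    int main(void)
    {
        int64_t pos = 1234567890123LL;  /* stand-in for an hdfsTell() result */

        /* printf("%ld\n", pos);  mismatched on OS X and on 32-bit Linux */
        printf("position: %" PRId64 "\n", pos);  /* correct everywhere */
        return 0;
    }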
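
The vecsum.c change supplies the monotonic clock itself: OS X (before 10.12) has no clock_gettime(), so the shim converts mach_absolute_time() ticks to nanoseconds using the numer/denom scale from mach_timebase_info(), while every other platform falls through to clock_gettime(CLOCK_MONOTONIC, ...). Below is a standalone sketch of the same technique (illustrative; it mirrors the patch but computes the nanosecond remainder with % instead of a subtraction):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #ifdef __MACH__
    #include <mach/mach_time.h>

    static int clock_gettime_mono(struct timespec *ts)
    {
        static mach_timebase_info_data_t tb;
        static uint64_t timestart = 0;
        uint64_t t;

        if (timestart == 0) {            /* first call: cache the timebase */
            mach_timebase_info(&tb);
            timestart = mach_absolute_time();
        }
        t = mach_absolute_time() - timestart;
        t = t * tb.numer / tb.denom;     /* ticks -> nanoseconds */
        ts->tv_sec  = t / 1000000000ULL;
        ts->tv_nsec = t % 1000000000ULL;
        return 0;
    }
    #else
    static int clock_gettime_mono(struct timespec *ts)
    {
        return clock_gettime(CLOCK_MONOTONIC, ts);
    }
    #endif

    int main(void)
    {
        struct timespec a, b;

        clock_gettime_mono(&a);
        sleep(1);
        clock_gettime_mono(&b);
        printf("elapsed: ~%lld s\n", (long long)(b.tv_sec - a.tv_sec));
        return 0;
    }

This is also why the CMakeLists.txt hunk drops rt on Darwin: the clock functions live in libSystem there, while older glibc keeps clock_gettime() in librt, so the non-Darwin branch must go on linking rt.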