HDFS-16445. Make HDFS count, mkdir, rm cross platform (#3945)

This commit is contained in:
Gautham B A 2022-02-02 11:12:17 +05:30 committed by GitHub
parent ec2fd01333
commit ed44662968
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 1484 additions and 287 deletions

View File

@ -32,6 +32,9 @@ add_executable(hdfs_tool_tests
hdfs-du-mock.cc
hdfs-copy-to-local-mock.cc
hdfs-move-to-local-mock.cc
hdfs-count-mock.cc
hdfs-mkdir-mock.cc
hdfs-rm-mock.cc
main.cc)
target_include_directories(hdfs_tool_tests PRIVATE
../tools
@ -48,6 +51,9 @@ target_include_directories(hdfs_tool_tests PRIVATE
../../tools/hdfs-chmod
../../tools/hdfs-copy-to-local
../../tools/hdfs-move-to-local
../../tools/hdfs-count
../../tools/hdfs-mkdir
../../tools/hdfs-rm
../../tools/hdfs-cat)
target_link_libraries(hdfs_tool_tests PRIVATE
gmock_main
@ -63,5 +69,8 @@ target_link_libraries(hdfs_tool_tests PRIVATE
hdfs_chmod_lib
hdfs_copyToLocal_lib
hdfs_moveToLocal_lib
hdfs_count_lib
hdfs_mkdir_lib
hdfs_rm_lib
hdfs_cat_lib)
add_test(hdfs_tool_tests hdfs_tool_tests)

View File

@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "hdfs-count-mock.h"
#include "hdfs-tool-tests.h"
namespace hdfs::tools::test {
CountMock::~CountMock() = default;

/**
 * Registers the gMock expectations on this instance that correspond to the
 * given test case.
 *
 * @param test_case Pointer to the free function defining the test case.
 * @param args The command-line arguments that the test case passes.
 */
void CountMock::SetExpectations(
    std::function<std::unique_ptr<CountMock>()> test_case,
    const std::vector<std::string> &args) const {
  // Get the pointer to the function that defines the test case
  const auto test_case_func =
      test_case.target<std::unique_ptr<CountMock> (*)()>();
  ASSERT_NE(test_case_func, nullptr);

  // Set the expected method calls and their corresponding arguments for each
  // test case. A test case matches at most one branch, so we return as soon
  // as the expectations are set.
  if (*test_case_func == &CallHelp<CountMock>) {
    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassAPath<CountMock>) {
    // The lone argument is the path; -q was not passed
    const auto arg1 = args[0];
    EXPECT_CALL(*this, HandlePath(false, arg1))
        .Times(1)
        .WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassQOptAndPath<CountMock>) {
    // args[0] is the -q option itself; only the path (args[1]) is forwarded
    // to HandlePath, so no local is kept for args[0].
    const auto arg2 = args[1];
    EXPECT_CALL(*this, HandlePath(true, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }
}
} // namespace hdfs::tools::test

View File

@ -0,0 +1,68 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFSPP_TOOLS_HDFS_COUNT_MOCK
#define LIBHDFSPP_TOOLS_HDFS_COUNT_MOCK
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "hdfs-count.h"
namespace hdfs::tools::test {
/**
 * {@class CountMock} is an {@class Count} whereby it mocks the
 * HandleHelp and HandlePath methods for testing their functionality.
 */
class CountMock : public hdfs::tools::Count {
public:
  /**
   * {@inheritdoc}
   */
  CountMock(const int argc, char **argv) : Count(argc, argv) {}

  // Abiding to the Rule of 5: mocks are neither copied nor moved
  CountMock(const CountMock &) = delete;
  CountMock(CountMock &&) = delete;
  CountMock &operator=(const CountMock &) = delete;
  CountMock &operator=(CountMock &&) = delete;
  // Defined out-of-line (in the .cc file) to anchor the vtable
  ~CountMock() override;

  /**
   * Defines the methods and the corresponding arguments that are expected
   * to be called on this instance of {@link HdfsTool} for the given test case.
   *
   * @param test_case An {@link std::function} object that points to the
   * function defining the test case
   * @param args The arguments that are passed to this test case
   */
  void SetExpectations(std::function<std::unique_ptr<CountMock>()> test_case,
                       const std::vector<std::string> &args = {}) const;

  // Mirrors Count::HandleHelp()
  MOCK_METHOD(bool, HandleHelp, (), (const, override));
  // Mirrors Count::HandlePath(show_quota, path)
  MOCK_METHOD(bool, HandlePath, (const bool, const std::string &),
              (const, override));
};
} // namespace hdfs::tools::test
#endif

View File

@ -0,0 +1,83 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "hdfs-mkdir-mock.h"
#include "hdfs-tool-tests.h"
namespace hdfs::tools::test {
MkdirMock::~MkdirMock() = default;

/**
 * Registers the gMock expectations on this instance that correspond to the
 * given test case.
 *
 * @param test_case Pointer to the free function defining the test case.
 * @param args The command-line arguments that the test case passes.
 */
void MkdirMock::SetExpectations(
    std::function<std::unique_ptr<MkdirMock>()> test_case,
    const std::vector<std::string> &args) const {
  // Get the pointer to the function that defines the test case
  const auto test_case_func =
      test_case.target<std::unique_ptr<MkdirMock> (*)()>();
  ASSERT_NE(test_case_func, nullptr);

  // Set the expected method calls and their corresponding arguments for each
  // test case. A test case matches at most one branch, so we return as soon
  // as the expectations are set.
  if (*test_case_func == &CallHelp<MkdirMock>) {
    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassAPath<MkdirMock>) {
    // Only a path was passed: no -p and no -m permissions
    const auto arg1 = args[0];
    const std::optional<std::string> permissions = std::nullopt;
    EXPECT_CALL(*this, HandlePath(false, permissions, arg1))
        .Times(1)
        .WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassPOptAndPath<MkdirMock>) {
    // args[0] is the -p option; args[1] is the path
    const auto arg1 = args[1];
    const std::optional<std::string> permissions = std::nullopt;
    EXPECT_CALL(*this, HandlePath(true, permissions, arg1))
        .Times(1)
        .WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassMOptPermissionsAndAPath<MkdirMock>) {
    // args: [-m, permissions, path]
    const auto arg1 = args[1];
    const auto arg2 = args[2];
    const auto permissions = std::optional(arg1);
    EXPECT_CALL(*this, HandlePath(false, permissions, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassMPOptsPermissionsAndAPath<MkdirMock>) {
    // args: [-m, permissions, -p, path]
    const auto arg1 = args[1];
    const auto arg2 = args[3];
    const auto permissions = std::optional(arg1);
    EXPECT_CALL(*this, HandlePath(true, permissions, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }
}
} // namespace hdfs::tools::test

View File

@ -0,0 +1,70 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFSPP_TOOLS_HDFS_MKDIR_MOCK
#define LIBHDFSPP_TOOLS_HDFS_MKDIR_MOCK
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "hdfs-mkdir.h"
namespace hdfs::tools::test {
/**
 * {@class MkdirMock} is an {@class Mkdir} whereby it mocks the
 * HandleHelp and HandlePath methods for testing their functionality.
 */
class MkdirMock : public hdfs::tools::Mkdir {
public:
  /**
   * {@inheritdoc}
   */
  MkdirMock(const int argc, char **argv) : Mkdir(argc, argv) {}

  // Abiding to the Rule of 5: mocks are neither copied nor moved
  MkdirMock(const MkdirMock &) = delete;
  MkdirMock(MkdirMock &&) = delete;
  MkdirMock &operator=(const MkdirMock &) = delete;
  MkdirMock &operator=(MkdirMock &&) = delete;
  // Defined out-of-line (in the .cc file) to anchor the vtable
  ~MkdirMock() override;

  /**
   * Defines the methods and the corresponding arguments that are expected
   * to be called on this instance of {@link HdfsTool} for the given test case.
   *
   * @param test_case An {@link std::function} object that points to the
   * function defining the test case
   * @param args The arguments that are passed to this test case
   */
  void SetExpectations(std::function<std::unique_ptr<MkdirMock>()> test_case,
                       const std::vector<std::string> &args = {}) const;

  // Mirrors Mkdir::HandleHelp()
  MOCK_METHOD(bool, HandleHelp, (), (const, override));
  // Mirrors Mkdir::HandlePath(create_parents, permissions, path)
  MOCK_METHOD(bool, HandlePath,
              (bool, const std::optional<std::string> &, const std::string &),
              (const, override));
};
} // namespace hdfs::tools::test
#endif

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "hdfs-rm-mock.h"
#include "hdfs-tool-tests.h"
namespace hdfs::tools::test {
RmMock::~RmMock() = default;

/**
 * Registers the gMock expectations on this instance that correspond to the
 * given test case.
 *
 * @param test_case Pointer to the free function defining the test case.
 * @param args The command-line arguments that the test case passes.
 */
void RmMock::SetExpectations(std::function<std::unique_ptr<RmMock>()> test_case,
                             const std::vector<std::string> &args) const {
  // Get the pointer to the function that defines the test case
  const auto test_case_func = test_case.target<std::unique_ptr<RmMock> (*)()>();
  ASSERT_NE(test_case_func, nullptr);

  // Set the expected method calls and their corresponding arguments for each
  // test case. A test case matches at most one branch, so we return as soon
  // as the expectations are set.
  if (*test_case_func == &CallHelp<RmMock>) {
    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassAPath<RmMock>) {
    // The lone argument is the path; the recursive option was not passed
    const auto arg1 = args[0];
    EXPECT_CALL(*this, HandlePath(false, arg1))
        .Times(1)
        .WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassRecursivePath<RmMock>) {
    // args[0] is the recursive option; args[1] is the path
    const auto arg1 = args[1];
    EXPECT_CALL(*this, HandlePath(true, arg1))
        .Times(1)
        .WillOnce(testing::Return(true));
  }
}
} // namespace hdfs::tools::test

View File

@ -0,0 +1,68 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFSPP_TOOLS_HDFS_RM_MOCK
#define LIBHDFSPP_TOOLS_HDFS_RM_MOCK
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "hdfs-rm.h"
namespace hdfs::tools::test {
/**
 * {@class RmMock} is an {@class Rm} whereby it mocks the
 * HandleHelp and HandlePath methods for testing their functionality.
 */
class RmMock : public hdfs::tools::Rm {
public:
  /**
   * {@inheritdoc}
   */
  RmMock(const int argc, char **argv) : Rm(argc, argv) {}

  // Abiding to the Rule of 5: mocks are neither copied nor moved
  RmMock(const RmMock &) = delete;
  RmMock(RmMock &&) = delete;
  RmMock &operator=(const RmMock &) = delete;
  RmMock &operator=(RmMock &&) = delete;
  // Defined out-of-line (in the .cc file) to anchor the vtable
  ~RmMock() override;

  /**
   * Defines the methods and the corresponding arguments that are expected
   * to be called on this instance of {@link HdfsTool} for the given test case.
   *
   * @param test_case An {@link std::function} object that points to the
   * function defining the test case
   * @param args The arguments that are passed to this test case
   */
  void SetExpectations(std::function<std::unique_ptr<RmMock>()> test_case,
                       const std::vector<std::string> &args = {}) const;

  // Mirrors Rm::HandleHelp()
  MOCK_METHOD(bool, HandleHelp, (), (const, override));
  // Mirrors Rm::HandlePath(recursive, path)
  MOCK_METHOD(bool, HandlePath, (const bool, const std::string &),
              (const, override));
};
} // namespace hdfs::tools::test
#endif

View File

@ -25,13 +25,16 @@
#include "hdfs-chmod-mock.h"
#include "hdfs-chown-mock.h"
#include "hdfs-copy-to-local-mock.h"
#include "hdfs-count-mock.h"
#include "hdfs-create-snapshot-mock.h"
#include "hdfs-delete-snapshot-mock.h"
#include "hdfs-df-mock.h"
#include "hdfs-disallow-snapshot-mock.h"
#include "hdfs-du-mock.h"
#include "hdfs-mkdir-mock.h"
#include "hdfs-move-to-local-mock.h"
#include "hdfs-rename-snapshot-mock.h"
#include "hdfs-rm-mock.h"
#include "hdfs-tool-test-fixtures.h"
#include "hdfs-tool-tests.h"
@ -110,6 +113,27 @@ INSTANTIATE_TEST_SUITE_P(
testing::Values(CallHelp<hdfs::tools::test::MoveToLocalMock>,
Pass2Paths<hdfs::tools::test::MoveToLocalMock>));
// Positive tests for hdfs_count: help, a bare path, and -q followed by a path
// are all valid invocations.
INSTANTIATE_TEST_SUITE_P(
    HdfsCount, HdfsToolBasicTest,
    testing::Values(CallHelp<hdfs::tools::test::CountMock>,
                    PassAPath<hdfs::tools::test::CountMock>,
                    PassQOptAndPath<hdfs::tools::test::CountMock>));

// Positive tests for hdfs_mkdir: help, a bare path, -p with a path, and -m
// with permissions (with or without -p) are all valid invocations.
INSTANTIATE_TEST_SUITE_P(
    HdfsMkdir, HdfsToolBasicTest,
    testing::Values(
        CallHelp<hdfs::tools::test::MkdirMock>,
        PassAPath<hdfs::tools::test::MkdirMock>,
        PassPOptAndPath<hdfs::tools::test::MkdirMock>,
        PassMOptPermissionsAndAPath<hdfs::tools::test::MkdirMock>,
        PassMPOptsPermissionsAndAPath<hdfs::tools::test::MkdirMock>));

// Positive tests for hdfs_rm: help, a bare path, and the recursive option
// followed by a path are all valid invocations.
INSTANTIATE_TEST_SUITE_P(
    HdfsRm, HdfsToolBasicTest,
    testing::Values(CallHelp<hdfs::tools::test::RmMock>,
                    PassAPath<hdfs::tools::test::RmMock>,
                    PassRecursivePath<hdfs::tools::test::RmMock>));
// Negative tests
INSTANTIATE_TEST_SUITE_P(
HdfsAllowSnapshot, HdfsToolNegativeTestThrows,
@ -153,6 +177,41 @@ INSTANTIATE_TEST_SUITE_P(
HdfsMoveToLocal, HdfsToolNegativeTestThrows,
testing::Values(Pass3Paths<hdfs::tools::test::MoveToLocalMock>));
// Invocations that must make hdfs_count throw: extra paths or options it does
// not define.
INSTANTIATE_TEST_SUITE_P(
    HdfsCount, HdfsToolNegativeTestThrows,
    testing::Values(Pass2Paths<hdfs::tools::test::CountMock>,
                    Pass3Paths<hdfs::tools::test::CountMock>,
                    PassNOptAndAPath<hdfs::tools::test::CountMock>,
                    PassRecursive<hdfs::tools::test::CountMock>));

// Invocations that must make hdfs_mkdir throw: extra paths or options it does
// not define.
INSTANTIATE_TEST_SUITE_P(
    HdfsMkdir, HdfsToolNegativeTestThrows,
    testing::Values(Pass2Paths<hdfs::tools::test::MkdirMock>,
                    Pass3Paths<hdfs::tools::test::MkdirMock>,
                    PassNOptAndAPath<hdfs::tools::test::MkdirMock>,
                    PassRecursive<hdfs::tools::test::MkdirMock>,
                    PassMOpt<hdfs::tools::test::MkdirMock>));

// Invocations that must make hdfs_rm throw: extra paths or options it does
// not define.
INSTANTIATE_TEST_SUITE_P(
    HdfsRm, HdfsToolNegativeTestThrows,
    testing::Values(Pass2Paths<hdfs::tools::test::RmMock>,
                    Pass3Paths<hdfs::tools::test::RmMock>,
                    PassNOptAndAPath<hdfs::tools::test::RmMock>,
                    PassRecursiveOwnerAndAPath<hdfs::tools::test::RmMock>,
                    PassMOpt<hdfs::tools::test::RmMock>));

// Invocations that are incomplete but must fail gracefully (no throw): a
// defined option without its required path argument.
INSTANTIATE_TEST_SUITE_P(
    HdfsRm, HdfsToolNegativeTestNoThrow,
    testing::Values(PassRecursive<hdfs::tools::test::RmMock>));

INSTANTIATE_TEST_SUITE_P(
    HdfsMkdir, HdfsToolNegativeTestNoThrow,
    testing::Values(PassPOpt<hdfs::tools::test::MkdirMock>));

INSTANTIATE_TEST_SUITE_P(
    HdfsCount, HdfsToolNegativeTestNoThrow,
    testing::Values(PassQOpt<hdfs::tools::test::CountMock>));

INSTANTIATE_TEST_SUITE_P(
    HdfsMoveToLocal, HdfsToolNegativeTestNoThrow,
    testing::Values(PassAPath<hdfs::tools::test::MoveToLocalMock>));

View File

@ -177,4 +177,98 @@ template <class T> std::unique_ptr<T> PassRecursivePermissionsAndAPath() {
return hdfs_tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -q" (option without a path)
 * and registers the matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassQOpt() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string q_opt("-q");
  static char *argv[] = {exe_name.data(), q_opt.data()};
  constexpr auto argc = 2;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassQOpt<T>, {q_opt});
  return tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -q a/b/c" and registers the
 * matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassQOptAndPath() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string q_opt("-q");
  static std::string path("a/b/c");
  static char *argv[] = {exe_name.data(), q_opt.data(), path.data()};
  constexpr auto argc = 3;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassQOptAndPath<T>, {q_opt, path});
  return tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -p" (option without a path)
 * and registers the matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassPOpt() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string p_opt("-p");
  static char *argv[] = {exe_name.data(), p_opt.data()};
  constexpr auto argc = 2;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassPOpt<T>, {p_opt});
  return tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -m" (option without its
 * permissions argument) and registers the matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassMOpt() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string m_opt("-m");
  static char *argv[] = {exe_name.data(), m_opt.data()};
  constexpr auto argc = 2;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassMOpt<T>, {m_opt});
  return tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -p a/b/c" and registers the
 * matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassPOptAndPath() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string p_opt("-p");
  static std::string path("a/b/c");
  static char *argv[] = {exe_name.data(), p_opt.data(), path.data()};
  constexpr auto argc = 3;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassPOptAndPath<T>, {p_opt, path});
  return tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -m 757 g/h/i" and registers
 * the matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassMOptPermissionsAndAPath() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string m_opt("-m");
  static std::string mode("757");
  static std::string path("g/h/i");
  static char *argv[] = {exe_name.data(), m_opt.data(), mode.data(),
                         path.data()};
  constexpr auto argc = 4;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassMOptPermissionsAndAPath<T>, {m_opt, mode, path});
  return tool;
}
/**
 * Invokes the tool under test as "hdfs_tool_name -m 757 -p g/h/i" and
 * registers the matching expectations on the mock.
 */
template <class T> std::unique_ptr<T> PassMPOptsPermissionsAndAPath() {
  // argv strings are static so the pointers outlive the returned tool
  static std::string exe_name("hdfs_tool_name");
  static std::string m_opt("-m");
  static std::string mode("757");
  static std::string p_opt("-p");
  static std::string path("g/h/i");
  static char *argv[] = {exe_name.data(), m_opt.data(), mode.data(),
                         p_opt.data(), path.data()};
  constexpr auto argc = 5;

  auto tool = std::make_unique<T>(argc, argv);
  tool->SetExpectations(PassMPOptsPermissionsAndAPath<T>,
                        {m_opt, mode, p_opt, path});
  return tool;
}
#endif

View File

@ -44,11 +44,9 @@ add_subdirectory(hdfs-chmod)
add_executable(hdfs_find hdfs_find.cc)
target_link_libraries(hdfs_find tools_common hdfspp_static)
add_executable(hdfs_mkdir hdfs_mkdir.cc)
target_link_libraries(hdfs_mkdir tools_common hdfspp_static)
add_subdirectory(hdfs-mkdir)
add_executable(hdfs_rm hdfs_rm.cc)
target_link_libraries(hdfs_rm tools_common hdfspp_static)
add_subdirectory(hdfs-rm)
add_executable(hdfs_ls hdfs_ls.cc)
target_link_libraries(hdfs_ls tools_common hdfspp_static)
@ -56,8 +54,7 @@ target_link_libraries(hdfs_ls tools_common hdfspp_static)
add_executable(hdfs_stat hdfs_stat.cc)
target_link_libraries(hdfs_stat tools_common hdfspp_static)
add_executable(hdfs_count hdfs_count.cc)
target_link_libraries(hdfs_count tools_common hdfspp_static)
add_subdirectory(hdfs-count)
add_subdirectory(hdfs-df)

View File

@ -0,0 +1,27 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Static library holding the count-tool implementation; reused by both the
# hdfs_count executable below and the hdfs_tool_tests unit tests.
add_library(hdfs_count_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> hdfs-count.cc)
target_include_directories(hdfs_count_lib PRIVATE ../../tools hdfs-count ${Boost_INCLUDE_DIRS})
target_link_libraries(hdfs_count_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)
# The hdfs_count command-line executable: a thin main() over hdfs_count_lib
add_executable(hdfs_count main.cc)
target_include_directories(hdfs_count PRIVATE ../../tools)
target_link_libraries(hdfs_count PRIVATE hdfs_count_lib)
install(TARGETS hdfs_count RUNTIME DESTINATION bin)

View File

@ -0,0 +1,125 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include "hdfs-count.h"
#include "tools_common.h"
namespace hdfs::tools {
Count::Count(const int argc, char **argv) : HdfsTool(argc, argv) {}
/**
 * Registers the command-line options understood by hdfs_count and parses
 * argc_/argv_ into opt_val_.
 *
 * @return true on successful parsing (boost throws on invalid input).
 */
bool Count::Initialize() {
  // Declare the supported options; the descriptions appear in --help output
  opt_desc_.add_options()(
      "help,h",
      "Count the number of directories, files and bytes under the given path")(
      "show-quota,q", "Output additional columns before the rest: "
                      "QUOTA, SPACE_QUOTA, SPACE_CONSUMED")(
      "path", po::value<std::string>(),
      "The path to the file that needs to be count-ed");

  // We allow only one argument to be passed to this tool. An exception is
  // thrown if multiple arguments are passed.
  pos_opt_desc_.add("path", 1);

  const auto parsed = po::command_line_parser(argc_, argv_)
                          .options(opt_desc_)
                          .positional(pos_opt_desc_)
                          .run();
  po::store(parsed, opt_val_);
  po::notify(opt_val_);
  return true;
}
/**
 * Builds the usage/help text for hdfs_count.
 *
 * @return The multi-line description shown by -h and on invalid invocations.
 */
std::string Count::GetDescription() const {
  // Assembled as plain string appends; '\n' matches the std::endl output of a
  // string stream byte-for-byte.
  std::string desc;
  desc += "Usage: hdfs_count [OPTION] FILE\n\n";
  desc += "Count the number of directories, files and bytes under the path "
          "that match the specified FILE pattern.\n";
  desc += "The output columns with -count are: DIR_COUNT, FILE_COUNT, "
          "CONTENT_SIZE, PATHNAME\n\n";
  desc += " -q output additional columns before the rest: QUOTA, "
          "SPACE_QUOTA, SPACE_CONSUMED\n";
  desc += " -h display this help and exit\n\n";
  desc += "Examples:\n";
  desc += "hdfs_count hdfs://localhost.localdomain:8020/dir\n";
  desc += "hdfs_count -q /dir1/dir2\n";
  return desc;
}
/**
 * Runs the count tool: parses the command line, then dispatches to help or
 * path handling.
 *
 * @return A boolean indicating whether the requested operation succeeded.
 */
bool Count::Do() {
  // Guard clauses: bail out early on any setup or validation failure
  if (!Initialize()) {
    std::cerr << "Unable to initialize HDFS count tool" << std::endl;
    return false;
  }
  if (!ValidateConstraints()) {
    std::cout << GetDescription();
    return false;
  }
  if (opt_val_.count("help") > 0) {
    return HandleHelp();
  }
  if (opt_val_.count("path") == 0) {
    // Neither help nor a path was requested: nothing to do
    return false;
  }

  const auto quota_requested = opt_val_.count("show-quota") > 0;
  return HandlePath(quota_requested, opt_val_["path"].as<std::string>());
}
// Prints the usage text to stdout; always succeeds.
bool Count::HandleHelp() const {
  std::cout << GetDescription();
  return true;
}

// Connects to the file system and prints the content summary of the given
// path; show_quota adds the quota columns to the output.
bool Count::HandlePath(const bool show_quota, const std::string &path) const {
  // Building a URI object from the given uri_path
  auto uri = hdfs::parse_path_or_exit(path);
  // NOTE(review): the second doConnect argument presumably toggles
  // write/read-only or path-based connect behavior -- confirm in tools_common
  const auto fs = hdfs::doConnect(uri, false);
  if (fs == nullptr) {
    std::cerr << "Could not connect the file system." << std::endl;
    return false;
  }
  hdfs::ContentSummary content_summary;
  const auto status = fs->GetContentSummary(uri.get_path(), content_summary);
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    return false;
  }
  // ContentSummary::str(show_quota) formats the row printed to stdout
  std::cout << content_summary.str(show_quota) << std::endl;
  return true;
}
} // namespace hdfs::tools

View File

@ -0,0 +1,94 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFSPP_TOOLS_HDFS_COUNT
#define LIBHDFSPP_TOOLS_HDFS_COUNT
#include <string>
#include <boost/program_options.hpp>
#include "hdfs-tool.h"
namespace hdfs::tools {
/**
 * {@class Count} is an {@class HdfsTool} that counts the number of directories,
 * files and bytes under the given path.
 */
class Count : public HdfsTool {
public:
  /**
   * {@inheritdoc}
   */
  Count(int argc, char **argv);

  // Abiding to the Rule of 5
  Count(const Count &) = default;
  Count(Count &&) = default;
  // NOTE(review): copy/move construction is defaulted while assignment is
  // deleted -- confirm this asymmetry is intentional
  Count &operator=(const Count &) = delete;
  Count &operator=(Count &&) = delete;
  ~Count() override = default;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] std::string GetDescription() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Do() override;

protected:
  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Initialize() override;

  /**
   * {@inheritdoc}
   * At least one argument (beyond the program name) must be present.
   */
  [[nodiscard]] bool ValidateConstraints() const override { return argc_ > 1; }

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool HandleHelp() const override;

  /**
   * Handle the path argument that's passed to this tool.
   *
   * @param show_quota Output additional columns before the rest: QUOTA,
   * SPACE_QUOTA, SPACE_CONSUMED.
   * @param path The path to the directory for which the files, directories and
   * bytes need to be counted.
   *
   * @return A boolean indicating the result of this operation.
   */
  [[nodiscard]] virtual bool HandlePath(bool show_quota,
                                        const std::string &path) const;

private:
  /**
   * A boost data-structure containing the description of positional arguments
   * passed to the command-line.
   */
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools
#endif

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <exception>
#include <iostream>
#include <google/protobuf/stubs/common.h>
#include "hdfs-count.h"
/**
 * Entry point of the hdfs_count command-line tool.
 *
 * @return 0 on success, EXIT_FAILURE on any error.
 */
int main(int argc, char *argv[]) {
  const auto result = std::atexit([]() -> void {
    // Clean up static data on exit and prevent valgrind memory leaks
    google::protobuf::ShutdownProtobufLibrary();
  });
  if (result != 0) {
    std::cerr << "Error: Unable to schedule clean-up tasks for HDFS count "
                 "tool, exiting"
              << std::endl;
    // Return (rather than std::exit) so stack unwinding still happens
    return EXIT_FAILURE;
  }

  hdfs::tools::Count count(argc, argv);
  auto success = false;
  try {
    success = count.Do();
  } catch (const std::exception &e) {
    std::cerr << "Error: " << e.what() << std::endl;
  }

  // Use return instead of std::exit: std::exit does not destroy local
  // objects, so `count` would otherwise never be destructed.
  return success ? 0 : EXIT_FAILURE;
}

View File

@ -0,0 +1,27 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Static library holding the mkdir-tool implementation; reused by both the
# hdfs_mkdir executable below and the hdfs_tool_tests unit tests.
add_library(hdfs_mkdir_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> hdfs-mkdir.cc)
target_include_directories(hdfs_mkdir_lib PRIVATE ../../tools hdfs-mkdir ${Boost_INCLUDE_DIRS})
target_link_libraries(hdfs_mkdir_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)
# The hdfs_mkdir command-line executable: a thin main() over hdfs_mkdir_lib
add_executable(hdfs_mkdir main.cc)
target_include_directories(hdfs_mkdir PRIVATE ../../tools)
target_link_libraries(hdfs_mkdir PRIVATE hdfs_mkdir_lib)
install(TARGETS hdfs_mkdir RUNTIME DESTINATION bin)

View File

@ -0,0 +1,140 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include "hdfs-mkdir.h"
#include "tools_common.h"
namespace hdfs::tools {
Mkdir::Mkdir(const int argc, char **argv) : HdfsTool(argc, argv) {}
// Register the command-line options understood by this tool and parse
// argc_/argv_ into opt_val_. Boost throws on malformed input (e.g. more than
// one positional argument), which the caller surfaces to the user.
bool Mkdir::Initialize() {
  auto add_options = opt_desc_.add_options();
  add_options("help,h", "Create directory if it does not exist");
  add_options("create-parents,p", "Create parent directories as needed");
  add_options(
      "mode,m", po::value<std::string>(),
      "Set the permissions for the new directory (and newly created parents if "
      "any). The permissions are specified in octal representation");
  add_options("path", po::value<std::string>(),
              "The path to the directory that needs to be created");

  // We allow only one argument to be passed to this tool. An exception is
  // thrown if multiple arguments are passed.
  pos_opt_desc_.add("path", 1);

  po::store(po::command_line_parser(argc_, argv_)
                .options(opt_desc_)
                .positional(pos_opt_desc_)
                .run(),
            opt_val_);
  po::notify(opt_val_);
  return true;
}
// Build the usage/help text shown for hdfs_mkdir, one output line per
// statement to keep each line of the message easy to locate and edit.
std::string Mkdir::GetDescription() const {
  std::stringstream desc;
  desc << "Usage: hdfs_mkdir [OPTION] DIRECTORY" << std::endl;
  desc << std::endl;
  desc << "Create the DIRECTORY(ies), if they do not already exist."
       << std::endl;
  desc << std::endl;
  desc << " -p make parent directories as needed" << std::endl;
  desc << " -m MODE set file mode (octal permissions) for the new "
          "DIRECTORY(ies)"
       << std::endl;
  desc << " -h display this help and exit" << std::endl;
  desc << std::endl;
  desc << "Examples:" << std::endl;
  desc << "hdfs_mkdir hdfs://localhost.localdomain:8020/dir1/dir2" << std::endl;
  desc << "hdfs_mkdir -p /extant_dir/non_extant_dir/non_extant_dir/new_dir"
       << std::endl;
  return desc.str();
}
// Tool entry point: parse the options, then dispatch to the help or the
// path handler depending on what the user asked for.
bool Mkdir::Do() {
  if (!Initialize()) {
    std::cerr << "Unable to initialize HDFS mkdir tool" << std::endl;
    return false;
  }

  // Too few arguments: show the usage text and fail.
  if (!ValidateConstraints()) {
    std::cout << GetDescription();
    return false;
  }

  if (opt_val_.count("help") > 0) {
    return HandleHelp();
  }

  if (opt_val_.count("path") > 0) {
    const auto path = opt_val_["path"].as<std::string>();
    const auto create_parents = opt_val_.count("create-parents") > 0;
    // "mode" is optional: absence is represented as std::nullopt so that
    // HandlePath can fall back to the filesystem's default permission mask.
    const auto permissions =
        opt_val_.count("mode") > 0
            ? std::optional(opt_val_["mode"].as<std::string>())
            : std::nullopt;
    return HandlePath(create_parents, permissions, path);
  }

  // No path was supplied.
  return false;
}
// Print the tool's usage text to stdout; always succeeds.
bool Mkdir::HandleHelp() const {
  std::cout << GetDescription();
  return true;
}
// Connect to the filesystem named by `path` and create the directory there,
// stamping the requested (or default) permissions on every directory created.
bool Mkdir::HandlePath(const bool create_parents,
                       const std::optional<std::string> &permissions,
                       const std::string &path) const {
  // Building a URI object from the given uri_path
  auto uri = hdfs::parse_path_or_exit(path);

  const auto fs = hdfs::doConnect(uri, false);
  if (fs == nullptr) {
    std::cerr << "Could not connect the file system." << std::endl;
    return false;
  }

  const auto status =
      fs->Mkdirs(uri.get_path(), GetPermissions(permissions), create_parents);
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    return false;
  }

  return true;
}
/**
 * Convert the user-supplied octal permissions string into the numeric mask
 * expected by libhdfs++.
 *
 * @param permissions The octal permissions string, or std::nullopt when the
 * user did not pass -m.
 * @return The parsed permission bits, or the filesystem's default permission
 * mask when no permissions were supplied.
 * @throws std::invalid_argument if the string is not a valid octal permission
 * value. (Resolves the previous TODO about unchecked std::strtol results; the
 * exception is caught in main() and reported as "Error: ...".)
 */
uint16_t Mkdir::GetPermissions(const std::optional<std::string> &permissions) {
  if (permissions) {
    char *end = nullptr;
    errno = 0;
    const auto parsed = std::strtol(permissions->c_str(), &end, 8);
    // Reject strings that are empty, contain non-octal trailing characters,
    // overflow long, or fall outside the 12-bit POSIX permission range.
    if (errno != 0 || end == permissions->c_str() || *end != '\0' ||
        parsed < 0 || parsed > 07777) {
      throw std::invalid_argument("Invalid octal permissions: " + *permissions);
    }
    return static_cast<uint16_t>(parsed);
  }
  return hdfs::FileSystem::GetDefaultPermissionMask();
}
} // namespace hdfs::tools

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFSPP_TOOLS_HDFS_MKDIR
#define LIBHDFSPP_TOOLS_HDFS_MKDIR
#include <cstdint>
#include <optional>
#include <string>

#include <boost/program_options.hpp>

#include "hdfs-tool.h"
namespace hdfs::tools {
/**
 * {@class Mkdir} is an {@class HdfsTool} that creates directory if it does not
 * exist.
 */
class Mkdir : public HdfsTool {
public:
  /**
   * {@inheritdoc}
   */
  Mkdir(int argc, char **argv);

  // Abiding to the Rule of 5
  Mkdir(const Mkdir &) = default;
  Mkdir(Mkdir &&) = default;
  Mkdir &operator=(const Mkdir &) = delete;
  Mkdir &operator=(Mkdir &&) = delete;
  ~Mkdir() override = default;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] std::string GetDescription() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Do() override;

protected:
  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Initialize() override;

  /**
   * {@inheritdoc}
   *
   * At least one argument (besides the program name) must be present.
   */
  [[nodiscard]] bool ValidateConstraints() const override { return argc_ > 1; }

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool HandleHelp() const override;

  /**
   * Handle the path argument that's passed to this tool.
   *
   * @param create_parents Creates parent directories as needed if this boolean
   * is set to true.
   * @param permissions An octal representation of the permissions to be stamped
   * to each directory that gets created.
   * @param path The path in the filesystem where the directory must be created.
   *
   * @return A boolean indicating the result of this operation.
   */
  [[nodiscard]] virtual bool
  HandlePath(bool create_parents, const std::optional<std::string> &permissions,
             const std::string &path) const;

  /**
   * @param permissions The permissions string to convert to octal value.
   * @return The octal representation of the permissions supplied as parameter
   * to this tool.
   */
  [[nodiscard]] static uint16_t
  GetPermissions(const std::optional<std::string> &permissions);

private:
  /**
   * A boost data-structure containing the description of positional arguments
   * passed to the command-line.
   */
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools
#endif

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <exception>
#include <iostream>
#include <google/protobuf/stubs/common.h>
#include "hdfs-mkdir.h"
int main(int argc, char *argv[]) {
  // Register a teardown handler so protobuf's static data is released on exit
  // and valgrind does not report it as leaked.
  if (std::atexit([]() -> void {
        google::protobuf::ShutdownProtobufLibrary();
      }) != 0) {
    std::cerr << "Error: Unable to schedule clean-up tasks for HDFS mkdir "
                 "tool, exiting"
              << std::endl;
    std::exit(EXIT_FAILURE);
  }

  hdfs::tools::Mkdir mkdir(argc, argv);
  auto ok = false;
  try {
    ok = mkdir.Do();
  } catch (const std::exception &e) {
    // Any failure inside the tool surfaces here as a single error line.
    std::cerr << "Error: " << e.what() << std::endl;
  }

  if (!ok) {
    std::exit(EXIT_FAILURE);
  }
  return 0;
}

View File

@ -0,0 +1,27 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build the rm tool's implementation as a static library, reusing the common
# hdfs_tool_obj object files shared by all the HDFS CLI tools.
add_library(hdfs_rm_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> hdfs-rm.cc)
target_include_directories(hdfs_rm_lib PRIVATE ../../tools hdfs-rm ${Boost_INCLUDE_DIRS})
target_link_libraries(hdfs_rm_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)

# The hdfs_rm executable is a thin main() wrapper around hdfs_rm_lib.
add_executable(hdfs_rm main.cc)
target_include_directories(hdfs_rm PRIVATE ../../tools)
target_link_libraries(hdfs_rm PRIVATE hdfs_rm_lib)

# Install the binary alongside the other HDFS native tools.
install(TARGETS hdfs_rm RUNTIME DESTINATION bin)

View File

@ -0,0 +1,114 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include "hdfs-rm.h"
#include "tools_common.h"
namespace hdfs::tools {
Rm::Rm(const int argc, char **argv) : HdfsTool(argc, argv) {}
// Register the command-line options understood by this tool and parse
// argc_/argv_ into opt_val_. Boost throws on malformed input (e.g. more than
// one positional argument), which the caller surfaces to the user.
bool Rm::Initialize() {
  auto add_options = opt_desc_.add_options();
  add_options("help,h", "Remove/unlink the files or directories.");
  add_options("recursive,R",
              "Remove the directories and their contents recursively.");
  add_options("path", po::value<std::string>(),
              "The path to the file that needs to be removed.");

  // We allow only one argument to be passed to this tool. An exception is
  // thrown if multiple arguments are passed.
  pos_opt_desc_.add("path", 1);

  po::store(po::command_line_parser(argc_, argv_)
                .options(opt_desc_)
                .positional(pos_opt_desc_)
                .run(),
            opt_val_);
  po::notify(opt_val_);
  return true;
}
// Build the usage/help text shown for hdfs_rm, one output line per statement
// to keep each line of the message easy to locate and edit.
std::string Rm::GetDescription() const {
  std::stringstream desc;
  desc << "Usage: hdfs_rm [OPTION] FILE" << std::endl;
  desc << std::endl;
  desc << "Remove (unlink) the FILE(s) or directory(ies)." << std::endl;
  desc << std::endl;
  desc << " -R remove directories and their contents recursively" << std::endl;
  desc << " -h display this help and exit" << std::endl;
  desc << std::endl;
  desc << "Examples:" << std::endl;
  desc << "hdfs_rm hdfs://localhost.localdomain:8020/dir/file" << std::endl;
  desc << "hdfs_rm -R /dir1/dir2" << std::endl;
  return desc.str();
}
// Tool entry point: parse the options, then dispatch to the help or the
// path handler depending on what the user asked for.
bool Rm::Do() {
  if (!Initialize()) {
    std::cerr << "Unable to initialize HDFS rm tool" << std::endl;
    return false;
  }

  // Too few arguments: show the usage text and fail.
  if (!ValidateConstraints()) {
    std::cout << GetDescription();
    return false;
  }

  if (opt_val_.count("help") > 0) {
    return HandleHelp();
  }

  if (opt_val_.count("path") > 0) {
    const auto path = opt_val_["path"].as<std::string>();
    const auto recursive = opt_val_.count("recursive") > 0;
    return HandlePath(recursive, path);
  }

  // No path was supplied.
  return false;
}
// Print the tool's usage text to stdout; always succeeds.
bool Rm::HandleHelp() const {
  std::cout << GetDescription();
  return true;
}
// Connect to the filesystem named by `path` and delete the file or directory
// there, recursing into sub-directories when requested.
bool Rm::HandlePath(const bool recursive, const std::string &path) const {
  // Building a URI object from the given uri_path
  auto uri = hdfs::parse_path_or_exit(path);

  const auto fs = hdfs::doConnect(uri, false);
  if (fs == nullptr) {
    std::cerr << "Could not connect the file system." << std::endl;
    return false;
  }

  const auto status = fs->Delete(uri.get_path(), recursive);
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    return false;
  }

  return true;
}
} // namespace hdfs::tools

View File

@ -0,0 +1,92 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFSPP_TOOLS_HDFS_RM
#define LIBHDFSPP_TOOLS_HDFS_RM
#include <string>
#include <boost/program_options.hpp>
#include "hdfs-tool.h"
namespace hdfs::tools {
/**
 * {@class Rm} is an {@class HdfsTool} that removes/unlinks the files or
 * directories.
 */
class Rm : public HdfsTool {
public:
  /**
   * {@inheritdoc}
   */
  Rm(int argc, char **argv);

  // Abiding to the Rule of 5
  Rm(const Rm &) = default;
  Rm(Rm &&) = default;
  Rm &operator=(const Rm &) = delete;
  Rm &operator=(Rm &&) = delete;
  ~Rm() override = default;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] std::string GetDescription() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Do() override;

protected:
  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Initialize() override;

  /**
   * {@inheritdoc}
   *
   * At least one argument (besides the program name) must be present.
   */
  [[nodiscard]] bool ValidateConstraints() const override { return argc_ > 1; }

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool HandleHelp() const override;

  /**
   * Handle the path argument that's passed to this tool.
   *
   * @param recursive Perform this operation recursively on the sub-directories.
   * @param path The path to the file/directory that needs to be removed.
   *
   * @return A boolean indicating the result of this operation.
   */
  [[nodiscard]] virtual bool HandlePath(bool recursive,
                                        const std::string &path) const;

private:
  /**
   * A boost data-structure containing the description of positional arguments
   * passed to the command-line.
   */
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools
#endif

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <exception>
#include <iostream>
#include <google/protobuf/stubs/common.h>
#include "hdfs-rm.h"
int main(int argc, char *argv[]) {
  // Register a teardown handler so protobuf's static data is released on exit
  // and valgrind does not report it as leaked.
  if (std::atexit([]() -> void {
        google::protobuf::ShutdownProtobufLibrary();
      }) != 0) {
    std::cerr << "Error: Unable to schedule clean-up tasks for HDFS rm "
                 "tool, exiting"
              << std::endl;
    std::exit(EXIT_FAILURE);
  }

  hdfs::tools::Rm rm(argc, argv);
  auto ok = false;
  try {
    ok = rm.Do();
  } catch (const std::exception &e) {
    // Any failure inside the tool surfaces here as a single error line.
    std::cerr << "Error: " << e.what() << std::endl;
  }

  if (!ok) {
    std::exit(EXIT_FAILURE);
  }
  return 0;
}

View File

@ -1,93 +0,0 @@
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
#include <google/protobuf/stubs/common.h>
#include <unistd.h>
#include "tools_common.h"
// Print the hdfs_count help text to stdout, one output line per statement.
void usage(){
  std::cout << "Usage: hdfs_count [OPTION] FILE" << std::endl;
  std::cout << std::endl;
  std::cout << "Count the number of directories, files and bytes under the path that match the specified FILE pattern." << std::endl;
  std::cout << "The output columns with -count are: DIR_COUNT, FILE_COUNT, CONTENT_SIZE, PATHNAME" << std::endl;
  std::cout << std::endl;
  std::cout << " -q output additional columns before the rest: QUOTA, SPACE_QUOTA, SPACE_CONSUMED" << std::endl;
  std::cout << " -h display this help and exit" << std::endl;
  std::cout << std::endl;
  std::cout << "Examples:" << std::endl;
  std::cout << "hdfs_count hdfs://localhost.localdomain:8020/dir" << std::endl;
  std::cout << "hdfs_count -q /dir1/dir2" << std::endl;
}
// Entry point of the legacy hdfs_count tool: parse -q/-h with getopt, connect
// to the filesystem named by the path argument, and print its content summary.
int main(int argc, char *argv[]) {
  //We should have at least 2 arguments
  if (argc < 2) {
    usage();
    exit(EXIT_FAILURE);
  }
  bool quota = false;
  int input;

  //Using GetOpt to read in the values; opterr=0 silences getopt's own
  //diagnostics so we can print our own below.
  opterr = 0;
  while ((input = getopt(argc, argv, "qh")) != -1) {
    switch (input)
    {
    case 'q':
      quota = true;
      break;
    case 'h':
      usage();
      exit(EXIT_SUCCESS);
    case '?':
      // Unknown flag: report it (printably if possible) and bail out.
      if (isprint(optopt))
        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
      else
        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
      usage();
      exit(EXIT_FAILURE);
    default:
      exit(EXIT_FAILURE);
    }
  }
  // NOTE(review): optind is not checked against argc here; an options-only
  // invocation (e.g. "hdfs_count -q") would read argv[argc] — confirm.
  std::string uri_path = argv[optind];

  //Building a URI object from the given uri_path
  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);

  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, false);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    exit(EXIT_FAILURE);
  }

  hdfs::ContentSummary content_summary;
  hdfs::Status status = fs->GetContentSummary(uri.get_path(), content_summary);
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    exit(EXIT_FAILURE);
  }
  // str(quota) prepends the quota columns when -q was given.
  std::cout << content_summary.str(quota) << std::endl;

  // Clean up static data and prevent valgrind memory leaks
  google::protobuf::ShutdownProtobufLibrary();
  return 0;
}

View File

@ -1,98 +0,0 @@
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
#include <google/protobuf/stubs/common.h>
#include <unistd.h>
#include "tools_common.h"
// Print the hdfs_mkdir help text to stdout, one output line per statement.
void usage(){
  std::cout << "Usage: hdfs_mkdir [OPTION] DIRECTORY" << std::endl;
  std::cout << std::endl;
  std::cout << "Create the DIRECTORY(ies), if they do not already exist." << std::endl;
  std::cout << std::endl;
  std::cout << " -p make parent directories as needed" << std::endl;
  std::cout << " -m MODE set file mode (octal permissions) for the new DIRECTORY(ies)" << std::endl;
  std::cout << " -h display this help and exit" << std::endl;
  std::cout << std::endl;
  std::cout << "Examples:" << std::endl;
  std::cout << "hdfs_mkdir hdfs://localhost.localdomain:8020/dir1/dir2" << std::endl;
  std::cout << "hdfs_mkdir -p /extant_dir/non_extant_dir/non_extant_dir/new_dir" << std::endl;
}
// Entry point of the legacy hdfs_mkdir tool: parse -p/-m/-h with getopt,
// connect to the filesystem named by the path argument, and create the
// directory with the requested (or default) permissions.
int main(int argc, char *argv[]) {
  //We should have at least 2 arguments
  if (argc < 2) {
    usage();
    exit(EXIT_FAILURE);
  }
  bool create_parents = false;
  uint16_t permissions = hdfs::FileSystem::GetDefaultPermissionMask();
  int input;

  //Using GetOpt to read in the values; opterr=0 silences getopt's own
  //diagnostics so we can print our own below.
  opterr = 0;
  while ((input = getopt(argc, argv, "pm:h")) != -1) {
    switch (input)
    {
    case 'p':
      create_parents = true;
      break;
    case 'h':
      usage();
      exit(EXIT_SUCCESS);
    case 'm':
      //Get octal permissions for the new DIRECTORY(ies)
      // NOTE(review): strtol's result is not validated here — a non-octal
      // MODE silently becomes 0; the replacement tool fixes this.
      permissions = strtol(optarg, NULL, 8);
      break;
    case '?':
      // Unknown flag or -m without its argument: report and bail out.
      if (optopt == 'm')
        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
      else if (isprint(optopt))
        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
      else
        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
      usage();
      exit(EXIT_FAILURE);
    default:
      exit(EXIT_FAILURE);
    }
  }
  // NOTE(review): optind is not checked against argc here; an options-only
  // invocation (e.g. "hdfs_mkdir -p") would read argv[argc] — confirm.
  std::string uri_path = argv[optind];

  //Building a URI object from the given uri_path
  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);

  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, false);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    exit(EXIT_FAILURE);
  }

  hdfs::Status status = fs->Mkdirs(uri.get_path(), permissions, create_parents);
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    exit(EXIT_FAILURE);
  }

  // Clean up static data and prevent valgrind memory leaks
  google::protobuf::ShutdownProtobufLibrary();
  return 0;
}

View File

@ -1,90 +0,0 @@
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
#include <google/protobuf/stubs/common.h>
#include <unistd.h>
#include "tools_common.h"
// Print the hdfs_rm help text to stdout, one output line per statement.
void usage(){
  std::cout << "Usage: hdfs_rm [OPTION] FILE" << std::endl;
  std::cout << std::endl;
  std::cout << "Remove (unlink) the FILE(s) or directory(ies)." << std::endl;
  std::cout << std::endl;
  std::cout << " -R remove directories and their contents recursively" << std::endl;
  std::cout << " -h display this help and exit" << std::endl;
  std::cout << std::endl;
  std::cout << "Examples:" << std::endl;
  std::cout << "hdfs_rm hdfs://localhost.localdomain:8020/dir/file" << std::endl;
  std::cout << "hdfs_rm -R /dir1/dir2" << std::endl;
}
// Entry point of the legacy hdfs_rm tool: parse -R/-h with getopt, connect to
// the filesystem named by the path argument, and delete the file/directory.
int main(int argc, char *argv[]) {
  //We should have at least 2 arguments
  if (argc < 2) {
    usage();
    exit(EXIT_FAILURE);
  }
  bool recursive = false;
  int input;

  //Using GetOpt to read in the values; opterr=0 silences getopt's own
  //diagnostics so we can print our own below.
  opterr = 0;
  while ((input = getopt(argc, argv, "Rh")) != -1) {
    switch (input)
    {
    case 'R':
      recursive = true;
      break;
    case 'h':
      usage();
      exit(EXIT_SUCCESS);
    case '?':
      // Unknown flag: report it (printably if possible) and bail out.
      if (isprint(optopt))
        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
      else
        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
      usage();
      exit(EXIT_FAILURE);
    default:
      exit(EXIT_FAILURE);
    }
  }
  // NOTE(review): optind is not checked against argc here; an options-only
  // invocation (e.g. "hdfs_rm -R") would read argv[argc] — confirm.
  std::string uri_path = argv[optind];

  //Building a URI object from the given uri_path
  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);

  // NOTE(review): this legacy tool connects with the second argument set to
  // true, unlike the replacement Rm tool which passes false — confirm intent.
  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    exit(EXIT_FAILURE);
  }

  hdfs::Status status = fs->Delete(uri.get_path(), recursive);
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    exit(EXIT_FAILURE);
  }

  // Clean up static data and prevent valgrind memory leaks
  google::protobuf::ShutdownProtobufLibrary();
  return 0;
}