results = getResponse.getEntries();
+ MountTable existingEntry = null;
+ for (MountTable result : results) {
+ if (mount.equals(result.getSourcePath())) {
+ existingEntry = result;
+ break;
+ }
+ }
+ return existingEntry;
+ }
+
+ /**
+ * Disable write by making the mount point readonly.
+ *
+ * @param mount the mount point to set readonly.
+ * @param conf the configuration of the router.
+ */
+ static void disableWrite(String mount, Configuration conf)
+ throws IOException {
+ setMountReadOnly(mount, true, conf);
+ }
+
+ /**
+ * Enable write by clearing the readonly flag of the mount point.
+ *
+ * @param mount the mount point whose readonly flag is cleared.
+ * @param conf the configuration of the router.
+ */
+ static void enableWrite(String mount, Configuration conf) throws IOException {
+ setMountReadOnly(mount, false, conf);
+ }
+
+ /**
+ * Enable or disable the readonly flag of the mount point.
+ *
+ * @param mount the mount point.
+ * @param readOnly true to make the mount point readonly, false to clear it.
+ * @param conf the configuration of the router.
+ */
+ private static void setMountReadOnly(String mount, boolean readOnly,
+ Configuration conf) throws IOException {
+ String address = conf.getTrimmed(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+ RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
+ InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
+ RouterClient rClient = new RouterClient(routerSocket, conf);
+ try {
+ MountTableManager mountTable = rClient.getMountTableManager();
+
+ MountTable originalEntry = getMountEntry(mount, mountTable);
+ if (originalEntry == null) {
+ throw new IOException("Mount table " + mount + " doesn't exist");
+ } else {
+ originalEntry.setReadOnly(readOnly);
+ UpdateMountTableEntryRequest updateRequest =
+ UpdateMountTableEntryRequest.newInstance(originalEntry);
+ UpdateMountTableEntryResponse response =
+ mountTable.updateMountTableEntry(updateRequest);
+ if (!response.getStatus()) {
+ throw new IOException(
+ "Failed to update mount table " + mount + " with readonly="
+ + readOnly);
+ }
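+ // Refresh the mount table entries cache so the change takes effect
+ // immediately on the router.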
+ rClient.getMountTableManager().refreshMountTableEntries(
+ RefreshMountTableEntriesRequest.newInstance());
+ }
+ } finally {
+ rClient.close();
+ }
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ Text.writeString(out, mount);
+ Text.writeString(out, dstPath);
+ Text.writeString(out, dstNs);
+ conf.write(out);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ mount = Text.readString(in);
+ dstPath = Text.readString(in);
+ dstNs = Text.readString(in);
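+ // Configuration(false) skips the default resources; only the entries
+ // written by write() are restored below.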
+ conf = new Configuration(false);
+ conf.readFields(in);
+ }
+
+ @VisibleForTesting
+ String getMount() {
+ return mount;
+ }
+
+ @VisibleForTesting
+ String getDstPath() {
+ return dstPath;
+ }
+
+ @VisibleForTesting
+ String getDstNs() {
+ return dstNs;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java
new file mode 100644
index 0000000000..94ae6160b0
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/TrashProcedure.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools.fedbalance;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption;
+
+/**
+ * This procedure moves the source path to trash, deletes it or skips it,
+ * depending on the trash option.
+ */
+public class TrashProcedure extends BalanceProcedure {
+
+ private DistributedFileSystem srcFs;
+ private FedBalanceContext context;
+ private Configuration conf;
+
+ public TrashProcedure() {}
+
+ /**
+ * The constructor of TrashProcedure.
+ *
+ * @param name the name of the procedure.
+ * @param nextProcedure the name of the next procedure.
+ * @param delayDuration the duration to wait when this procedure is delayed.
+ * @param context the federation balance context.
+ */
+ public TrashProcedure(String name, String nextProcedure, long delayDuration,
+ FedBalanceContext context) throws IOException {
+ super(name, nextProcedure, delayDuration);
+ this.context = context;
+ this.conf = context.getConf();
+ this.srcFs = (DistributedFileSystem) context.getSrc().getFileSystem(conf);
+ }
+
+ @Override
+ public boolean execute() throws IOException {
+ moveToTrash();
+ return true;
+ }
+
+ /**
+ * Move the source path to trash, delete it, or skip, depending on the
+ * trash option.
+ */
+ void moveToTrash() throws IOException {
+ Path src = context.getSrc();
+ if (srcFs.exists(src)) {
+ TrashOption trashOption = context.getTrashOpt();
+ switch (trashOption) {
+ case TRASH:
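+ // Trash.moveToAppropriateTrash is a no-op when trash is disabled, so
+ // force a positive client-side trash interval here.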
+ conf.setFloat(FS_TRASH_INTERVAL_KEY, 60);
+ if (!Trash.moveToAppropriateTrash(srcFs, src, conf)) {
+ throw new IOException("Failed move " + src + " to trash.");
+ }
+ break;
+ case DELETE:
+ if (!srcFs.delete(src, true)) {
+ throw new IOException("Failed delete " + src);
+ }
+ LOG.info("{} is deleted.", src);
+ break;
+ case SKIP:
+ break;
+ default:
+ throw new IOException("Unexpected trash option=" + trashOption);
+ }
+ }
+ }
+
+ public FedBalanceContext getContext() {
+ return context;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ context.write(out);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ context = new FedBalanceContext();
+ context.readFields(in);
+ conf = context.getConf();
+ srcFs = (DistributedFileSystem) context.getSrc().getFileSystem(conf);
+ }
+}
diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java
new file mode 100644
index 0000000000..3007402f69
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/package-info.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * FedBalance is a tool for balancing data across federation clusters.
+ */
+@InterfaceAudience.Public
+package org.apache.hadoop.tools.fedbalance;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJob.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJob.java
similarity index 99%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJob.java
rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJob.java
index 847092a2aa..8d5f9d401a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJob.java
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJob.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.builder.EqualsBuilder;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournal.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournal.java
similarity index 96%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournal.java
rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournal.java
index 011ae857bc..da8eb74b2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournal.java
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournal.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.hadoop.conf.Configurable;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournalInfoHDFS.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournalInfoHDFS.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournalInfoHDFS.java
rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournalInfoHDFS.java
index 4e759d8d7f..0da8c36637 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceJournalInfoHDFS.java
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceJournalInfoHDFS.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -37,9 +37,9 @@
import java.net.URI;
import java.net.URISyntaxException;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.SCHEDULER_JOURNAL_URI;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.TMP_TAIL;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.JOB_PREFIX;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TMP_TAIL;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.JOB_PREFIX;
/**
* BalanceJournal based on HDFS. This class stores all the journals in the HDFS.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedure.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedure.java
rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java
index 6320e8fe99..080a73750e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedure.java
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
@@ -29,7 +29,7 @@
import java.io.DataOutput;
import java.io.IOException;
-import static org.apache.hadoop.hdfs.procedure.BalanceJob.NEXT_PROCEDURE_NONE;
+import static org.apache.hadoop.tools.fedbalance.procedure.BalanceJob.NEXT_PROCEDURE_NONE;
/**
* The basic components of the Job. Extend this class to implement different
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureScheduler.java
rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java
index 74606c5580..0f82b88f0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/BalanceProcedureScheduler.java
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import com.google.common.annotations.VisibleForTesting;
@@ -40,9 +40,9 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.WORK_THREAD_NUM;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.WORK_THREAD_NUM_DEFAULT;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.JOURNAL_CLASS;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM_DEFAULT;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.JOURNAL_CLASS;
/**
* The state machine framework consist of:
* Job: The state machine. It implements the basic logic of the
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/package-info.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/package-info.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/package-info.java
rename to hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/package-info.java
index 626d3b3727..cb03d137fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/procedure/package-info.java
+++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/package-info.java
@@ -23,7 +23,7 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh b/hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh
new file mode 100644
index 0000000000..2872c7afba
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/main/shellprofile.d/hadoop-federation-balance.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if ! declare -f hadoop_subcommand_fedbalance >/dev/null 2>/dev/null; then
+
+ if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
+ hadoop_add_subcommand "fedbalance" client "balance data between sub-clusters"
+ fi
+
+ # this can't be indented otherwise shelldocs won't get it
+
+## @description fedbalance command for hadoop
+## @audience public
+## @stability stable
+## @replaceable yes
+function hadoop_subcommand_fedbalance
+{
+ # shellcheck disable=SC2034
+ HADOOP_CLASSNAME=org.apache.hadoop.tools.fedbalance.FedBalance
+ hadoop_add_to_classpath_tools hadoop-distcp
+ hadoop_add_to_classpath_tools hadoop-federation-balance
+}
+
+fi
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java
new file mode 100644
index 0000000000..ec565c36d8
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java
@@ -0,0 +1,446 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools.fedbalance;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.tools.fedbalance.DistCpProcedure.Stage;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.tools.fedbalance.procedure.BalanceJob;
+import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedure.RetryException;
+import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedureScheduler;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.DataOutput;
+import java.io.DataInputStream;
+import java.io.ByteArrayInputStream;
+import java.net.URI;
+import java.util.Random;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI;
+import static org.apache.hadoop.test.GenericTestUtils.getMethodName;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.CURRENT_SNAPSHOT_NAME;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.LAST_SNAPSHOT_NAME;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+/**
+ * Test DistCpProcedure.
+ */
+public class TestDistCpProcedure {
+ private static MiniDFSCluster cluster;
+ private static Configuration conf;
+ static final String MOUNT = "mock_mount_point";
+ private static final String SRCDAT = "srcdat";
+ private static final String DSTDAT = "dstdat";
+ private static final long BLOCK_SIZE = 1024;
+ private static final long FILE_SIZE = BLOCK_SIZE * 100;
+ private FileEntry[] srcfiles =
+ {new FileEntry(SRCDAT, true), new FileEntry(SRCDAT + "/a", false),
+ new FileEntry(SRCDAT + "/b", true),
+ new FileEntry(SRCDAT + "/b/c", false)};
+ private static String nnUri;
+
+ @BeforeClass
+ public static void beforeClass() throws IOException {
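+ // Assumption: this test hook lets DistCpProcedure run against a plain
+ // MiniDFSCluster instead of a real federation environment.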
+ DistCpProcedure.enabledForTest = true;
+ conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ cluster.waitActive();
+
+ String workPath =
+ "hdfs://" + cluster.getNameNode().getHostAndPort() + "/procedure";
+ conf.set(SCHEDULER_JOURNAL_URI, workPath);
+
+ nnUri = FileSystem.getDefaultUri(conf).toString();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ DistCpProcedure.enabledForTest = false;
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test(timeout = 30000)
+ public void testSuccessfulDistCpProcedure() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+ FsPermission originalPerm = new FsPermission(777);
+ fs.setPermission(src, originalPerm);
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+ BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(conf);
+ scheduler.init(true);
+
+ BalanceJob balanceJob =
+ new BalanceJob.Builder<>().nextProcedure(dcProcedure).build();
+ scheduler.submit(balanceJob);
+ scheduler.waitUntilDone(balanceJob);
+ assertTrue(balanceJob.isJobDone());
+ if (balanceJob.getError() != null) {
+ throw balanceJob.getError();
+ }
+ assertNull(balanceJob.getError());
+ assertTrue(fs.exists(dst));
+ assertFalse(
+ fs.exists(new Path(context.getSrc(), HdfsConstants.DOT_SNAPSHOT_DIR)));
+ assertFalse(
+ fs.exists(new Path(context.getDst(), HdfsConstants.DOT_SNAPSHOT_DIR)));
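+ // The procedure restores the original permission on the destination and
+ // leaves the source readonly (000).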
+ assertEquals(originalPerm, fs.getFileStatus(dst).getPermission());
+ assertEquals(0, fs.getFileStatus(src).getPermission().toShort());
+ for (FileEntry e : srcfiles) { // verify file len.
+ if (!e.isDir) {
+ Path targetFile = new Path(testRoot, e.path.replace(SRCDAT, DSTDAT));
+ assertEquals(FILE_SIZE, fs.getFileStatus(targetFile).getLen());
+ }
+ }
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testInitDistCp() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+ // Set a non-default permission on the source path.
+ fs.setPermission(src, FsPermission.createImmutable((short) 020));
+
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+
+ // submit distcp.
+ try {
+ dcProcedure.initDistCp();
+ } catch (RetryException e) {
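+ // Expected: initDistCp signals RetryException while the distcp job is
+ // still running.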
+ }
+ fs.delete(new Path(src, "a"), true);
+ // Re-invoke initDistCp until the distcp job completes and the stage
+ // advances to DIFF_DISTCP.
+ executeProcedure(dcProcedure, Stage.DIFF_DISTCP,
+ () -> dcProcedure.initDistCp());
+ assertTrue(fs.exists(dst));
+ // The initial snapshot was taken before the delete, so "a" is still
+ // copied to the destination.
+ assertTrue(fs.exists(new Path(dst, "a")));
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testDiffDistCp() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+ executeProcedure(dcProcedure, Stage.DIFF_DISTCP,
+ () -> dcProcedure.initDistCp());
+ assertTrue(fs.exists(dst));
+
+ // move file out of src and test distcp.
+ fs.rename(new Path(src, "a"), new Path("/a"));
+ executeProcedure(dcProcedure, Stage.FINISH,
+ () -> dcProcedure.finalDistCp());
+ assertFalse(fs.exists(new Path(dst, "a")));
+ // move back file src/a and test distcp.
+ fs.rename(new Path("/a"), new Path(src, "a"));
+ executeProcedure(dcProcedure, Stage.FINISH,
+ () -> dcProcedure.finalDistCp());
+ assertTrue(fs.exists(new Path(dst, "a")));
+ // append file src/a and test.
+ OutputStream out = fs.append(new Path(src, "a"));
+ out.write("hello".getBytes());
+ out.close();
+ long len = fs.getFileStatus(new Path(src, "a")).getLen();
+ executeProcedure(dcProcedure, Stage.FINISH,
+ () -> dcProcedure.finalDistCp());
+ assertEquals(len, fs.getFileStatus(new Path(dst, "a")).getLen());
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testStageFinalDistCp() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+ // Keep a file open for append so an open file exists when finalDistCp runs.
+ OutputStream out = fs.append(new Path(src, "a"));
+
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+ executeProcedure(dcProcedure, Stage.DIFF_DISTCP,
+ () -> dcProcedure.initDistCp());
+ executeProcedure(dcProcedure, Stage.FINISH,
+ () -> dcProcedure.finalDistCp());
+ // Verify all the open files have been closed.
+ intercept(RemoteException.class, "LeaseExpiredException",
+ "Expect RemoteException(LeaseExpiredException).", () -> out.close());
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testStageFinish() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+ fs.mkdirs(src);
+ fs.mkdirs(dst);
+ fs.allowSnapshot(src);
+ fs.allowSnapshot(dst);
+ fs.createSnapshot(src, LAST_SNAPSHOT_NAME);
+ fs.createSnapshot(src, CURRENT_SNAPSHOT_NAME);
+ fs.createSnapshot(dst, LAST_SNAPSHOT_NAME);
+ FsPermission originalPerm = new FsPermission(777);
+ fs.setPermission(src, originalPerm);
+
+ // Test the finish stage.
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+ dcProcedure.disableWrite();
+ dcProcedure.finish();
+
+ // Verify path and permission.
+ assertTrue(fs.exists(dst));
+ assertFalse(fs.exists(new Path(src, HdfsConstants.DOT_SNAPSHOT_DIR)));
+ assertFalse(fs.exists(new Path(dst, HdfsConstants.DOT_SNAPSHOT_DIR)));
+ assertEquals(originalPerm, fs.getFileStatus(dst).getPermission());
+ assertEquals(0, fs.getFileStatus(src).getPermission().toShort());
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testRecoveryByStage() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ final DistCpProcedure[] dcp = new DistCpProcedure[1];
+ dcp[0] = new DistCpProcedure("distcp-procedure", null, 1000, context);
+
+ // Serialize and deserialize the procedure before each stage to verify it
+ // recovers correctly from a saved state.
+ dcp[0] = serializeProcedure(dcp[0]);
+ executeProcedure(dcp[0], Stage.INIT_DISTCP, () -> dcp[0].preCheck());
+ dcp[0] = serializeProcedure(dcp[0]);
+ executeProcedure(dcp[0], Stage.DIFF_DISTCP, () -> dcp[0].initDistCp());
+ fs.delete(new Path(src, "a"), true); // introduce a difference between snapshots.
+ dcp[0] = serializeProcedure(dcp[0]);
+ executeProcedure(dcp[0], Stage.DISABLE_WRITE, () -> dcp[0].diffDistCp());
+ dcp[0] = serializeProcedure(dcp[0]);
+ executeProcedure(dcp[0], Stage.FINAL_DISTCP, () -> dcp[0].disableWrite());
+ dcp[0] = serializeProcedure(dcp[0]);
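+ // Keep a file open for append; finalDistCp is expected to force-close it.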
+ OutputStream out = fs.append(new Path(src, "b/c"));
+ executeProcedure(dcp[0], Stage.FINISH, () -> dcp[0].finalDistCp());
+ intercept(RemoteException.class, "LeaseExpiredException",
+ "Expect RemoteException(LeaseExpiredException).", () -> out.close());
+ dcp[0] = serializeProcedure(dcp[0]);
+ assertTrue(dcp[0].execute());
+ assertTrue(fs.exists(dst));
+ assertFalse(
+ fs.exists(new Path(context.getSrc(), HdfsConstants.DOT_SNAPSHOT_DIR)));
+ assertFalse(
+ fs.exists(new Path(context.getDst(), HdfsConstants.DOT_SNAPSHOT_DIR)));
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testShutdown() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+ BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(conf);
+ scheduler.init(true);
+
+ BalanceJob balanceJob =
+ new BalanceJob.Builder<>().nextProcedure(dcProcedure).build();
+ scheduler.submit(balanceJob);
+
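+ // Sleep for a random duration so the scheduler is shut down at an
+ // arbitrary point of the job.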
+ long sleep = Math.abs(new Random().nextLong()) % 10000;
+ Thread.sleep(sleep);
+ scheduler.shutDown();
+ cleanup(fs, new Path(testRoot));
+ }
+
+ @Test(timeout = 30000)
+ public void testDisableWrite() throws Exception {
+ String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
+ DistributedFileSystem fs =
+ (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
+ createFiles(fs, testRoot, srcfiles);
+ Path src = new Path(testRoot, SRCDAT);
+ Path dst = new Path(testRoot, DSTDAT);
+
+ FedBalanceContext context = buildContext(src, dst, MOUNT);
+ DistCpProcedure dcProcedure =
+ new DistCpProcedure("distcp-procedure", null, 1000, context);
+ assertNotEquals(0, fs.getFileStatus(src).getPermission().toShort());
+ executeProcedure(dcProcedure, Stage.FINAL_DISTCP,
+ () -> dcProcedure.disableWrite());
+ assertEquals(0, fs.getFileStatus(src).getPermission().toShort());
+ cleanup(fs, new Path(testRoot));
+ }
+
+ private FedBalanceContext buildContext(Path src, Path dst, String mount) {
+ return new FedBalanceContext.Builder(src, dst, mount, conf).setMapNum(10)
+ .setBandwidthLimit(1).setTrash(TrashOption.TRASH).setDelayDuration(1000)
+ .build();
+ }
+
+ interface Call {
+ void execute() throws IOException, RetryException;
+ }
+
+ /**
+ * Execute the procedure until its stage is updated to the target stage.
+ *
+ * @param procedure the procedure to be executed and verified.
+ * @param target the target stage.
+ * @param call the function executing the procedure.
+ */
+ private static void executeProcedure(DistCpProcedure procedure, Stage target,
+ Call call) throws IOException {
+ Stage stage = Stage.PRE_CHECK;
+ procedure.updateStage(stage);
+ while (stage != target) {
+ try {
+ call.execute();
+ } catch (RetryException e) {
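+ // Expected: the procedure asks for a retry; loop until it reaches the
+ // target stage.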
+ } finally {
+ stage = procedure.getStage();
+ }
+ }
+ }
+
+ static class FileEntry {
+ private String path;
+ private boolean isDir;
+
+ FileEntry(String path, boolean isDir) {
+ this.path = path;
+ this.isDir = isDir;
+ }
+
+ String getPath() {
+ return path;
+ }
+
+ boolean isDirectory() {
+ return isDir;
+ }
+ }
+
+ /**
+ * Create directories and files with random data.
+ *
+ * @param fs the file system object.
+ * @param topdir the base dir of the directories and files.
+ * @param entries the directory and file entries to be created.
+ */
+ private void createFiles(DistributedFileSystem fs, String topdir,
+ FileEntry[] entries) throws IOException {
+ long seed = System.currentTimeMillis();
+ Random rand = new Random(seed);
+ short replicationFactor = 2;
+ for (FileEntry entry : entries) {
+ Path newPath = new Path(topdir + "/" + entry.getPath());
+ if (entry.isDirectory()) {
+ fs.mkdirs(newPath);
+ } else {
+ int bufSize = 128;
+ DFSTestUtil.createFile(fs, newPath, bufSize, FILE_SIZE, BLOCK_SIZE,
+ replicationFactor, seed);
+ }
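+ // Vary the seed so each file gets different random content.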
+ seed = System.currentTimeMillis() + rand.nextLong();
+ }
+ }
+
+ private DistCpProcedure serializeProcedure(DistCpProcedure dcp)
+ throws IOException {
+ ByteArrayOutputStream bao = new ByteArrayOutputStream();
+ DataOutput dataOut = new DataOutputStream(bao);
+ dcp.write(dataOut);
+ dcp = new DistCpProcedure();
+ dcp.readFields(
+ new DataInputStream(new ByteArrayInputStream(bao.toByteArray())));
+ return dcp;
+ }
+
+ private void cleanup(DistributedFileSystem dfs, Path root)
+ throws IOException {
+ Path src = new Path(root, SRCDAT);
+ Path dst = new Path(root, DSTDAT);
+ DistCpProcedure.cleanupSnapshot(dfs, src);
+ DistCpProcedure.cleanupSnapshot(dfs, dst);
+ dfs.delete(root, true);
+ }
+}
diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java
new file mode 100644
index 0000000000..9dd4e5da8f
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestMountTableProcedure.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools.fedbalance;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.router.Router;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutput;
+import java.io.DataInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.DataOutputStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Basic tests of MountTableProcedure.
+ */
+public class TestMountTableProcedure {
+
+ private static StateStoreDFSCluster cluster;
+ private static RouterContext routerContext;
+ private static Configuration routerConf;
+ private static List<MountTable> mockMountTable;
+ private static StateStoreService stateStore;
+
+ @BeforeClass
+ public static void globalSetUp() throws Exception {
+ cluster = new StateStoreDFSCluster(false, 1);
+ // Build and start a router with State Store + admin + RPC
+ Configuration conf = new RouterConfigBuilder()
+ .stateStore()
+ .admin()
+ .rpc()
+ .build();
+ cluster.addRouterOverrides(conf);
+ cluster.startRouters();
+ routerContext = cluster.getRandomRouter();
+ mockMountTable = cluster.generateMockMountTable();
+ Router router = routerContext.getRouter();
+ stateStore = router.getStateStore();
+
+ // Add two name services for testing
+ ActiveNamenodeResolver membership = router.getNamenodeResolver();
+ membership.registerNamenode(createNamenodeReport("ns0", "nn1",
+ HAServiceProtocol.HAServiceState.ACTIVE));
+ membership.registerNamenode(createNamenodeReport("ns1", "nn1",
+ HAServiceProtocol.HAServiceState.ACTIVE));
+ stateStore.refreshCaches(true);
+
+ routerConf = new Configuration();
+ InetSocketAddress routerSocket = router.getAdminServerAddress();
+ routerConf.setSocketAddr(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+ routerSocket);
+ }
+
+ @AfterClass
+ public static void tearDown() {
+ cluster.stopRouter(routerContext);
+ }
+
+ @Before
+ public void testSetup() throws Exception {
+ assertTrue(
+ synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+ // Avoid running with random users
+ routerContext.resetAdminClient();
+ }
+
+ @Test
+ public void testUpdateMountpoint() throws Exception {
+ // First add the mount entry /test-path -> {ns0, /test-path}.
+ String mount = "/test-path";
+ String dst = "/test-dst";
+ MountTable newEntry = MountTable
+ .newInstance(mount, Collections.singletonMap("ns0", mount),
+ Time.now(), Time.now());
+ MountTableManager mountTable =
+ routerContext.getAdminClient().getMountTableManager();
+ AddMountTableEntryRequest addRequest =
+ AddMountTableEntryRequest.newInstance(newEntry);
+ AddMountTableEntryResponse addResponse =
+ mountTable.addMountTableEntry(addRequest);
+ assertTrue(addResponse.getStatus());
+ // verify the mount entry is added successfully.
+ GetMountTableEntriesRequest request =
+ GetMountTableEntriesRequest.newInstance("/");
+ stateStore.loadCache(MountTableStoreImpl.class, true); // load cache.
+ GetMountTableEntriesResponse response =
+ mountTable.getMountTableEntries(request);
+ assertEquals(3, response.getEntries().size());
+
+ // set the mount table to readonly.
+ MountTableProcedure.disableWrite(mount, routerConf);
+
+ // test MountTableProcedure updates the mount point.
+ String dstNs = "ns1";
+ MountTableProcedure smtp =
+ new MountTableProcedure("single-mount-table-procedure", null,
+ 1000, mount, dst, dstNs, routerConf);
+ assertTrue(smtp.execute());
+ stateStore.loadCache(MountTableStoreImpl.class, true); // load cache.
+ // Verify the mount entry is updated to {ns1, /test-dst}.
+ MountTable entry =
+ MountTableProcedure.getMountEntry(mount, mountTable);
+ assertNotNull(entry);
+ assertEquals(1, entry.getDestinations().size());
+ String nsId = entry.getDestinations().get(0).getNameserviceId();
+ String dstPath = entry.getDestinations().get(0).getDest();
+ assertEquals(dstNs, nsId);
+ assertEquals(dst, dstPath);
+ // Verify the mount point accepts writes again once enableWrite is called.
+ URI address = routerContext.getFileSystemURI();
+ DFSClient routerClient = new DFSClient(address, routerConf);
+ MountTableProcedure.enableWrite(mount, routerConf);
+ intercept(RemoteException.class, "No namenode available to invoke mkdirs",
+ "Expect no namenode exception.", () -> routerClient
+ .mkdirs(mount + "/file", new FsPermission(020), false));
+ }
+
+ @Test
+ public void testDisableAndEnableWrite() throws Exception {
+ // First add the mount entry /test-write -> {ns0, /test-write}.
+ String mount = "/test-write";
+ MountTable newEntry = MountTable
+ .newInstance(mount, Collections.singletonMap("ns0", mount),
+ Time.now(), Time.now());
+ MountTableManager mountTable =
+ routerContext.getAdminClient().getMountTableManager();
+ AddMountTableEntryRequest addRequest =
+ AddMountTableEntryRequest.newInstance(newEntry);
+ AddMountTableEntryResponse addResponse =
+ mountTable.addMountTableEntry(addRequest);
+ assertTrue(addResponse.getStatus());
+ stateStore.loadCache(MountTableStoreImpl.class, true); // load cache.
+
+ // Construct client.
+ URI address = routerContext.getFileSystemURI();
+ DFSClient routerClient = new DFSClient(address, routerConf);
+ // Verify the mount point is not readonly.
+ intercept(RemoteException.class, "No namenode available to invoke mkdirs",
+ "Expect no namenode exception.", () -> routerClient
+ .mkdirs(mount + "/file", new FsPermission(020), false));
+
+ // Verify disable write.
+ MountTableProcedure.disableWrite(mount, routerConf);
+ intercept(RemoteException.class, "is in a read only mount point",
+ "Expect readonly exception.", () -> routerClient
+ .mkdirs(mount + "/dir", new FsPermission(020), false));
+
+ // Verify enable write.
+ MountTableProcedure.enableWrite(mount, routerConf);
+ intercept(RemoteException.class, "No namenode available to invoke mkdirs",
+ "Expect no namenode exception.", () -> routerClient
+ .mkdirs(mount + "/file", new FsPermission(020), false));
+ }
+
+ @Test
+ public void testSeDeserialize() throws Exception {
+ String fedPath = "/test-path";
+ String dst = "/test-dst";
+ String dstNs = "ns1";
+ MountTableProcedure smtp =
+ new MountTableProcedure("single-mount-table-procedure", null,
+ 1000, fedPath, dst, dstNs, routerConf);
+ ByteArrayOutputStream bao = new ByteArrayOutputStream();
+ DataOutput dataOut = new DataOutputStream(bao);
+ smtp.write(dataOut);
+ smtp = new MountTableProcedure();
+ smtp.readFields(
+ new DataInputStream(new ByteArrayInputStream(bao.toByteArray())));
+ assertEquals(fedPath, smtp.getMount());
+ assertEquals(dst, smtp.getDstPath());
+ assertEquals(dstNs, smtp.getDstNs());
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java
new file mode 100644
index 0000000000..a128932d52
--- /dev/null
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestTrashProcedure.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools.fedbalance;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutput;
+import java.io.DataInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.TrashOption;
+import static org.apache.hadoop.test.GenericTestUtils.getMethodName;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test TrashProcedure.
+ */
+public class TestTrashProcedure {
+
+ private static Configuration conf;
+ private static MiniDFSCluster cluster;
+ private static String nnUri;
+
+ @BeforeClass
+ public static void beforeClass() throws IOException {
+ conf = new Configuration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ cluster.waitActive();
+ nnUri = FileSystem.getDefaultUri(conf).toString();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testTrashProcedure() throws Exception {
+ Path src = new Path("/" + getMethodName() + "-src");
+ Path dst = new Path("/" + getMethodName() + "-dst");
+ FileSystem fs = cluster.getFileSystem();
+ fs.mkdirs(src);
+ fs.mkdirs(new Path(src, "dir"));
+ assertTrue(fs.exists(src));
+
+ FedBalanceContext context =
+ new FedBalanceContext.Builder(src, dst, TestDistCpProcedure.MOUNT, conf)
+ .setMapNum(10).setBandwidthLimit(1).setTrash(TrashOption.TRASH)
+ .build();
+ TrashProcedure trashProcedure =
+ new TrashProcedure("trash-procedure", null, 1000, context);
+ trashProcedure.moveToTrash();
+ assertFalse(fs.exists(src));
+ }
+
+ @Test
+ public void testSeDeserialize() throws Exception {
+ Path src = new Path("/" + getMethodName() + "-src");
+ Path dst = new Path("/" + getMethodName() + "-dst");
+ FedBalanceContext context =
+ new FedBalanceContext.Builder(src, dst, TestDistCpProcedure.MOUNT, conf)
+ .setMapNum(10).setBandwidthLimit(1).setTrash(TrashOption.TRASH)
+ .build();
+ TrashProcedure trashProcedure =
+ new TrashProcedure("trash-procedure", null, 1000, context);
+ ByteArrayOutputStream bao = new ByteArrayOutputStream();
+ DataOutput dataOut = new DataOutputStream(bao);
+ trashProcedure.write(dataOut);
+ trashProcedure = new TrashProcedure();
+ trashProcedure.readFields(
+ new DataInputStream(new ByteArrayInputStream(bao.toByteArray())));
+ assertEquals(context, trashProcedure.getContext());
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/MultiPhaseProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/MultiPhaseProcedure.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/MultiPhaseProcedure.java
rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/MultiPhaseProcedure.java
index 27cfebd3a3..b9c9c1e1ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/MultiPhaseProcedure.java
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/MultiPhaseProcedure.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RecordProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RecordProcedure.java
similarity index 96%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RecordProcedure.java
rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RecordProcedure.java
index 706d4a1bce..9754b0994c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RecordProcedure.java
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RecordProcedure.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import java.util.ArrayList;
import java.util.List;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RetryProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RetryProcedure.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RetryProcedure.java
rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RetryProcedure.java
index 336873e6a8..faec834f98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/RetryProcedure.java
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/RetryProcedure.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import java.io.DataInput;
import java.io.DataOutput;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/TestBalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/TestBalanceProcedureScheduler.java
rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java
index 39e000b644..7a2b449ce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/TestBalanceProcedureScheduler.java
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -43,8 +43,8 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.SCHEDULER_JOURNAL_URI;
-import static org.apache.hadoop.hdfs.procedure.BalanceProcedureConfigKeys.WORK_THREAD_NUM;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI;
+import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.WORK_THREAD_NUM;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertEquals;
@@ -70,6 +70,7 @@ public static void setup() throws IOException {
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "hdfs:///");
CONF.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+ CONF.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
CONF.setInt(WORK_THREAD_NUM, 1);
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/UnrecoverableProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/UnrecoverableProcedure.java
similarity index 96%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/UnrecoverableProcedure.java
rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/UnrecoverableProcedure.java
index 941d0a0ae7..804f1aa548 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/UnrecoverableProcedure.java
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/UnrecoverableProcedure.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/WaitProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/WaitProcedure.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/WaitProcedure.java
rename to hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/WaitProcedure.java
index 8666caf2f6..af46b17afb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/procedure/WaitProcedure.java
+++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/WaitProcedure.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.procedure;
+package org.apache.hadoop.tools.fedbalance.procedure;
import org.apache.hadoop.util.Time;
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index f923bb7c36..cc811fca69 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -44,6 +44,11 @@
<artifactId>hadoop-distcp</artifactId>
<scope>compile</scope>