From 96c4575d7373079becfa3e3db29ba98e6fb86388 Mon Sep 17 00:00:00 2001
From: Surendra Singh Lilhore <surendralilhore@apache.org>
Date: Fri, 24 Aug 2018 08:14:57 +0530
Subject: [PATCH] HDFS-13805. Journal Nodes should allow to format non-empty directories with -force option. Contributed by Surendra Singh Lilhore.

---
 .../hdfs/qjournal/client/AsyncLogger.java | 3 ++-
 .../hdfs/qjournal/client/AsyncLoggerSet.java | 4 ++--
 .../hdfs/qjournal/client/IPCLoggerChannel.java | 5 +++--
 .../qjournal/client/QuorumJournalManager.java | 4 ++--
 .../qjournal/protocol/QJournalProtocol.java | 2 +-
 .../QJournalProtocolServerSideTranslatorPB.java | 2 +-
 .../QJournalProtocolTranslatorPB.java | 6 ++++--
 .../hadoop/hdfs/qjournal/server/JNStorage.java | 4 ++--
 .../hadoop/hdfs/qjournal/server/Journal.java | 6 +++---
 .../qjournal/server/JournalNodeRpcServer.java | 5 +++--
 .../server/namenode/BackupJournalManager.java | 2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java | 5 +++--
 .../hadoop/hdfs/server/namenode/FSImage.java | 5 +++--
 .../hdfs/server/namenode/FSNamesystem.java | 4 ++--
 .../server/namenode/FileJournalManager.java | 2 +-
 .../hdfs/server/namenode/JournalManager.java | 2 +-
 .../hadoop/hdfs/server/namenode/JournalSet.java | 2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java | 4 ++--
 .../src/main/proto/QJournalProtocol.proto | 1 +
 .../qjournal/client/TestEpochsAreUnique.java | 2 +-
 .../hdfs/qjournal/client/TestQJMWithFaults.java | 6 +++---
 .../client/TestQuorumJournalManager.java | 4 ++--
 .../client/TestQuorumJournalManagerUnit.java | 4 +++-
 .../hdfs/qjournal/server/TestJournal.java | 17 ++++++++++++++---
 .../hdfs/qjournal/server/TestJournalNode.java | 4 ++--
 .../qjournal/server/TestJournalNodeMXBean.java | 2 +-
 .../qjournal/server/TestJournalNodeSync.java | 2 +-
 .../server/namenode/TestGenericJournalConf.java | 2 +-
 28 files changed, 66 insertions(+), 45 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
index d2b48ccec5..2633723711 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
@@ -89,8 +89,9 @@ public ListenableFuture<Void> finalizeLogSegment(
   /**
    * Format the log directory.
    * @param nsInfo the namespace info to format with
+   * @param force the force option to format
    */
-  public ListenableFuture<Void> format(NamespaceInfo nsInfo);
+  public ListenableFuture<Void> format(NamespaceInfo nsInfo, boolean force);
 
   /**
    * @return whether or not the remote node has any valid data.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index d46c2cf790..b52e312943 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -299,12 +299,12 @@ public QuorumCall<AsyncLogger, RemoteEditLogManifest> getEditLogManifest( return QuorumCall.create(calls); } - QuorumCall<AsyncLogger,Void> format(NamespaceInfo nsInfo) { + QuorumCall<AsyncLogger, Void> format(NamespaceInfo nsInfo, boolean force) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = - logger.format(nsInfo); + logger.format(nsInfo, force); calls.put(logger, future); } return QuorumCall.create(calls); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 30367357b8..4fca1bb0ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -502,11 +502,12 @@ private synchronized void unreserveQueueSpace(int size) { } @Override - public ListenableFuture<Void> format(final NamespaceInfo nsInfo) { + public ListenableFuture<Void> format(final NamespaceInfo nsInfo, + final boolean force) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws Exception { - getProxy().format(journalId, nameServiceId, nsInfo); + getProxy().format(journalId, nameServiceId, nsInfo, force); return null; } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 4faaa983be..bd452923ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -220,8 +220,8 @@ Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch() } @Override - public void format(NamespaceInfo nsInfo) throws IOException { - QuorumCall<AsyncLogger,Void> call = loggers.format(nsInfo); + public void format(NamespaceInfo nsInfo, boolean force) throws IOException { + QuorumCall<AsyncLogger, Void> call = loggers.format(nsInfo, force); try { call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs, "format"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java index 5558bd5472..8dad26104c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java @@ -68,7 +68,7 @@ GetJournalStateResponseProto getJournalState(String journalId, * Format the underlying 
storage for the given namespace. */ void format(String journalId, String nameServiceId, - NamespaceInfo nsInfo) throws IOException; + NamespaceInfo nsInfo, boolean force) throws IOException; /** * Begin a new epoch. See the HDFS-3077 design doc for details. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java index 865d296922..2ad19da096 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java @@ -147,7 +147,7 @@ public FormatResponseProto format(RpcController controller, try { impl.format(request.getJid().getIdentifier(), request.hasNameServiceId() ? request.getNameServiceId() : null, - PBHelper.convert(request.getNsInfo())); + PBHelper.convert(request.getNsInfo()), request.getForce()); return FormatResponseProto.getDefaultInstance(); } catch (IOException ioe) { throw new ServiceException(ioe); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java index d7cd7b5581..42d35f57fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java @@ -136,11 +136,13 @@ private JournalIdProto convertJournalId(String jid) { @Override public void format(String jid, String nameServiceId, - NamespaceInfo nsInfo) throws IOException { + NamespaceInfo nsInfo, + boolean force) throws IOException { try { FormatRequestProto.Builder req = FormatRequestProto.newBuilder() .setJid(convertJournalId(jid)) - .setNsInfo(PBHelper.convert(nsInfo)); + .setNsInfo(PBHelper.convert(nsInfo)) + .setForce(force); if(nameServiceId != null) { req.setNameServiceId(nameServiceId); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java index 6bf4903b1e..612fd3d19f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java @@ -204,10 +204,10 @@ private static void purgeMatching(File dir, List<Pattern> patterns, } } - void format(NamespaceInfo nsInfo) throws IOException { + void format(NamespaceInfo nsInfo, boolean force) throws IOException { unlockAll(); try { - sd.analyzeStorage(StartupOption.FORMAT, this, true); + sd.analyzeStorage(StartupOption.FORMAT, this, !force); } finally { sd.unlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 8f25d260b6..7e88afa78d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -227,13 +227,13 @@ private synchronized EditLogFile scanStorageForLatestEdits() throws IOException /** * Format the local storage with the given namespace. */ - void format(NamespaceInfo nsInfo) throws IOException { + void format(NamespaceInfo nsInfo, boolean force) throws IOException { Preconditions.checkState(nsInfo.getNamespaceID() != 0, "can't format with uninitialized namespace info: %s", nsInfo); LOG.info("Formatting journal id : " + journalId + " with namespace info: " + - nsInfo); - storage.format(nsInfo); + nsInfo + " and force: " + force); + storage.format(nsInfo, force); refreshCachedData(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index b1a3c9665d..0f11026b1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -176,9 +176,10 @@ public NewEpochResponseProto newEpoch(String journalId, @Override public void format(String journalId, String nameServiceId, - NamespaceInfo nsInfo) + NamespaceInfo nsInfo, + boolean force) throws IOException { - jn.getOrCreateJournal(journalId, nameServiceId).format(nsInfo); + jn.getOrCreateJournal(journalId, nameServiceId).format(nsInfo, force); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java index e1ddfb909c..eac91bf483 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java @@ -42,7 +42,7 @@ class BackupJournalManager implements JournalManager { } @Override - public void format(NamespaceInfo nsInfo) { + public void format(NamespaceInfo nsInfo, boolean force) { // format() should only get called at startup, before any BNs // can register with the NN. throw new UnsupportedOperationException( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index d6fb212c7d..547ad577c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -417,13 +417,14 @@ synchronized void close() { * File-based journals are skipped, since they are formatted by the * Storage format code. 
*/ - synchronized void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException { + synchronized void formatNonFileJournals(NamespaceInfo nsInfo, boolean force) + throws IOException { Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS, "Bad state: %s", state); for (JournalManager jm : journalSet.getJournalManagers()) { if (!(jm instanceof FileJournalManager)) { - jm.format(nsInfo); + jm.format(nsInfo, force); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 5cfc0176f1..6d107be789 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -160,7 +160,8 @@ protected FSImage(Configuration conf, archivalManager = new NNStorageRetentionManager(conf, storage, editLog); } - void format(FSNamesystem fsn, String clusterId) throws IOException { + void format(FSNamesystem fsn, String clusterId, boolean force) + throws IOException { long fileCount = fsn.getFilesTotal(); // Expect 1 file, which is the root inode Preconditions.checkState(fileCount == 1, @@ -171,7 +172,7 @@ void format(FSNamesystem fsn, String clusterId) throws IOException { ns.clusterID = clusterId; storage.format(ns); - editLog.formatNonFileJournals(ns); + editLog.formatNonFileJournals(ns, force); saveFSImageInAllDirs(fsn, 0); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index b0fb26ccb6..06bf008137 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1078,8 +1078,8 @@ private void loadFSImage(StartupOption startOpt) throws IOException { // format before starting up if requested if (startOpt == StartupOption.FORMAT) { - - fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id + // reuse current id + fsImage.format(this, fsImage.getStorage().determineClusterId(), false); startOpt = StartupOption.REGULAR; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index c71c09ad4d..185ad731b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -100,7 +100,7 @@ public FileJournalManager(Configuration conf, StorageDirectory sd, public void close() throws IOException {} @Override - public void format(NamespaceInfo ns) throws IOException { + public void format(NamespaceInfo ns, boolean force) throws IOException { // Formatting file journals is done by the StorageDirectory // format code, since they may share their directory with // checkpoints, etc. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java index ae1bc3b7db..d6d20945bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java @@ -43,7 +43,7 @@ public interface JournalManager extends Closeable, FormatConfirmable, * Format the underlying storage, removing any previously * stored data. */ - void format(NamespaceInfo ns) throws IOException; + void format(NamespaceInfo ns, boolean force) throws IOException; /** * Begin writing to a new segment of the log stream, which starts at diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index e7f2adb5bd..868df017ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -188,7 +188,7 @@ public boolean isShared() { } @Override - public void format(NamespaceInfo nsInfo) throws IOException { + public void format(NamespaceInfo nsInfo, boolean force) throws IOException { // The operation is done by FSEditLog itself throw new UnsupportedOperationException(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 7f78d2fcfd..a8034da85e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1159,7 +1159,7 @@ private static boolean format(Configuration conf, boolean force, return true; // aborted } - fsImage.format(fsn, clusterId); + fsImage.format(fsn, clusterId, force); } catch (IOException ioe) { LOG.warn("Encountered exception during format: ", ioe); fsImage.close(); @@ -1262,7 +1262,7 @@ private static boolean initializeSharedEdits(Configuration conf, // actually want to save a checkpoint - just prime the dirs with // the existing namespace info newSharedStorage.format(nsInfo); - sharedEditsImage.getEditLog().formatNonFileJournals(nsInfo); + sharedEditsImage.getEditLog().formatNonFileJournals(nsInfo, force); // Need to make sure the edit log segments are in good shape to initialize // the shared edits dir. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto index a37c7236a6..625966fd33 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto @@ -242,6 +242,7 @@ message FormatRequestProto { required JournalIdProto jid = 1; required NamespaceInfoProto nsInfo = 2; optional string nameServiceId = 3; + optional bool force = 4 [ default = false ]; } message FormatResponseProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java index 5101a41f0e..0fc142929f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java @@ -56,7 +56,7 @@ public void testSingleThreaded() throws IOException { QuorumJournalManager qjm = new QuorumJournalManager( conf, uri, FAKE_NSINFO); try { - qjm.format(FAKE_NSINFO); + qjm.format(FAKE_NSINFO, false); } finally { qjm.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index 6ad43f5835..40f213e707 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -105,7 +105,7 @@ private static long determineMaxIpcNumber() throws Exception { long ret; try { qjm = createInjectableQJM(cluster); - qjm.format(FAKE_NSINFO); + qjm.format(FAKE_NSINFO, false); doWorkload(cluster, qjm); SortedSet<Integer> ipcCounts = Sets.newTreeSet(); @@ -156,7 +156,7 @@ public void testRecoverAfterDoubleFailures() throws Exception { QuorumJournalManager qjm = null; try { qjm = createInjectableQJM(cluster); - qjm.format(FAKE_NSINFO); + qjm.format(FAKE_NSINFO, false); List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests(); failIpcNumber(loggers.get(0), failA); failIpcNumber(loggers.get(1), failB); @@ -240,7 +240,7 @@ public void testRandomized() throws Exception { // Format the cluster using a non-faulty QJM. 
QuorumJournalManager qjmForInitialFormat = createInjectableQJM(cluster); - qjmForInitialFormat.format(FAKE_NSINFO); + qjmForInitialFormat.format(FAKE_NSINFO, false); qjmForInitialFormat.close(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index 69856ae3fa..00bec22564 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -100,7 +100,7 @@ public void setup() throws Exception { qjm = createSpyingQJM(); spies = qjm.getLoggerSetForTests().getLoggersForTests(); - qjm.format(QJMTestUtil.FAKE_NSINFO); + qjm.format(QJMTestUtil.FAKE_NSINFO, false); qjm.recoverUnfinalizedSegments(); assertEquals(1, qjm.getLoggerSetForTests().getEpoch()); } @@ -149,7 +149,7 @@ public void testFormat() throws Exception { QuorumJournalManager qjm = closeLater(new QuorumJournalManager( conf, cluster.getQuorumJournalURI("testFormat-jid"), FAKE_NSINFO)); assertFalse(qjm.hasSomeData()); - qjm.format(FAKE_NSINFO); + qjm.format(FAKE_NSINFO, false); assertTrue(qjm.hasSomeData()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java index 75dcf2fbda..9e1e3bb9dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java @@ -19,6 +19,7 @@ import static org.junit.Assert.fail; import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.eq; import java.io.IOException; @@ -89,7 +90,8 @@ protected List<AsyncLogger> createLoggers(AsyncLogger.Factory factory) { NewEpochResponseProto.newBuilder().build() ).when(logger).newEpoch(Mockito.anyLong()); - futureReturns(null).when(logger).format(Mockito.<NamespaceInfo>any()); + futureReturns(null).when(logger).format(Mockito.<NamespaceInfo>any(), + anyBoolean()); } qjm.recoverUnfinalizedSegments(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java index b71d69445c..b8d2652ef4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java @@ -73,7 +73,7 @@ public void setup() throws Exception { conf = new Configuration(); journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR, mockErrorReporter); - journal.format(FAKE_NSINFO); + journal.format(FAKE_NSINFO, false); } @After @@ -207,7 +207,7 @@ public void testFormatResetsCachedValues() throws Exception { // Clear the storage directory before reformatting it journal.getStorage().getJournalManager() .getStorageDirectory().clearDirectory(); - journal.format(FAKE_NSINFO_2); + journal.format(FAKE_NSINFO_2, false); assertEquals(0, 
journal.getLastPromisedEpoch()); assertEquals(0, journal.getLastWriterEpoch()); @@ -425,7 +425,7 @@ public void testFormatNonEmptyStorageDirectories() throws Exception { try { // Format again here and to format the non-empty directories in // journal node. - journal.format(FAKE_NSINFO); + journal.format(FAKE_NSINFO, false); fail("Did not fail to format non-empty directories in journal node."); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( @@ -434,4 +434,15 @@ public void testFormatNonEmptyStorageDirectories() throws Exception { } } + @Test + public void testFormatNonEmptyStorageDirectoriesWhenforceOptionIsTrue() + throws Exception { + try { + // Format again here and to format the non-empty directories in + // journal node. + journal.format(FAKE_NSINFO, true); + } catch (IOException ioe) { + fail("Format should be success with force option."); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index 8d587927fb..4cc5968e6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -159,11 +159,11 @@ public void setup() throws Exception { HdfsServerConstants.StartupOption.REGULAR); NamespaceInfo fakeNameSpaceInfo = new NamespaceInfo( 12345, "mycluster", "my-bp"+nsId, 0L); - journal.format(fakeNameSpaceInfo); + journal.format(fakeNameSpaceInfo, false); } } else { journal = jn.getOrCreateJournal(journalId); - journal.format(FAKE_NSINFO); + journal.format(FAKE_NSINFO, false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java index 1de37a4d24..7550c4e9e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java @@ -82,7 +82,7 @@ public void testJournalNodeMXBean() throws Exception { // format the journal ns1 final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster", "my-bp", 0L); - jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO); + jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO, false); // check again after format // getJournalsStatus diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java index 8de9641753..c23604b998 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java @@ -341,7 +341,7 @@ public void testSyncAfterJNformat() throws Exception{ } // Format the JN - journal1.format(nsInfo); + journal1.format(nsInfo, false); // Roll some more edits for (int i = 4; i < 10; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
index 020ecb56aa..edcf9e15a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
@@ -155,7 +155,7 @@ public DummyJournalManager(Configuration conf, URI u,
     }
 
     @Override
-    public void format(NamespaceInfo nsInfo) throws IOException {
+    public void format(NamespaceInfo nsInfo, boolean force) throws IOException {
       formatCalled = true;
     }
 
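Editorial note, not part of the commit: a minimal usage sketch of the new force flag on the shared-edits journal manager. The JournalNode addresses, the qjournal URI, and the NamespaceInfo values below are hypothetical placeholders (the NamespaceInfo constructor and the QuorumJournalManager constructor are the same ones the tests in this patch use). With force=true, JNStorage passes !force to analyzeStorage, so the JournalNodes format storage directories that already contain data; with force=false the pre-HDFS-13805 behaviour of failing on non-empty directories is kept.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

public class ForceFormatSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Hypothetical shared-edits URI; substitute the real JournalNode addresses.
    URI journalUri = new URI("qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster");
    // Same fake namespace-info style as the tests in this patch.
    NamespaceInfo nsInfo = new NamespaceInfo(12345, "mycluster", "my-bp", 0L);
    try (QuorumJournalManager qjm = new QuorumJournalManager(conf, journalUri, nsInfo)) {
      // force=true: each JournalNode formats its directory even if it already
      // contains edits (per the JNStorage and Journal hunks above).
      qjm.format(nsInfo, true);
    }
  }
}

In the NameNode itself this call is not made directly; as the NameNode, FSImage, and FSEditLog hunks above show, the -force flag given to "hdfs namenode -format" is propagated through fsImage.format(fsn, clusterId, force) and formatNonFileJournals(nsInfo, force) down to JournalManager.format(nsInfo, force).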