HDFS-3275. Skip format for non-file based directories. Contributed by Amith D K.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1332527 13f79535-47bb-0310-9956-ffa450edef68
parent 0e31fba50d
commit 086fa860c0
@@ -25,6 +25,7 @@
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -674,10 +675,14 @@ private static boolean format(Configuration conf, boolean force,
     initializeGenericKeys(conf, nsId, namenodeId);
     checkAllowFormat(conf);
 
-    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
+    List<URI> dirsToPrompt = new ArrayList<URI>();
+    dirsToPrompt.addAll(nameDirsToFormat);
+    dirsToPrompt.addAll(sharedDirs);
     List<URI> editDirsToFormat =
         FSNamesystem.getNamespaceEditsDirs(conf);
-    if (!confirmFormat(dirsToFormat, force, isInteractive)) {
+    if (!confirmFormat(dirsToPrompt, force, isInteractive)) {
       return true; // aborted
     }
 
@@ -689,7 +694,7 @@ private static boolean format(Configuration conf, boolean force,
     }
     System.out.println("Formatting using clusterid: " + clusterId);
 
-    FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat);
+    FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
     fsImage.format(fsn, clusterId);
     return false;
@@ -711,7 +716,18 @@ public static boolean confirmFormat(Collection<URI> dirsToFormat,
       boolean force, boolean interactive)
       throws IOException {
     for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
-      File curDir = new File(it.next().getPath());
+      URI dirUri = it.next();
+      if (!dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
+        System.err.println("Skipping format for directory \"" + dirUri
+            + "\". Can only format local directories with scheme \""
+            + NNStorage.LOCAL_URI_SCHEME + "\".");
+        continue;
+      }
+      // To validate only file based schemes are formatted
+      assert dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME) :
+        "formatting is not supported for " + dirUri;
+
+      File curDir = new File(dirUri.getPath());
       // Its alright for a dir not to exist, or to exist (properly accessible)
       // and be completely empty.
       if (!curDir.exists() ||
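For readers skimming the confirmFormat() change above: the new behavior is only a URI-scheme check against NNStorage.LOCAL_URI_SCHEME (the local "file" scheme), with non-local directories skipped rather than being treated as local paths. Below is a minimal standalone sketch of that check, not part of the patch; the example URIs and the hard-coded "file" constant are stand-ins for the real configuration and the NNStorage constant.

import java.net.URI;
import java.util.Arrays;
import java.util.List;

public class SchemeCheckSketch {
  // Stand-in for NNStorage.LOCAL_URI_SCHEME, which names the local "file" scheme.
  private static final String LOCAL_URI_SCHEME = "file";

  public static void main(String[] args) {
    // Example URIs only: one local name directory, one non-file shared edits directory.
    List<URI> dirsToPrompt = Arrays.asList(
        URI.create("file:///data/dfs/name"),
        URI.create("dummy://127.0.0.1:2181/ledgers"));
    for (URI dirUri : dirsToPrompt) {
      if (!LOCAL_URI_SCHEME.equals(dirUri.getScheme())) {
        // Non-file directories are skipped instead of aborting the format.
        System.err.println("Skipping format for directory \"" + dirUri + "\"");
        continue;
      }
      System.out.println("Would prompt/format local directory " + dirUri.getPath());
    }
  }
}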
@@ -27,13 +27,19 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -144,4 +150,34 @@ public void testAllowFormat() throws IOException {
     NameNode.format(config);
     LOG.info("Done verifying format will succeed with allowformat true");
   }
+
+  /**
+   * Test to skip format for non file scheme directory configured
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    String logicalName = "mycluster";
+
+    // DFS_NAMENODE_RPC_ADDRESS_KEY are required to identify the NameNode
+    // is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+    // is considered.
+    String localhost = "127.0.0.1";
+    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
+    HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+
+    conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
+        DummyJournalManager.class.getName());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
+        + localhost + ":2181/ledgers");
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+
+    // An internal assert is added to verify the working of test
+    NameNode.format(conf);
+  }
 }
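As a side note, the configuration assembled by the new test can be built the same way outside JUnit. The sketch below only restates the test's setup as a self-contained snippet: the logical name, addresses, and dummy:// URI are the test's example values, DummyJournalManager and HATestUtil come from the HDFS test sources (so the test classpath is assumed), and it stops short of calling NameNode.format(), which is what the test itself exercises.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class NonFileEditsDirSetupSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Two NameNode addresses for the logical nameservice "mycluster",
    // mirroring the values used in the test above.
    HATestUtil.setFailoverConfigurations(conf, "mycluster",
        new InetSocketAddress("127.0.0.1", 8020),
        new InetSocketAddress("127.0.0.1", 9020));
    // Register a journal manager for the "dummy" scheme and point the
    // shared edits directory at a non-file URI.
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
        DummyJournalManager.class.getName());
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        "dummy://127.0.0.1:2181/ledgers");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
    // With this patch, formatting such a configuration skips the dummy://
    // directory instead of treating it as a local path.
    System.out.println("Shared edits dir: "
        + conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY));
  }
}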
@@ -167,6 +167,15 @@ public static void setFailoverConfigurations(MiniDFSCluster cluster,
       Configuration conf, String logicalName, int nsIndex) {
     InetSocketAddress nnAddr1 = cluster.getNameNode(2 * nsIndex).getNameNodeAddress();
     InetSocketAddress nnAddr2 = cluster.getNameNode(2 * nsIndex + 1).getNameNodeAddress();
+    setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+  }
+
+  /**
+   * Sets the required configurations for performing failover
+   */
+  public static void setFailoverConfigurations(Configuration conf,
+      String logicalName, InetSocketAddress nnAddr1,
+      InetSocketAddress nnAddr2) {
     String nameNodeId1 = "nn1";
     String nameNodeId2 = "nn2";
     String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();