HDFS-9249. NPE is thrown if an IOException is thrown in NameNode constructor. (Wei-Chiu Chuang via Yongjun Zhang)

Yongjun Zhang 2015-11-09 14:04:03 -08:00
parent 8fbea531d7
commit 2741a2109b
4 changed files with 88 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -2277,6 +2277,9 @@ Release 2.8.0 - UNRELEASED
initialization, because HftpFileSystem is missing.
(Mingliang Liu via cnauroth)
HDFS-9249. NPE is thrown if an IOException is thrown in NameNode constructor.
(Wei-Chiu Chuang via Yongjun Zhang)
Release 2.7.3 - UNRELEASED
INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -217,7 +217,9 @@ void stop(boolean reportError) {
// Abort current log segment - otherwise the NN shutdown code
// will close it gracefully, which is incorrect.
if (namesystem != null) {
getFSImage().getEditLog().abortCurrentLogSegment();
}
// Stop name-node threads
super.stop();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -889,15 +889,24 @@ protected NameNode(Configuration conf, NamenodeRole role)
haContext.writeUnlock();
}
} catch (IOException e) {
-  this.stop();
+  this.stopAtException(e);
throw e;
} catch (HadoopIllegalArgumentException e) {
-  this.stop();
+  this.stopAtException(e);
throw e;
}
this.started.set(true);
}
private void stopAtException(Exception e) {
try {
this.stop();
} catch (Exception ex) {
LOG.warn("Encountered exception when handling exception ("
+ e.getMessage() + "):", ex);
}
}
protected HAState createHAState(StartupOption startOpt) {
if (!haEnabled || startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY) {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

@@ -21,6 +21,7 @@
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
@@ -30,7 +31,9 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@@ -46,6 +49,8 @@
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
@@ -128,6 +133,72 @@ void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
Collections.singletonList((int)thisCheckpointTxId));
}
/**
 * Regression test for HDFS-9249.
 * This test configures the primary name node with SIMPLE authentication,
 * and configures the backup node with Kerberos authentication using
 * invalid keytab settings.
 *
 * Prior to the fix, this configuration caused the backup node to throw an
 * NPE while trying to abort the edit log.
 */
@Test
public void startBackupNodeWithIncorrectAuthentication() throws IOException {
Configuration c = new HdfsConfiguration();
StartupOption startupOpt = StartupOption.CHECKPOINT;
String dirs = getBackupNodeDir(startupOpt, 1);
c.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:1234");
c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0");
c.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
c.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
-1); // disable block scanner
c.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
"127.0.0.1:0");
c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
"127.0.0.1:0");
NameNode nn;
try {
Configuration nnconf = new HdfsConfiguration(c);
DFSTestUtil.formatNameNode(nnconf);
nn = NameNode.createNameNode(new String[] {}, nnconf);
} catch (IOException e) {
LOG.info("IOException is thrown creating name node");
throw e;
}
c.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
c.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, "");
BackupNode bn = null;
try {
bn = (BackupNode)NameNode.createNameNode(
new String[] {startupOpt.getName()}, c);
assertTrue("Namesystem in BackupNode should be null",
bn.getNamesystem() == null);
fail("Incorrect authentication setting should throw IOException");
} catch (IOException e) {
LOG.info("IOException thrown as expected", e);
} finally {
if (nn != null) {
nn.stop();
}
if (bn != null) {
bn.stop();
}
SecurityUtil.setAuthenticationMethod(
UserGroupInformation.AuthenticationMethod.SIMPLE, c);
// reset security authentication
UserGroupInformation.setConfiguration(c);
}
}
@Test
public void testCheckpointNode() throws Exception {
testCheckpoint(StartupOption.CHECKPOINT);