HDFS-14261. Kerberize JournalNodeSyncer unit test. Contributed by Siyao Meng.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
commit 5c10630ad8
parent 0ceb1b70f3
@@ -20,6 +20,7 @@
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -29,16 +30,25 @@
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
+import org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.hdfs.server.namenode.FileJournalManager
     .getLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
@@ -46,6 +56,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
+import java.util.Properties;
 import java.util.Random;
 
 /**
@@ -62,12 +73,85 @@ public class TestJournalNodeSync {
   private int activeNNindex=0;
   private static final int DFS_HA_TAILEDITS_PERIOD_SECONDS=1;
 
+  private static HdfsConfiguration baseConf;
+  private static File baseDir;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static MiniKdc kdc;
+
   @Rule
   public TestName testName = new TestName();
 
+  @BeforeClass
+  public static void init() throws Exception {
+    // Init Kerberos
+    baseDir =
+        GenericTestUtils.getTestDir(TestSecureNNWithQJM.class.getSimpleName());
+    FileUtil.fullyDelete(baseDir);
+    Assert.assertTrue(baseDir.mkdirs());
+
+    Properties kdcConf = MiniKdc.createConf();
+    kdc = new MiniKdc(kdcConf, baseDir);
+    kdc.start();
+
+    baseConf = new HdfsConfiguration();
+    SecurityUtil.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
+    UserGroupInformation.setConfiguration(baseConf);
+    Assert.assertTrue("Expected configuration to enable security",
+        UserGroupInformation.isSecurityEnabled());
+
+    String userName = UserGroupInformation.getLoginUser().getShortUserName();
+    File keytabFile = new File(baseDir, userName + ".keytab");
+    String keytab = keytabFile.getAbsolutePath();
+    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
+    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
+    kdc.createPrincipal(keytabFile,
+        userName + "/" + krbInstance,
+        "HTTP/" + krbInstance);
+    String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm();
+    String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();
+
+    baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+    baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
+    baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+    baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
+    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+        spnegoPrincipal);
+    baseConf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab);
+    baseConf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+    baseConf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
+        spnegoPrincipal);
+    baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
+    baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
+
+    keystoresDir = baseDir.getAbsolutePath();
+    sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestSecureNNWithQJM.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
+    baseConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    baseConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+  }
+
+  @AfterClass
+  public static void destroy() throws Exception {
+    if (kdc != null) {
+      kdc.stop();
+    }
+    FileUtil.fullyDelete(baseDir);
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+  }
+
   @Before
-  public void setUpMiniCluster() throws IOException {
-    conf = new HdfsConfiguration();
+  public void setUpMiniCluster() throws Exception {
+    conf = new HdfsConfiguration(baseConf);
     conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 1000L);
     if (testName.getMethodName().equals(
@@ -518,6 +602,8 @@ private long generateEditLog() throws IOException {
    * @return the startTxId of next segment after rolling edits.
    */
   private long generateEditLog(int numEdits) throws IOException {
+    // rollEditLog first due to OP_UPDATE_MASTER_KEY
+    dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
     long lastWrittenTxId = dfsCluster.getNameNode(activeNNindex).getFSImage()
         .getEditLog().getLastWrittenTxId();
     for (int i = 1; i <= numEdits; i++) {
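
Below is a minimal, self-contained sketch (not part of the patch) of the MiniKdc bootstrap pattern that the new init() method above applies. The class name SecureTestConfSketch and the helper secureConf() are hypothetical, and only a reduced set of the configuration keys from the patch is shown; it assumes the same Hadoop test APIs visible in the hunk above.

import java.io.File;
import java.util.Properties;

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

// Hypothetical helper, not in the patch: builds a Kerberized HdfsConfiguration
// the same way init() does, limited to the NameNode/JournalNode keys.
public class SecureTestConfSketch {

  public static HdfsConfiguration secureConf(File baseDir) throws Exception {
    // Start an embedded KDC rooted in the test directory.
    Properties kdcConf = MiniKdc.createConf();
    MiniKdc kdc = new MiniKdc(kdcConf, baseDir);
    kdc.start();

    HdfsConfiguration conf = new HdfsConfiguration();
    SecurityUtil.setAuthenticationMethod(
        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);

    // One keytab holding both the HDFS and the HTTP (SPNEGO) principals.
    String user = UserGroupInformation.getLoginUser().getShortUserName();
    File keytab = new File(baseDir, user + ".keytab");
    kdc.createPrincipal(keytab, user + "/localhost", "HTTP/localhost");
    String hdfsPrincipal = user + "/localhost@" + kdc.getRealm();
    String spnegoPrincipal = "HTTP/localhost@" + kdc.getRealm();

    // Wire the principals and keytab into the NameNode and JournalNode keys,
    // mirroring the assignments made in init() above.
    conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab.getAbsolutePath());
    conf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    conf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab.getAbsolutePath());
    conf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
        spnegoPrincipal);

    // A real test must also stop the KDC in teardown, as destroy() does.
    return conf;
  }
}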