HDFS-2355. Federation: enable using the same configuration file across all the nodes in the cluster. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1177100 13f79535-47bb-0310-9956-ffa450edef68
parent c5179b16ec
commit e9dd78d9fe
@@ -39,7 +39,7 @@ Trunk (unreleased changes)
     via szetszwo)
 
     HDFS-2351 Change Namenode and Datanode to register each of their protocols
-    seperately (Sanjay Radia)
+    seperately. (Sanjay Radia)
 
     HDFS-2356. Support case insensitive query parameter names in webhdfs.
     (szetszwo)
@@ -47,6 +47,9 @@ Trunk (unreleased changes)
     HDFS-2368. Move SPNEGO conf properties from hdfs-default.xml to
     hdfs-site.xml. (szetszwo)
 
+    HDFS-2355. Federation: enable using the same configuration file across
+    all the nodes in the cluster. (suresh)
+
   BUG FIXES
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
 
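For context, the entry above means that every per-nameservice setting is stored under a key suffixed with its nameservice id, so one hdfs-site.xml can be shipped unchanged to every node in a federated cluster. A minimal sketch of such a shared configuration, written as conf.set calls inside a class with the same imports as the test file further down (nameservice ids, host names and ports are made up for illustration; the keys and the getNameServiceIdKey helper are the ones used in this patch):

    // Hypothetical shared federation config; values are illustrative only.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
    // Per-nameservice settings live under suffixed keys, i.e. "<key>.<nameserviceId>",
    // which is what DFSUtil.getNameServiceIdKey(key, nsId) constructs.
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "nn1.example.com:8020");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "nn2.example.com:8020");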
@@ -38,6 +38,7 @@
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -576,17 +577,6 @@ public static void setGenericConf(Configuration conf,
     }
   }
 
-  /**
-   * Returns the configured nameservice Id
-   *
-   * @param conf
-   *          Configuration object to lookup the nameserviceId
-   * @return nameserviceId string from conf
-   */
-  public static String getNameServiceId(Configuration conf) {
-    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
-  }
-
   /** Return used as percentage of capacity */
   public static float getPercentUsed(long used, long capacity) {
     return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity;
@@ -696,4 +686,77 @@ public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
         ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
   }
+
+  /**
+   * Get name service Id for the {@link NameNode} based on namenode RPC address
+   * matching the local node address.
+   */
+  public static String getNamenodeNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+
+  /**
+   * Get name service Id for the BackupNode based on backup node RPC address
+   * matching the local node address.
+   */
+  public static String getBackupNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+  }
+
+  /**
+   * Get name service Id for the secondary node based on secondary http address
+   * matching the local node address.
+   */
+  public static String getSecondaryNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+  }
+
+  /**
+   * Get the nameservice Id by matching the {@code addressKey} with the
+   * address of the local node.
+   *
+   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+   * configured, this method determines the nameservice Id by matching the local
+   * node's address with the configured addresses. When a match is found, it
+   * returns the nameservice Id from the corresponding configuration key.
+   *
+   * @param conf Configuration
+   * @param addressKey configuration key to get the address.
+   * @return name service Id on success, null on failure.
+   * @throws HadoopIllegalArgumentException on error
+   */
+  private static String getNameServiceId(Configuration conf, String addressKey) {
+    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+    if (nameserviceId != null) {
+      return nameserviceId;
+    }
+
+    Collection<String> ids = getNameServiceIds(conf);
+    if (ids == null || ids.size() == 0) {
+      // Not federation configuration, hence no nameservice Id
+      return null;
+    }
+
+    // Match the rpc address with that of local address
+    int found = 0;
+    for (String id : ids) {
+      String addr = conf.get(getNameServiceIdKey(addressKey, id));
+      InetSocketAddress s = NetUtils.createSocketAddr(addr);
+      if (NetUtils.isLocalAddress(s.getAddress())) {
+        nameserviceId = id;
+        found++;
+      }
+    }
+    if (found > 1) { // Only one address must match the local address
+      throw new HadoopIllegalArgumentException(
+          "Configuration has multiple RPC addresses that matches "
+              + "the local node's address. Please configure the system with "
+              + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
+    }
+    if (found == 0) {
+      throw new HadoopIllegalArgumentException("Configuration address "
+          + addressKey + " is missing in configuration with name service Id");
+    }
+    return nameserviceId;
+  }
 }
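A hedged usage sketch of the DFSUtil helpers added above (the address value is illustrative, the behavior mirrors the patch's own tests): the explicit dfs.federation.nameservice.id wins if present; otherwise the nameservice whose configured address resolves to a local interface is chosen, and zero or multiple local matches raise HadoopIllegalArgumentException, as the javadoc states.

    // Sketch only: resolving this node's nameservice id from a shared config.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "localhost:8020");
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);  // "ns1", the address is local
    // An explicit id bypasses the address matching entirely:
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "ns0");
    nsId = DFSUtil.getNamenodeNameServiceId(conf);         // now "ns0"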
@@ -25,6 +25,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -388,4 +389,9 @@ String getBlockPoolId() {
   String getClusterId() {
     return clusterId;
   }
+
+  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
 }
@@ -30,6 +30,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -453,11 +454,14 @@ protected NameNode(Configuration conf, NamenodeRole role)
       throws IOException {
     this.role = role;
     try {
-      initializeGenericKeys(conf);
+      initializeGenericKeys(conf, getNameServiceId(conf));
       initialize(conf);
     } catch (IOException e) {
       this.stop();
       throw e;
+    } catch (HadoopIllegalArgumentException e) {
+      this.stop();
+      throw e;
     }
   }
 
@@ -762,16 +766,16 @@ public static NameNode createNameNode(String argv[], Configuration conf)
    * @param conf
    *          Configuration object to lookup specific key and to set the value
    *          to the key passed. Note the conf object is modified
+   * @param nameserviceId name service Id
    * @see DFSUtil#setGenericConf(Configuration, String, String...)
    */
-  public static void initializeGenericKeys(Configuration conf) {
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+  public static void initializeGenericKeys(Configuration conf, String
+      nameserviceId) {
     if ((nameserviceId == null) || nameserviceId.isEmpty()) {
       return;
     }
 
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
 
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -779,6 +783,14 @@ public static void initializeGenericKeys(Configuration conf) {
     }
   }
 
+  /**
+   * Get the name service Id for the node
+   * @return name service Id or null if federation is not configured
+   */
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getNamenodeNameServiceId(conf);
+  }
+
   /**
   */
   public static void main(String argv[]) throws Exception {
@@ -792,5 +804,4 @@ public static void main(String argv[]) throws Exception {
       System.exit(-1);
     }
   }
-
 }
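To make the shared file usable by existing code, initializeGenericKeys copies the per-nameservice values onto the plain generic keys once the node knows its nameservice id. A small hedged sketch (the value is illustrative), mirroring the testConfModification test further down:

    // Sketch: suffixed key -> generic key, so code reading the generic key is unchanged.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "nn1.example.com:8020");
    NameNode.initializeGenericKeys(conf, "ns1");
    // conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY) now yields "nn1.example.com:8020";
    // the hunk above also derives the default filesystem URI from that address.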
@@ -38,10 +38,12 @@
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -173,12 +175,17 @@ public SecondaryNameNode(Configuration conf) throws IOException {
   public SecondaryNameNode(Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
     try {
-      NameNode.initializeGenericKeys(conf);
+      NameNode.initializeGenericKeys(conf,
+          DFSUtil.getSecondaryNameServiceId(conf));
       initialize(conf, commandLineOpts);
     } catch(IOException e) {
       shutdown();
       LOG.fatal("Failed to start secondary namenode. ", e);
       throw e;
+    } catch(HadoopIllegalArgumentException e) {
+      shutdown();
+      LOG.fatal("Failed to start secondary namenode. ", e);
+      throw e;
     }
   }
 
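Each daemon type resolves its id through the address key that identifies it; for the secondary namenode that is the secondary HTTP address, as in this hedged sketch (the port is illustrative):

    // Sketch: a secondary namenode locating its nameservice id in the shared configuration.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "ns1"), "localhost:50090");
    NameNode.initializeGenericKeys(conf, DFSUtil.getSecondaryNameServiceId(conf));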
@@ -29,8 +29,7 @@
import java.util.Iterator;
import java.util.List;

import junit.framework.Assert;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;

import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

public class TestDFSUtil {
  /**
@@ -76,79 +74,141 @@ public void testLocatedBlocks2Locations() {
      }
    }

    assertTrue("expected 1 corrupt files but got " + corruptCount,
        corruptCount == 1);

    assertTrue("expected 1 corrupt files but got " + corruptCount,
        corruptCount == 1);

    // test an empty location
    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
    assertEquals(0, bs.length);
  }

  /**
   * Test for
   * {@link DFSUtil#getNameServiceIds(Configuration)}
   * {@link DFSUtil#getNameServiceId(Configuration)}
   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}

  private Configuration setupAddress(String key) {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
    return conf;
  }

  /**
   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
   * nameserviceId from the configuration returned
   */
  @Test
  public void testMultipleNamenodes() throws IOException {
  public void getNameServiceId() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");

    // Test - The configured nameserviceIds are returned
    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getNameNodeNameServiceId(Configuration)} to ensure
   * nameserviceId for namenode is determined based on matching the address with
   * local node's address
   */
  @Test
  public void getNameNodeNameServiceId() {
    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
   * nameserviceId for backup node is determined based on matching the address
   * with local node's address
   */
  @Test
  public void getBackupNameServiceId() {
    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
   * nameserviceId for backup node is determined based on matching the address
   * with local node's address
   */
  @Test
  public void getSecondaryNameServiceId() {
    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
  }

  /**
   * Test {@link DFSUtil#getNameServiceId(Configuration, String))} to ensure
   * exception is thrown when multiple rpc addresses match the local node's
   * address
   */
  @Test(expected = HadoopIllegalArgumentException.class)
  public void testGetNameServiceIdException() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
        "localhost:9000");
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
        "localhost:9001");
    DFSUtil.getNamenodeNameServiceId(conf);
    fail("Expected exception is not thrown");
  }

  /**
   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
   */
  @Test
  public void testGetNameServiceIds() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
    Iterator<String> it = nameserviceIds.iterator();
    assertEquals(2, nameserviceIds.size());
    assertEquals("nn1", it.next().toString());
    assertEquals("nn2", it.next().toString());

    // Tests default nameserviceId is returned
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    assertEquals("nn1", DFSUtil.getNameServiceId(conf));

  }

  /**
   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)
   * (Configuration)}
   */
  @Test
  public void testMultipleNamenodes() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
    // Test - configured list of namenodes are returned
    final String NN1_ADDRESS = "localhost:9000";
    final String NN2_ADDRESS = "localhost:9001";
    final String NN3_ADDRESS = "localhost:9002";
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);

    Collection<InetSocketAddress> nnAddresses =
        DFSUtil.getNNServiceRpcAddresses(conf);
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
        NN1_ADDRESS);
    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
        NN2_ADDRESS);

    Collection<InetSocketAddress> nnAddresses = DFSUtil
        .getNNServiceRpcAddresses(conf);
    assertEquals(2, nnAddresses.size());
    Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
    assertEquals(2, nameserviceIds.size());
    InetSocketAddress addr = iterator.next();
    assertEquals("localhost", addr.getHostName());
    assertEquals(9000, addr.getPort());
    addr = iterator.next();
    assertEquals("localhost", addr.getHostName());
    assertEquals(9001, addr.getPort());

    // Test - can look up nameservice ID from service address
    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
        conf, testAddress1,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals("nn1", nameserviceId);
    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
        conf, testAddress2,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals("nn2", nameserviceId);
    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
        conf, testAddress3,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertNull(nameserviceId);
    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
    checkNameServiceId(conf, NN3_ADDRESS, null);
  }

  /**

  public void checkNameServiceId(Configuration conf, String addr,
      String expectedNameServiceId) {
    InetSocketAddress s = NetUtils.createSocketAddr(addr);
    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertEquals(expectedNameServiceId, nameserviceId);
  }

  /**
   * Test for
   * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
   */
@@ -157,27 +217,25 @@ public void testSingleNamenode() {
    HdfsConfiguration conf = new HdfsConfiguration();
    final String DEFAULT_ADDRESS = "localhost:9000";
    final String NN2_ADDRESS = "localhost:9001";
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);

    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);

    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
    boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertTrue(isDefault);
    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
    isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    assertFalse(isDefault);
  }

  /** Tests to ensure default namenode is used as fallback */
  @Test
  public void testDefaultNamenode() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    final String hdfs_default = "hdfs://localhost:9999/";
    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that
    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
    // default namenode address is returned.
    List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
    assertEquals(1, addrList.size());
@@ -191,26 +249,26 @@ public void testDefaultNamenode() throws IOException {
  @Test
  public void testConfModification() throws IOException {
    final HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    final String nameserviceId = DFSUtil.getNameServiceId(conf);

    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);

    // Set the nameservice specific keys with nameserviceId in the config key
    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
      // Note: value is same as the key
      conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
    }

    // Initialize generic keys from specific keys
    NameNode.initializeGenericKeys(conf);
    NameNode.initializeGenericKeys(conf, nameserviceId);

    // Retrieve the keys without nameserviceId and Ensure generic keys are set
    // to the correct value
    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
      assertEquals(key, conf.get(key));
    }
  }

  /**
   * Tests for empty configuration, an exception is thrown from
   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -238,16 +296,16 @@ public void testEmptyConf() {
    } catch (IOException expected) {
    }
  }

  @Test
  public void testGetServerInfo(){
  public void testGetServerInfo() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    String httpsport = DFSUtil.getInfoServer(null, conf, true);
    Assert.assertEquals("0.0.0.0:50470", httpsport);
    assertEquals("0.0.0.0:50470", httpsport);
    String httpport = DFSUtil.getInfoServer(null, conf, false);
    Assert.assertEquals("0.0.0.0:50070", httpport);
    assertEquals("0.0.0.0:50070", httpport);
  }

}
@@ -96,7 +96,8 @@ public void testBlockScannerAfterRefresh() throws IOException,
 
     String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
     for (int i = 0; i < 2; i++) {
-      String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+      String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+          .getConfiguration(i));
       namenodesBuilder.append(nsId);
       namenodesBuilder.append(",");
     }
@@ -116,7 +117,7 @@ public void testBlockScannerAfterRefresh() throws IOException,
       LOG.info(ex.getMessage());
     }
 
-    namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+    namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
         .getConfiguration(2)));
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
         .toString());