HDFS-2979. Balancer should use logical uri for creating failover proxy with HA enabled. Contributed by Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1295473 13f79535-47bb-0310-9956-ffa450edef68
parent 9318ff4250
commit 0d7de507a5
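Big picture, as a hedged sketch rather than patch content: with HA enabled, a nameservice is addressed by a logical URI such as hdfs://ns1, and only a logical URI lets NameNodeProxies.createProxy build a failover proxy; binding the Balancer to one physical NN address defeats failover. The hunks below touch the CHANGES list, DFSUtil, the Balancer and its NameNodeConnector, and the test utilities (DFSTestUtil, MiniDFSCluster, TestDFSUtil, the balancer tests, HATestUtil). The snippet condenses the flow this diff wires together; the wrapper class name is invented for illustration.

    import java.net.URI;
    import java.util.Collection;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.NameNodeProxies;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

    public class BalancerHaSketch {  // illustrative name, not in the patch
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // One URI per configured nameservice: the logical hdfs://<nsId> when
        // HA is enabled, otherwise hdfs://host:port from the RPC address keys.
        Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
        for (URI nameNodeUri : namenodes) {
          // A logical URI lets createProxy return a failover-capable proxy
          // instead of one pinned to a single physical NameNode address.
          NamenodeProtocol namenode = NameNodeProxies
              .createProxy(conf, nameNodeUri, NamenodeProtocol.class).getProxy();
          System.out.println(nameNodeUri + " -> " + namenode);
        }
      }
    }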
@@ -244,3 +244,5 @@ HDFS-2920. fix remaining TODO items. (atm and todd)
 HDFS-3027. Implement a simple NN health check. (atm)
 
 HDFS-3023. Optimize entries in edits log for persistBlocks call. (todd)
+
+HDFS-2979. Balancer should use logical uri for creating failover proxy with HA enabled. (atm)
@@ -28,9 +28,11 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
@@ -605,6 +608,68 @@ public String toString() {
           "nnId=" + namenodeId + ";addr=" + addr + "]";
     }
   }
 
+  /**
+   * Get a URI for each configured nameservice. If a nameservice is
+   * HA-enabled, then the logical URI of the nameservice is returned. If the
+   * nameservice is not HA-enabled, then a URI corresponding to an RPC address
+   * of the single NN for that nameservice is returned, preferring the service
+   * RPC address over the client RPC address.
+   * 
+   * @param conf configuration
+   * @return a collection of all configured NN URIs, preferring service
+   *         addresses
+   */
+  public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
+    return getNameServiceUris(conf,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get a URI for each configured nameservice. If a nameservice is
+   * HA-enabled, then the logical URI of the nameservice is returned. If the
+   * nameservice is not HA-enabled, then a URI corresponding to the address of
+   * the single NN for that nameservice is returned.
+   * 
+   * @param conf configuration
+   * @param keys configuration keys to try in order to get the URI for non-HA
+   *        nameservices
+   * @return a collection of all configured NN URIs
+   */
+  public static Collection<URI> getNameServiceUris(Configuration conf,
+      String... keys) {
+    Set<URI> ret = new HashSet<URI>();
+    for (String nsId : getNameServiceIds(conf)) {
+      if (HAUtil.isHAEnabled(conf, nsId)) {
+        // Add the logical URI of the nameservice.
+        try {
+          ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
+        } catch (URISyntaxException ue) {
+          throw new IllegalArgumentException(ue);
+        }
+      } else {
+        // Add the URI corresponding to the address of the NN.
+        for (String key : keys) {
+          String addr = conf.get(concatSuffixes(key, nsId));
+          if (addr != null) {
+            ret.add(createUri(HdfsConstants.HDFS_URI_SCHEME,
+                NetUtils.createSocketAddr(addr)));
+            break;
+          }
+        }
+      }
+    }
+    // Add the generic configuration keys.
+    for (String key : keys) {
+      String addr = conf.get(key);
+      if (addr != null) {
+        ret.add(createUri("hdfs", NetUtils.createSocketAddr(addr)));
+        break;
+      }
+    }
+    return ret;
+  }
+
   /**
    * Given the InetSocketAddress this method returns the nameservice Id
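A hedged usage example for the two helpers added above (hostnames invented; the keys are the dfs.federation.nameservices / dfs.ha.namenodes.* / dfs.namenode.rpc-address* names this codebase already uses, mirroring the testGetNNUris case added later in this diff):

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    // ns1 is HA: two NNs behind one logical name.
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.ns1.nn1", "a.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns1.nn2", "b.example.com:8020");
    // ns2 is a plain single-NN nameservice.
    conf.set("dfs.namenode.rpc-address.ns2", "c.example.com:8020");

    // Expected per the code above: { hdfs://ns1, hdfs://c.example.com:8020 }.
    // The HA nameservice collapses to its logical URI; for ns2 the keys are
    // tried in the order given, which is why getNsServiceRpcUris lists the
    // service-RPC key first so it wins over the client RPC address.
    Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);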
@@ -24,8 +24,8 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.net.URI;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -39,7 +39,6 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -1380,8 +1379,7 @@ private ReturnStatus run(int iteration, Formatter formatter) {
    * for each namenode,
    * execute a {@link Balancer} to work through all datanodes once.
    */
-  static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
-      final Parameters p,
+  static int run(Collection<URI> namenodes, final Parameters p,
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime = 2000*conf.getLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -1395,10 +1393,8 @@ static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
     final List<NameNodeConnector> connectors
         = new ArrayList<NameNodeConnector>(namenodes.size());
     try {
-      for(Entry<String, Map<String, InetSocketAddress>> entry :
-          namenodes.entrySet()) {
-        connectors.add(
-            new NameNodeConnector(entry.getValue().values(), conf));
+      for (URI uri : namenodes) {
+        connectors.add(new NameNodeConnector(uri, conf));
       }
     
       boolean done = false;
@@ -1480,8 +1476,7 @@ public int run(String[] args) {
       try {
         checkReplicationPolicyCompatibility(conf);
 
-        final Map<String, Map<String, InetSocketAddress>> namenodes =
-            DFSUtil.getNNServiceRpcAddresses(conf);
+        final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
         return Balancer.run(namenodes, parse(args), conf);
       } catch (IOException e) {
         System.out.println(e + ". Exiting ...");
@@ -21,9 +21,7 @@
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.URI;
-import java.util.Collection;
 import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
@@ -38,7 +36,6 @@
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
@@ -46,8 +43,6 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 
-import com.google.common.collect.Lists;
-
 /**
  * The class provides utilities for {@link Balancer} to access a NameNode
  */
@@ -56,7 +51,7 @@ class NameNodeConnector {
   private static final Log LOG = Balancer.LOG;
   private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
 
-  final InetSocketAddress namenodeAddress;
+  final URI nameNodeUri;
   final String blockpoolID;
 
   final NamenodeProtocol namenode;
@@ -70,10 +65,9 @@ class NameNodeConnector {
   private BlockTokenSecretManager blockTokenSecretManager;
   private Daemon keyupdaterthread; // AccessKeyUpdater thread
 
-  NameNodeConnector(Collection<InetSocketAddress> haNNs,
+  NameNodeConnector(URI nameNodeUri,
       Configuration conf) throws IOException {
-    this.namenodeAddress = Lists.newArrayList(haNNs).get(0);
-    URI nameNodeUri = NameNode.getUri(this.namenodeAddress);
+    this.nameNodeUri = nameNodeUri;
 
     this.namenode =
         NameNodeProxies.createProxy(conf, nameNodeUri, NamenodeProtocol.class)
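How a logical URI becomes a live, failover-capable proxy is handled outside this diff, in the HA client machinery; a hedged sketch of the client configuration it consults (the key prefix and provider class come from the HA branch's client code, not from this patch):

    Configuration conf = new HdfsConfiguration();
    // Failover behavior for the "ns1" nameservice is looked up under this key.
    conf.set("dfs.client.failover.proxy.provider.ns1",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    // With that set, the connector's logical URI resolves to a proxy that
    // retries against the other NameNode on failover.
    NamenodeProtocol namenode = NameNodeProxies
        .createProxy(conf, new URI("hdfs://ns1"), NamenodeProtocol.class)
        .getProxy();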
@@ -186,7 +180,7 @@ void close() {
 
   @Override
   public String toString() {
-    return getClass().getSimpleName() + "[namenodeAddress=" + namenodeAddress
+    return getClass().getSimpleName() + "[namenodeUri=" + nameNodeUri
         + ", id=" + blockpoolID
         + "]";
   }
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 
 import java.io.BufferedOutputStream;
@@ -38,9 +40,11 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -52,6 +56,7 @@
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -74,6 +79,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
+import com.google.common.base.Joiner;
+
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
 
@@ -681,4 +688,21 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
 
     return BlockOpResponseProto.parseDelimitedFrom(in);
   }
+  
+  public static void setFederatedConfiguration(MiniDFSCluster cluster,
+      Configuration conf) {
+    Set<String> nameservices = new HashSet<String>();
+    for (NameNodeInfo info : cluster.getNameNodeInfos()) {
+      assert info.nameserviceId != null;
+      nameservices.add(info.nameserviceId);
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
+          info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
+          info.nameNode.getNameNodeAddress()).toString());
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+          info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
+          info.nameNode.getNameNodeAddress()).toString());
+    }
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
+        .join(nameservices));
+  }
 }
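A usage sketch pieced together from the test hunks later in this diff, showing why the helper exists: a federated MiniDFSCluster binds its NNs to ports chosen at startup, so the addresses must be copied back into the client Configuration before DFSUtil can enumerate the nameservices:

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf))
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
        .build();
    cluster.waitActive();
    // Publish every NN's RPC address under its per-nameservice keys...
    DFSTestUtil.setFederatedConfiguration(cluster, conf);
    // ...so the balancer-side lookup sees one URI per nameservice.
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);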
@@ -316,8 +316,13 @@ public class DataNodeProperties {
   static class NameNodeInfo {
     final NameNode nameNode;
     final Configuration conf;
-    NameNodeInfo(NameNode nn, Configuration conf) {
+    final String nameserviceId;
+    final String nnId;
+    NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
+        Configuration conf) {
       this.nameNode = nn;
+      this.nameserviceId = nameserviceId;
+      this.nnId = nnId;
       this.conf = conf;
     }
   }
@@ -674,6 +679,10 @@ public static URI formatSharedEditsDir(File baseDir, int minNN, int maxNN)
     return fileAsURI(new File(baseDir, "shared-edits-" +
         minNN + "-through-" + maxNN));
   }
+  
+  public NameNodeInfo[] getNameNodeInfos() {
+    return this.nameNodes;
+  }
 
   private void initNameNodeConf(Configuration conf,
       String nameserviceId, String nnId,
@@ -763,7 +772,8 @@ private void createNameNode(int nnIndex, Configuration conf,
         .getHostPortString(nn.getHttpAddress()));
     DFSUtil.setGenericConf(conf, nameserviceId, nnId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
+    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
+        new Configuration(conf));
   }
 
   /**
@@ -1264,7 +1274,7 @@ public synchronized void shutdownNameNode(int nnIndex) {
       nn.stop();
       nn.join();
       Configuration conf = nameNodes[nnIndex].conf;
-      nameNodes[nnIndex] = new NameNodeInfo(null, conf);
+      nameNodes[nnIndex] = new NameNodeInfo(null, null, null, conf);
     }
   }
 
@@ -1307,10 +1317,12 @@ public synchronized void restartNameNode(int nnIndex) throws IOException {
    */
   public synchronized void restartNameNode(int nnIndex, boolean waitActive)
       throws IOException {
+    String nameserviceId = nameNodes[nnIndex].nameserviceId;
+    String nnId = nameNodes[nnIndex].nnId;
     Configuration conf = nameNodes[nnIndex].conf;
     shutdownNameNode(nnIndex);
     NameNode nn = NameNode.createNameNode(new String[] {}, conf);
-    nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
+    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, conf);
     if (waitActive) {
       waitClusterUp();
       LOG.info("Restarted the namenode");
@@ -25,6 +25,8 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -41,6 +43,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 public class TestDFSUtil {
@@ -233,11 +237,12 @@ public void checkNameServiceId(Configuration conf, String addr,
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
   @Test
-  public void testSingleNamenode() {
+  public void testSingleNamenode() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DEFAULT_ADDRESS);
 
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
@@ -247,6 +252,10 @@ public void testSingleNamenode() {
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
+    
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(1, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://" + DEFAULT_ADDRESS)));
   }
 
   /** Tests to ensure default namenode is used as fallback */
@@ -407,13 +416,14 @@ public void testGetServerInfo() {
   }
 
   @Test
-  public void testHANameNodesWithFederation() {
+  public void testHANameNodesWithFederation() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
 
     final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
     final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
     final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
 
     // Two nameservices, each with two NNs.
     conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
@@ -460,6 +470,11 @@ public void testHANameNodesWithFederation() {
     // Ditto for nameservice IDs, if multiple are defined
     assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
     assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf));
+    
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(2, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://ns2")));
   }
 
   @Test
@@ -509,4 +524,34 @@ public void testSubstituteForWildcardAddress() throws IOException {
     assertEquals("127.0.0.1:12345",
         DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo"));
   }
+  
+  @Test
+  public void testGetNNUris() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
+    final String NS2_NN_HOST = "ns2-nn.example.com:8020";
+    final String NN_HOST = "nn.example.com:8020";
+    
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
+    
+    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"),
+        NS2_NN_HOST);
+    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN_HOST);
+    
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    
+    assertEquals(3, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_HOST)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN_HOST)));
+  }
 }
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -338,8 +338,7 @@ private void runBalancer(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 
     // start rebalancing
-    Map<String, Map<String, InetSocketAddress>> namenodes =
-        DFSUtil.getNNServiceRpcAddresses(conf);
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
     assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
-import java.net.InetSocketAddress;
-import java.util.Map;
+import java.net.URI;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -67,12 +68,13 @@ public void testBalancerWithHANameNodes() throws Exception {
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
     nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
-    MiniDFSNNTopology simpleHATopology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf(null).addNN(nn1Conf)
-          .addNN(new MiniDFSNNTopology.NNConf("nn2")));
-    cluster = new MiniDFSCluster.Builder(conf).nnTopology(simpleHATopology)
-        .numDataNodes(capacities.length).racks(racks).simulatedCapacities(
-            capacities).build();
+    Configuration copiedConf = new Configuration(conf);
+    cluster = new MiniDFSCluster.Builder(copiedConf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(capacities.length)
+        .racks(racks)
+        .simulatedCapacities(capacities)
+        .build();
     HATestUtil.setFailoverConfigurations(cluster, conf);
     try {
       cluster.waitActive();
@@ -89,14 +91,12 @@ public void testBalancerWithHANameNodes() throws Exception {
       // start up an empty node with the same capacity and on the same rack
       cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
           new long[] { newNodeCapacity });
-      
-      HATestUtil.setFailoverConfigurations(cluster, conf, NameNode.getUri(
-          cluster.getNameNode(0).getNameNodeAddress()).getHost());
       totalCapacity += newNodeCapacity;
       TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
          cluster);
-      Map<String, Map<String, InetSocketAddress>> namenodes = DFSUtil
-          .getNNServiceRpcAddresses(conf);
+      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      assertEquals(1, namenodes.size());
+      assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
       final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
       assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
       TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
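Condensed from the hunks above, the HA test now exercises the logical-URI path end to end; all names below come from this patch or the existing test helpers (Parameters.DEFALUT is the identifier's actual spelling in this codebase):

    cluster = new MiniDFSCluster.Builder(new Configuration(conf))
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(capacities.length)
        .build();
    HATestUtil.setFailoverConfigurations(cluster, conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    // Exactly one nameservice, addressed by its logical URI:
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);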
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -40,8 +40,8 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -157,8 +157,7 @@ static void runBalancer(Suite s,
     LOG.info("BALANCER 1");
 
     // start rebalancing
-    final Map<String, Map<String, InetSocketAddress>> namenodes =
-        DFSUtil.getNNServiceRpcAddresses(s.conf);
+    final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf);
     Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
@@ -252,7 +251,8 @@ private void unevenDistribution(final int nNameNodes,
     final ExtendedBlock[][] blocks;
     {
       LOG.info("UNEVEN 1");
-      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      final MiniDFSCluster cluster = new MiniDFSCluster
+          .Builder(new Configuration(conf))
           .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
           .numDataNodes(nDataNodes)
           .racks(racks)
@@ -261,6 +261,7 @@ private void unevenDistribution(final int nNameNodes,
       LOG.info("UNEVEN 2");
       try {
         cluster.waitActive();
+        DFSTestUtil.setFederatedConfiguration(cluster, conf);
         LOG.info("UNEVEN 3");
         final Suite s = new Suite(cluster, nNameNodes, nDataNodes, conf);
         blocks = generateBlocks(s, usedSpacePerNN);
@@ -327,13 +328,15 @@ private void runTest(final int nNameNodes, long[] capacities, String[] racks,
     Assert.assertEquals(nDataNodes, racks.length);
 
     LOG.info("RUN_TEST -1");
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(new Configuration(conf))
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
         .numDataNodes(nDataNodes)
         .racks(racks)
         .simulatedCapacities(capacities)
         .build();
     LOG.info("RUN_TEST 0");
+    DFSTestUtil.setFederatedConfiguration(cluster, conf);
 
     try {
       cluster.waitActive();
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
@@ -188,6 +189,12 @@ public static String getLogicalHostname(MiniDFSCluster cluster) {
     return String.format(LOGICAL_HOSTNAME, cluster.getInstanceId());
   }
+  
+  public static URI getLogicalUri(MiniDFSCluster cluster)
+      throws URISyntaxException {
+    return new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+        getLogicalHostname(cluster));
+  }
 
   public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
       List<Integer> txids) throws InterruptedException {
     long start = System.currentTimeMillis();
|
Loading…
Reference in New Issue
Block a user