HDFS-3208. Bogus entries in hosts files are incorrectly displayed in the report. Contributed by Eli Collins
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1310138 13f79535-47bb-0310-9956-ffa450edef68
commit e505b7e704
parent 112c324151
@@ -433,6 +433,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3210. JsonUtil#toJsonMap for for a DatanodeInfo should use
     "ipAddr" instead of "name". (eli)
 
+    HDFS-3208. Bogus entries in hosts files are incorrectly displayed
+    in the report. (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -211,7 +211,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final long DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
   public static final String DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
-  public static final String DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:50010";
+  public static final int DFS_DATANODE_DEFAULT_PORT = 50010;
+  public static final String DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_DEFAULT_PORT;
   public static final String DFS_DATANODE_DATA_DIR_PERMISSION_KEY = "dfs.datanode.data.dir.perm";
   public static final String DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT = "700";
   public static final String DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY = "dfs.datanode.directoryscan.interval";
@@ -227,7 +228,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
   public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 3;
   public static final String DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
-  public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50075";
+  public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
+  public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
   public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
   public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
@@ -251,13 +253,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
   public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
   public static final String DFS_HTTPS_PORT_KEY = "dfs.https.port";
-  public static final int DFS_HTTPS_PORT_DEFAULT = 50470;
   public static final String DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
   public static final int DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
   public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
-  public static final String DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50475";
+  public static final String DFS_DATANODE_HTTPS_PORT_KEY = "datanode.https.port";
+  public static final int DFS_DATANODE_HTTPS_DEFAULT_PORT = 50475;
+  public static final String DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTPS_DEFAULT_PORT;
   public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
-  public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:50020";
+  public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
+  public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT;
 
   public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
   public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
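The three DFSConfigKeys hunks above replace hard-coded port strings with named default-port constants. As a small illustrative sketch (not part of this commit), this is how code would typically pick up such a default; only the Configuration.get call and the DFSConfigKeys names shown above are assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DefaultPortExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to "0.0.0.0:" + DFS_DATANODE_DEFAULT_PORT (i.e. 0.0.0.0:50010)
    // unless dfs.datanode.address is set in the loaded configuration files.
    String addr = conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
        DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT);
    System.out.println("datanode address default: " + addr);
  }
}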
@@ -24,6 +24,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 
@@ -50,14 +51,21 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
   protected int infoPort;      // info server port
   protected int ipcPort;       // IPC server port
 
-  /** Equivalent to DatanodeID(""). */
-  public DatanodeID() {this("");}
+  public DatanodeID() {
+    this("", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
 
-  /** Equivalent to DatanodeID(ipAddr, "", -1, -1, -1). */
-  public DatanodeID(String ipAddr) {this(ipAddr, "", "", -1, -1, -1);}
+  public DatanodeID(String ipAddr, int xferPort) {
+    this(ipAddr, "", "", xferPort,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
+  }
 
-  /** Equivalent to DatanodeID(ipAddr, "", xferPort, -1, -1). */
-  public DatanodeID(String ipAddr, int xferPort) {this(ipAddr, "", "", xferPort, -1, -1);}
+  public DatanodeID(String ipAddr, String hostName, int xferPort) {
+    this(ipAddr, hostName, "", xferPort,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
+  }
 
   /**
    * DatanodeID copy constructor
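For reference, a minimal usage sketch (not part of this change) of the reworked DatanodeID constructor chain; only the constructors shown in the hunk above are assumed, and the host and address values are made up:

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIdExample {
  public static void main(String[] args) {
    // Two-argument form: HTTP and IPC ports now fall back to the DFSConfigKeys
    // defaults (50075 and 50020) instead of the old placeholder value -1.
    DatanodeID a = new DatanodeID("10.0.0.1", 50010);
    // Three-argument form used for hosts-file entries that carry a hostname.
    DatanodeID b = new DatanodeID("10.0.0.2", "dn2.example.com", 50010);
    System.out.println(a + " " + b);
  }
}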
@@ -265,7 +265,7 @@ public String dumpDatanode() {
     long c = getCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
-    buffer.append(ipAddr);
+    buffer.append(getName());
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.io.DataInput;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -26,13 +24,10 @@
 import java.util.Queue;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
@@ -158,18 +153,18 @@ synchronized void clear() {
    */
   private boolean disallowed = false;
 
-  /** Default constructor */
   public DatanodeDescriptor() {}
 
-  /** DatanodeDescriptor constructor
+  /**
+   * DatanodeDescriptor constructor
    * @param nodeID id of the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID) {
     this(nodeID, 0L, 0L, 0L, 0L, 0, 0);
   }
 
-  /** DatanodeDescriptor constructor
-   *
+  /**
+   * DatanodeDescriptor constructor
    * @param nodeID id of the data node
    * @param networkLocation location of the data node in network
    */
@@ -178,8 +173,8 @@ public DatanodeDescriptor(DatanodeID nodeID,
     this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0, 0);
   }
 
-  /** DatanodeDescriptor constructor
-   *
+  /**
+   * DatanodeDescriptor constructor
    * @param nodeID id of the data node
    * @param capacity capacity of the data node
    * @param dfsUsed space used by the data node
@@ -199,8 +194,8 @@ public DatanodeDescriptor(DatanodeID nodeID,
          failedVolumes);
   }
 
-  /** DatanodeDescriptor constructor
-   *
+  /**
+   * DatanodeDescriptor constructor
    * @param nodeID id of the data node
    * @param networkLocation location of the data node in network
    * @param capacity capacity of the data node, including space used by non-dfs
@@ -71,6 +71,8 @@
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import com.google.common.net.InetAddresses;
+
 /**
  * Manage datanodes, include decommission and other activities.
  */
@@ -353,14 +355,9 @@ private void wipeDatanode(final DatanodeID node) {
   private void resolveNetworkLocation (DatanodeDescriptor node) {
     List<String> names = new ArrayList<String>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
-      // get the node's IP address
       names.add(node.getIpAddr());
     } else {
-      // get the node's host name
-      String hostName = node.getHostName();
-      int colon = hostName.indexOf(":");
-      hostName = (colon==-1)?hostName:hostName.substring(0,colon);
-      names.add(hostName);
+      names.add(node.getHostName());
     }
 
     // resolve its network location
@@ -771,6 +768,40 @@ public void fetchDatanodes(final List<DatanodeDescriptor> live,
       }
     }
 
+  /**
+   * Parse a DatanodeID from a hosts file entry
+   * @param hostLine of form [hostname|ip][:port]?
+   * @return DatanodeID constructed from the given string
+   */
+  private DatanodeID parseDNFromHostsEntry(String hostLine) {
+    DatanodeID dnId;
+    String hostStr;
+    int port;
+    int idx = hostLine.indexOf(':');
+
+    if (-1 == idx) {
+      hostStr = hostLine;
+      port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT;
+    } else {
+      hostStr = hostLine.substring(0, idx);
+      port = Integer.valueOf(hostLine.substring(idx));
+    }
+
+    if (InetAddresses.isInetAddress(hostStr)) {
+      // The IP:port is sufficient for listing in a report
+      dnId = new DatanodeID(hostStr, "", port);
+    } else {
+      String ipAddr = "";
+      try {
+        ipAddr = InetAddress.getByName(hostStr).getHostAddress();
+      } catch (UnknownHostException e) {
+        LOG.warn("Invalid hostname " + hostStr + " in hosts file");
+      }
+      dnId = new DatanodeID(ipAddr, hostStr, port);
+    }
+    return dnId;
+  }
+
   /** For generating datanode reports */
   public List<DatanodeDescriptor> getDatanodeListForReport(
       final DatanodeReportType type) {
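To make the effect of the new parseDNFromHostsEntry method concrete, here is a self-contained sketch of the same idea using only the JDK. It is a simplified rewrite, not the committed code; in particular it does not use the Guava InetAddresses check above, and it parses the port with substring(idx + 1):

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

public class HostsEntryParseSketch {
  static final int DEFAULT_XFER_PORT = 50010; // stand-in for DFS_DATANODE_DEFAULT_PORT

  // Returns {ipAddr, hostName, port} for an entry of the form [hostname|ip][:port].
  // A name that does not resolve keeps an empty IP, so the entry can still be
  // listed (as a dead node) in the datanode report.
  static String[] parse(String hostLine) {
    int idx = hostLine.indexOf(':');
    String hostStr = (idx == -1) ? hostLine : hostLine.substring(0, idx);
    int port = (idx == -1) ? DEFAULT_XFER_PORT
        : Integer.parseInt(hostLine.substring(idx + 1));
    String ipAddr = "";
    try {
      ipAddr = InetAddress.getByName(hostStr).getHostAddress();
    } catch (UnknownHostException e) {
      // bogus entry: keep the hostname, leave the IP empty
    }
    return new String[] { ipAddr, hostStr, Integer.toString(port) };
  }

  public static void main(String[] args) {
    for (String line : new String[] { "localhost", "127.0.0.1:50010", "BOGUSHOST" }) {
      System.out.println(line + " -> " + Arrays.toString(parse(line)));
    }
  }
}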
@@ -782,7 +813,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
     HashMap<String, String> mustList = new HashMap<String, String>();
 
     if (listDeadNodes) {
-      //first load all the nodes listed in include and exclude files.
+      // Put all nodes referenced in the hosts files in the map
       Iterator<String> it = hostsReader.getHosts().iterator();
       while (it.hasNext()) {
         mustList.put(it.next(), "");
@@ -805,7 +836,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
       if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) {
         nodes.add(dn);
       }
-      //Remove any form of the this datanode in include/exclude lists.
+      // Remove any nodes we know about from the map
       try {
         InetAddress inet = InetAddress.getByName(dn.getIpAddr());
         // compare hostname(:port)
@@ -814,7 +845,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
         // compare ipaddress(:port)
         mustList.remove(inet.getHostAddress().toString());
         mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
-      } catch ( UnknownHostException e ) {
+      } catch (UnknownHostException e) {
         mustList.remove(dn.getName());
         mustList.remove(dn.getIpAddr());
         LOG.warn(e);
@@ -825,9 +856,13 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
     if (listDeadNodes) {
       Iterator<String> it = mustList.keySet().iterator();
       while (it.hasNext()) {
-        DatanodeDescriptor dn =
-            new DatanodeDescriptor(new DatanodeID(it.next()));
-        dn.setLastUpdate(0);
+        // The remaining nodes are ones that are referenced by the hosts
+        // files but that we do not know about, ie that we have never
+        // head from. Eg. a host that is no longer part of the cluster
+        // or a bogus entry was given in the hosts files
+        DatanodeID dnId = parseDNFromHostsEntry(it.next());
+        DatanodeDescriptor dn = new DatanodeDescriptor(dnId);
+        dn.setLastUpdate(0); // Consider this node dead for reporting
         nodes.add(dn);
       }
     }
@@ -668,8 +668,7 @@ void startDataNode(Configuration conf,
    */
   DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
     final String xferIp = streamingAddr.getAddress().getHostAddress();
-    DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp);
-    bpRegistration.setXferPort(getXferPort());
+    DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp, getXferPort());
     bpRegistration.setInfoPort(getInfoPort());
     bpRegistration.setIpcPort(getIpcPort());
     bpRegistration.setHostName(hostName);
@@ -62,7 +62,7 @@ private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
         ? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
     final String scheme = request.getScheme();
     final int port = "https".equals(scheme)
-      ? (Integer)getServletContext().getAttribute("datanode.https.port")
+      ? (Integer)getServletContext().getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY)
       : host.getInfoPort();
     final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");
 
@@ -27,6 +27,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -62,7 +63,7 @@ private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus st
       hostname = host.getIpAddr();
     }
     final int port = "https".equals(scheme)
-      ? (Integer)getServletContext().getAttribute("datanode.https.port")
+      ? (Integer)getServletContext().getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY)
       : host.getInfoPort();
 
     String dtParam = "";
@@ -165,10 +165,11 @@ private Map<String, String> getAuthFilterParams(Configuration conf)
       httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
           useKrb);
       // assume same ssl port for all datanodes
-      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
-          .get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
-      httpServer.setAttribute("datanode.https.port", datanodeSslPort
-          .getPort());
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
+          conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
+              infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
+      httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
+          datanodeSslPort.getPort());
     }
     httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
     httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
@@ -24,6 +24,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -52,18 +53,9 @@ public class DatanodeRegistration extends DatanodeID
   private StorageInfo storageInfo;
   private ExportedBlockKeys exportedKeys;
 
-  /**
-   * Default constructor.
-   */
   public DatanodeRegistration() {
-    this("");
-  }
-
-  /**
-   * Create DatanodeRegistration
-   */
-  public DatanodeRegistration(String ipAddr) {
-    this(ipAddr, new StorageInfo(), new ExportedBlockKeys());
+    this("", DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        new StorageInfo(), new ExportedBlockKeys());
   }
 
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
@@ -72,10 +64,14 @@ public DatanodeRegistration(DatanodeID dn, StorageInfo info,
     this.storageInfo = info;
     this.exportedKeys = keys;
   }
 
-  public DatanodeRegistration(String ipAddr, StorageInfo info,
+  public DatanodeRegistration(String ipAddr, int xferPort) {
+    this(ipAddr, xferPort, new StorageInfo(), new ExportedBlockKeys());
+  }
+
+  public DatanodeRegistration(String ipAddr, int xferPort, StorageInfo info,
       ExportedBlockKeys keys) {
-    super(ipAddr);
+    super(ipAddr, xferPort);
     this.storageInfo = info;
     this.exportedKeys = keys;
   }
@@ -114,7 +110,7 @@ public String getAddress() {
   @Override
   public String toString() {
     return getClass().getSimpleName()
-      + "(" + ipAddr
+      + "(" + getIpAddr()
       + ", storageID=" + storageID
       + ", infoPort=" + infoPort
      + ", ipcPort=" + ipcPort
@@ -395,9 +395,9 @@ public void testGetServerInfo() {
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
   }
 
   @Test
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -516,7 +517,8 @@ public void testHostsFile(int numNameNodes) throws IOException,
     // Now empty hosts file and ensure the datanode is disallowed
     // from talking to namenode, resulting in it's shutdown.
     ArrayList<String>list = new ArrayList<String>();
-    list.add("invalidhost");
+    final String badHostname = "BOGUSHOST";
+    list.add(badHostname);
     writeConfigFile(hostsFile, list);
 
     for (int j = 0; j < numNameNodes; j++) {
@@ -530,6 +532,17 @@ public void testHostsFile(int numNameNodes) throws IOException,
         info = client.datanodeReport(DatanodeReportType.LIVE);
       }
       assertEquals("Number of live nodes should be 0", 0, info.length);
+
+      // Test that non-live and bogus hostnames are considered "dead".
+      // The dead report should have an entry for (1) the DN that is
+      // now considered dead because it is no longer allowed to connect
+      // and (2) the bogus entry in the hosts file (these entries are
+      // always added last)
+      info = client.datanodeReport(DatanodeReportType.DEAD);
+      assertEquals("There should be 2 dead nodes", 2, info.length);
+      DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
+      assertEquals(id.getHostName(), info[0].getHostName());
+      assertEquals(badHostname, info[1].getHostName());
     }
   }
 }
@@ -60,7 +60,7 @@ public void testDefaultPolicy() throws Exception {
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
     datanodes[0] = new DatanodeInfo[0];
     for(int i = 0; i < infos.length; ) {
-      infos[i] = new DatanodeInfo(new DatanodeID("dn" + i));
+      infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
       i++;
       datanodes[i] = new DatanodeInfo[i];
       System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
@@ -115,7 +115,7 @@ private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
                       0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
 
-    Mockito.doReturn(new DatanodeRegistration("fake-node"))
+    Mockito.doReturn(new DatanodeRegistration("fake-node", 100))
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
 
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
@@ -779,9 +779,9 @@ private static int getNodePort(int num) throws IOException {
     }
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
+      String ipAddr = DNS.getDefaultIP("default");
       String hostName = DNS.getDefaultHost("default", "default");
-      dnRegistration = new DatanodeRegistration(hostName);
-      dnRegistration.setXferPort(getNodePort(dnIdx));
+      dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
       dnRegistration.setHostName(hostName);
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
@@ -894,10 +894,10 @@ private int transferBlocks( Block blocks[],
       for(int t = 0; t < blockTargets.length; t++) {
         DatanodeInfo dnInfo = blockTargets[t];
         DatanodeRegistration receivedDNReg;
-        receivedDNReg = new DatanodeRegistration(dnInfo.getIpAddr());
+        receivedDNReg =
+          new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
         receivedDNReg.setStorageInfo(
           new DataStorage(nsInfo, dnInfo.getStorageID()));
-        receivedDNReg.setXferPort(dnInfo.getXferPort());
         receivedDNReg.setInfoPort(dnInfo.getInfoPort());
         receivedDNReg.setIpcPort(dnInfo.getIpcPort());
         ReceivedDeletedBlockInfo[] rdBlocks = {