diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 27b6b7fd4e..38ede68fe7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1705,6 +1705,9 @@ Release 0.23.5 - UNRELEASED
     HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7
     (Trevor Robinson via tgraves)
 
+    HDFS-3224. Bug in check for DN re-registration with different storage ID
+    (jlowe)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 0781218e0b..817b183e64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -612,7 +612,8 @@ public void registerDatanode(DatanodeRegistration nodeReg)
         + " storage " + nodeReg.getStorageID());
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
-    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
+    DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
+        nodeReg.getIpAddr(), nodeReg.getXferPort());
 
     if (nodeN != null && nodeN != nodeS) {
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
index 082816d0e0..6f9049a960 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
@@ -159,6 +159,35 @@ DatanodeDescriptor getDatanodeByHost(String ipAddr) {
     }
   }
 
+  /**
+   * Find data node by its transfer address
+   *
+   * @return DatanodeDescriptor if found or null otherwise
+   */
+  public DatanodeDescriptor getDatanodeByXferAddr(String ipAddr,
+      int xferPort) {
+    if (ipAddr==null) {
+      return null;
+    }
+
+    hostmapLock.readLock().lock();
+    try {
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
+      // no entry
+      if (nodes== null) {
+        return null;
+      }
+      for(DatanodeDescriptor containedNode:nodes) {
+        if (xferPort == containedNode.getXferPort()) {
+          return containedNode;
+        }
+      }
+      return null;
+    } finally {
+      hostmapLock.readLock().unlock();
+    }
+  }
+
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index 4d32b1f6a8..63371ec474 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -27,6 +27,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -91,6 +92,58 @@ public void testChangeIpcPort() throws Exception {
     }
   }
 
+  @Test
+  public void testChangeStorageID() throws Exception {
+    final String DN_IP_ADDR = "127.0.0.1";
+    final String DN_HOSTNAME = "localhost";
+    final int DN_XFER_PORT = 12345;
+    final int DN_INFO_PORT = 12346;
+    final int DN_IPC_PORT = 12347;
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
+          .build();
+      InetSocketAddress addr = new InetSocketAddress(
+        "localhost",
+        cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, conf);
+      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+      // register a datanode
+      DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
+          .getCTime();
+      StorageInfo mockStorageInfo = mock(StorageInfo.class);
+      doReturn(nnCTime).when(mockStorageInfo).getCTime();
+      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+          .getLayoutVersion();
+      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Expected a registered datanode", 1, report.length);
+
+      // register the same datanode again with a different storage ID
+      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Datanode with changed storage ID not recognized",
+          1, report.length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
     Configuration conf = new HdfsConfiguration();
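
Note (not part of the patch): the pre-patch check passed the full transfer address ("ip:port") from getXferAddr() into getDatanodeByHost(String ipAddr), which is keyed by IP address alone, so a datanode re-registering from the same address with a different storage ID was not matched against its existing entry; the new getDatanodeByXferAddr looks the node up by IP and transfer port separately. The standalone sketch below illustrates that lookup mismatch with a hypothetical SimpleHost2NodesMap and Node type; it is an illustration only, not the Hadoop Host2NodesMap class.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-in for a host-to-nodes map keyed by IP address only.
public class SimpleHost2NodesMap {
  static final class Node {
    final String ipAddr;
    final int xferPort;
    final String storageID;
    Node(String ipAddr, int xferPort, String storageID) {
      this.ipAddr = ipAddr;
      this.xferPort = xferPort;
      this.storageID = storageID;
    }
  }

  private final Map<String, List<Node>> map = new HashMap<>();

  void add(Node node) {
    map.computeIfAbsent(node.ipAddr, k -> new ArrayList<>()).add(node);
  }

  // Old-style lookup: keyed by IP address only. Passing "ip:port" here, as
  // the pre-patch registerDatanode effectively did, finds nothing.
  Node getDatanodeByHost(String ipAddr) {
    List<Node> nodes = map.get(ipAddr);
    return (nodes == null || nodes.isEmpty()) ? null : nodes.get(0);
  }

  // New-style lookup mirroring getDatanodeByXferAddr: match IP and xfer port.
  Node getDatanodeByXferAddr(String ipAddr, int xferPort) {
    List<Node> nodes = map.get(ipAddr);
    if (nodes == null) {
      return null;
    }
    for (Node n : nodes) {
      if (n.xferPort == xferPort) {
        return n;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    SimpleHost2NodesMap host2NodesMap = new SimpleHost2NodesMap();
    host2NodesMap.add(new Node("127.0.0.1", 12345, "fake-storage-id"));

    // Lookup keyed by "ip:port" misses the registered node (prints null),
    // so a re-registration with a changed storage ID is not recognized.
    System.out.println(host2NodesMap.getDatanodeByHost("127.0.0.1:12345"));

    // Lookup by (IP, transfer port) finds the existing node.
    System.out.println(
        host2NodesMap.getDatanodeByXferAddr("127.0.0.1", 12345).storageID);
  }
}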