HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp.

This commit is contained in:
Kihwal Lee 2015-03-04 17:21:51 -06:00
parent c66c3ac6bf
commit 722b479469
6 changed files with 43 additions and 43 deletions

View File

@@ -1091,6 +1091,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
(Chris Nauroth via wheat9)
HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

View File

@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
private int infoSecurePort; // info server port
private int ipcPort; // IPC server port
private String xferAddr;
private int hashCode = -1;
/**
* UUID identifying a given datanode. For upgraded Datanodes this is the
* same as the StorageID that was previously used by this Datanode.
* For newly formatted Datanodes it is a UUID.
*/
private String datanodeUuid = null;
private final String datanodeUuid;
public DatanodeID(DatanodeID from) {
this(from.getDatanodeUuid(), from);
}
@VisibleForTesting
public DatanodeID(String datanodeUuid, DatanodeID from) {
this(from.getIpAddr(),
from.getHostName(),
from.getDatanodeUuid(),
datanodeUuid,
from.getXferPort(),
from.getInfoPort(),
from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public DatanodeID(DatanodeID from) {
*/
public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
this.ipAddr = ipAddr;
setIpAndXferPort(ipAddr, xferPort);
this.hostName = hostName;
this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
this.xferPort = xferPort;
this.infoPort = infoPort;
this.infoSecurePort = infoSecurePort;
this.ipcPort = ipcPort;
updateXferAddrAndInvalidateHashCode();
}
public void setIpAddr(String ipAddr) {
//updated during registration, preserve former xferPort
setIpAndXferPort(ipAddr, xferPort);
}
private void setIpAndXferPort(String ipAddr, int xferPort) {
// build xferAddr string to reduce cost of frequent use
this.ipAddr = ipAddr;
updateXferAddrAndInvalidateHashCode();
this.xferPort = xferPort;
this.xferAddr = ipAddr + ":" + xferPort;
}
public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public String getDatanodeUuid() {
return datanodeUuid;
}
@VisibleForTesting
public void setDatanodeUuidForTesting(String datanodeUuid) {
this.datanodeUuid = datanodeUuid;
updateXferAddrAndInvalidateHashCode();
}
private String checkDatanodeUuid(String uuid) {
if (uuid == null || uuid.isEmpty()) {
return null;
@@ -242,11 +245,7 @@ public boolean equals(Object to) {
@Override
public int hashCode() {
if (hashCode == -1) {
int newHashCode = xferAddr.hashCode() ^ datanodeUuid.hashCode();
hashCode = newHashCode & Integer.MAX_VALUE;
}
return hashCode;
return datanodeUuid.hashCode();
}
@Override
@@ -259,14 +258,12 @@ public String toString() {
* Note that this does not update storageID.
*/
public void updateRegInfo(DatanodeID nodeReg) {
ipAddr = nodeReg.getIpAddr();
setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getXferPort());
hostName = nodeReg.getHostName();
peerHostName = nodeReg.getPeerHostName();
xferPort = nodeReg.getXferPort();
infoPort = nodeReg.getInfoPort();
infoSecurePort = nodeReg.getInfoSecurePort();
ipcPort = nodeReg.getIpcPort();
updateXferAddrAndInvalidateHashCode();
}
/**
@@ -279,13 +276,4 @@ public void updateRegInfo(DatanodeID nodeReg) {
public int compareTo(DatanodeID that) {
return getXferAddr().compareTo(that.getXferAddr());
}
// NOTE: mutable hash codes are dangerous, however this class chooses to
// use them. this method must be called when a value mutates that is used
// to compute the hash, equality, or comparison of instances.
private void updateXferAddrAndInvalidateHashCode() {
xferAddr = ipAddr + ":" + xferPort;
// can't compute new hash yet because uuid might still be null...
hashCode = -1;
}
}

View File

@@ -25,6 +25,8 @@
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import com.google.common.annotations.VisibleForTesting;
/**
* DatanodeRegistration class contains all information the name-node needs
* to identify and verify a data-node when it contacts the name-node.
@@ -39,6 +41,14 @@ public class DatanodeRegistration extends DatanodeID
private ExportedBlockKeys exportedKeys;
private final String softwareVersion;
@VisibleForTesting
public DatanodeRegistration(String uuid, DatanodeRegistration dnr) {
this(new DatanodeID(uuid, dnr),
dnr.getStorageInfo(),
dnr.getExportedKeys(),
dnr.getSoftwareVersion());
}
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
ExportedBlockKeys keys, String softwareVersion) {
super(dn);

View File

@@ -538,10 +538,6 @@ public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
DatanodeStorageInfo ds = node.getStorageInfos()[0];
// TODO: Needs to be fixed. DatanodeUuid is not storageID.
node.setDatanodeUuidForTesting(ds.getStorageID());
node.isAlive = true;
DatanodeRegistration nodeReg =
@@ -587,9 +583,6 @@ public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
DatanodeStorageInfo ds = node.getStorageInfos()[0];
// TODO: Needs to be fixed. DatanodeUuid is not storageID.
node.setDatanodeUuidForTesting(ds.getStorageID());
node.isAlive = true;
DatanodeRegistration nodeReg =

View File

@@ -38,7 +38,6 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.util.VersionInfo;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
@@ -119,10 +118,17 @@ public void testCompInvalidate() throws Exception {
public void testDatanodeReformat() throws Exception {
namesystem.writeLock();
try {
// Change the datanode UUID to emulate a reformat
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort())
.getDNRegistrationForBP(poolId);
dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
cluster.stopDataNode(nodes[0].getXferAddr());
Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
bm.addToInvalidates(block, nodes[0]);
// Change the datanode UUID to emulate a reformation
nodes[0].setDatanodeUuidForTesting("fortesting");
bm.getDatanodeManager().registerDatanode(dnr);
// Since UUID has changed, the invalidation work should be skipped
assertEquals(0, bm.computeInvalidateWork(1));
assertEquals(0, bm.getPendingDeletionBlocksCount());
@@ -158,8 +164,8 @@ public void testDatanodeReRegistration() throws Exception {
// Re-register each DN and see that it wipes the invalidation work
for (DataNode dn : cluster.getDataNodes()) {
DatanodeID did = dn.getDatanodeId();
did.setDatanodeUuidForTesting(UUID.randomUUID().toString());
DatanodeRegistration reg = new DatanodeRegistration(did,
DatanodeRegistration reg = new DatanodeRegistration(
new DatanodeID(UUID.randomUUID().toString(), did),
new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
new ExportedBlockKeys(),
VersionInfo.getVersion());

View File

@@ -173,7 +173,8 @@ public DatanodeRegistration answer(InvocationOnMock invocation)
} else {
DatanodeRegistration dr =
(DatanodeRegistration) invocation.getArguments()[0];
datanodeRegistration.setDatanodeUuidForTesting(dr.getDatanodeUuid());
datanodeRegistration =
new DatanodeRegistration(dr.getDatanodeUuid(), dr);
LOG.info("mockito succeeded " + datanodeRegistration);
return datanodeRegistration;
}