diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index d5d0dfbbf5..52fc5ebf71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -766,7 +767,8 @@ private void updatePipeline(ExtendedBlock newBG) throws IOException {
         newNodes[i] = nodes[0];
         newStorageIDs[i] = storageIDs[0];
       } else {
-        newNodes[i] = new DatanodeInfo(DatanodeID.EMPTY_DATANODE_ID);
+        newNodes[i] = new DatanodeInfoBuilder()
+            .setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
         newStorageIDs[i] = "";
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e9ee8b905a..8f9f3d5076 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -86,7 +86,7 @@ public static AdminStates fromValue(final String value) {
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
 
-  public DatanodeInfo(DatanodeInfo from) {
+  protected DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
@@ -103,7 +103,7 @@ public DatanodeInfo(DatanodeInfo from) {
     this.upgradeDomain = from.getUpgradeDomain();
   }
 
-  public DatanodeInfo(DatanodeID nodeID) {
+  protected DatanodeInfo(DatanodeID nodeID) {
     super(nodeID);
     this.capacity = 0L;
     this.dfsUsed = 0L;
@@ -118,57 +118,13 @@ public DatanodeInfo(DatanodeID nodeID) {
     this.adminState = null;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location) {
+  protected DatanodeInfo(DatanodeID nodeID, String location) {
     this(nodeID);
     this.location = location;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState,
-      final String upgradeDomain) {
-    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
-        nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
-        nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState, upgradeDomain);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
-        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
-        networkLocation, adminState, null);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState,
-      final String upgradeDomain) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, 0L, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, networkLocation, adminState, upgradeDomain);
-  }
-
   /** Constructor. */
-  public DatanodeInfo(final String ipAddr, final String hostName,
+  private DatanodeInfo(final String ipAddr, final String hostName,
       final String datanodeUuid, final int xferPort, final int infoPort,
       final int infoSecurePort, final int ipcPort,
       final long capacity, final long dfsUsed, final long nonDfsUsed, final long remaining,
@@ -662,4 +618,169 @@ public String getSoftwareVersion() {
   public void setSoftwareVersion(String softwareVersion) {
     this.softwareVersion = softwareVersion;
   }
+
+  /**
+   * Building the DataNodeInfo.
+   */
+  public static class DatanodeInfoBuilder {
+    private String location = NetworkTopology.DEFAULT_RACK;
+    private long capacity;
+    private long dfsUsed;
+    private long remaining;
+    private long blockPoolUsed;
+    private long cacheCapacity;
+    private long cacheUsed;
+    private long lastUpdate;
+    private long lastUpdateMonotonic;
+    private int xceiverCount;
+    private DatanodeInfo.AdminStates adminState;
+    private String upgradeDomain;
+    private String ipAddr;
+    private String hostName;
+    private String datanodeUuid;
+    private int xferPort;
+    private int infoPort;
+    private int infoSecurePort;
+    private int ipcPort;
+    private long nonDfsUsed = 0L;
+
+    public DatanodeInfoBuilder setFrom(DatanodeInfo from) {
+      this.capacity = from.getCapacity();
+      this.dfsUsed = from.getDfsUsed();
+      this.nonDfsUsed = from.getNonDfsUsed();
+      this.remaining = from.getRemaining();
+      this.blockPoolUsed = from.getBlockPoolUsed();
+      this.cacheCapacity = from.getCacheCapacity();
+      this.cacheUsed = from.getCacheUsed();
+      this.lastUpdate = from.getLastUpdate();
+      this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
+      this.xceiverCount = from.getXceiverCount();
+      this.location = from.getNetworkLocation();
+      this.adminState = from.getAdminState();
+      this.upgradeDomain = from.getUpgradeDomain();
+      setNodeID(from);
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNodeID(DatanodeID nodeID) {
+      this.ipAddr = nodeID.getIpAddr();
+      this.hostName = nodeID.getHostName();
+      this.datanodeUuid = nodeID.getDatanodeUuid();
+      this.xferPort = nodeID.getXferPort();
+      this.infoPort = nodeID.getInfoPort();
+      this.infoSecurePort = nodeID.getInfoSecurePort();
+      this.ipcPort = nodeID.getIpcPort();
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCapacity(long capacity) {
+      this.capacity = capacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDfsUsed(long dfsUsed) {
+      this.dfsUsed = dfsUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setRemaining(long remaining) {
+      this.remaining = remaining;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setBlockPoolUsed(long blockPoolUsed) {
+      this.blockPoolUsed = blockPoolUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheCapacity(long cacheCapacity) {
+      this.cacheCapacity = cacheCapacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheUsed(long cacheUsed) {
+      this.cacheUsed = cacheUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdate(long lastUpdate) {
+      this.lastUpdate = lastUpdate;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdateMonotonic(
+        long lastUpdateMonotonic) {
+      this.lastUpdateMonotonic = lastUpdateMonotonic;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXceiverCount(int xceiverCount) {
+      this.xceiverCount = xceiverCount;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setAdminState(
+        DatanodeInfo.AdminStates adminState) {
+      this.adminState = adminState;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setUpgradeDomain(String upgradeDomain) {
+      this.upgradeDomain = upgradeDomain;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpAddr(String ipAddr) {
+      this.ipAddr = ipAddr;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setHostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDatanodeUuid(String datanodeUuid) {
+      this.datanodeUuid = datanodeUuid;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXferPort(int xferPort) {
+      this.xferPort = xferPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoPort(int infoPort) {
+      this.infoPort = infoPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoSecurePort(int infoSecurePort) {
+      this.infoSecurePort = infoSecurePort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpcPort(int ipcPort) {
+      this.ipcPort = ipcPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNetworkLocation(String networkLocation) {
+      this.location = networkLocation;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNonDfsUsed(long nonDfsUsed) {
+      this.nonDfsUsed = nonDfsUsed;
+      return this;
+    }
+
+    public DatanodeInfo build() {
+      return new DatanodeInfo(ipAddr, hostName, datanodeUuid, xferPort,
+          infoPort, infoSecurePort, ipcPort, capacity, dfsUsed, nonDfsUsed,
+          remaining, blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate,
+          lastUpdateMonotonic, xceiverCount, location, adminState,
+          upgradeDomain);
+    }
+  }
 }
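For review context, a minimal sketch of how a call site constructs a fully populated DatanodeInfo once this builder is in place. The setter names come from the DatanodeInfoBuilder added above; the wrapper class, values, and ports are illustrative placeholders, not part of the patch:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

public class DatanodeInfoBuilderExample {
  public static void main(String[] args) {
    // Field-by-field construction; anything left unset keeps the builder's
    // default (e.g. location falls back to NetworkTopology.DEFAULT_RACK).
    DatanodeInfo dn = new DatanodeInfoBuilder()
        .setIpAddr("127.0.0.1")                       // placeholder address
        .setHostName("localhost")
        .setDatanodeUuid("datanode-uuid-placeholder") // placeholder UUID
        .setXferPort(9866)                            // placeholder ports
        .setInfoPort(9864)
        .setInfoSecurePort(9865)
        .setIpcPort(9867)
        .setCapacity(100L << 30)                      // placeholder stats, 100 GiB
        .setDfsUsed(10L << 30)
        .setNonDfsUsed(5L << 30)
        .setRemaining(85L << 30)
        .setAdminState(AdminStates.NORMAL)
        .build();
    System.out.println(dn);
  }
}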
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 246b5a5a7a..2ba7bad05f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -70,6 +70,7 @@
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -585,13 +586,18 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
     if (di == null) {
       return null;
     }
-    DatanodeInfo dinfo = new DatanodeInfo(convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null, di.getCapacity(),
-        di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed(),
-        di.getCacheCapacity(), di.getCacheUsed(), di.getLastUpdate(),
-        di.getLastUpdateMonotonic(), di.getXceiverCount(),
-        convert(di.getAdminState()),
-        di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
+    DatanodeInfoBuilder dinfo =
+        new DatanodeInfoBuilder().setNodeID(convert(di.getId()))
+            .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
+            .setCapacity(di.getCapacity()).setDfsUsed(di.getDfsUsed())
+            .setRemaining(di.getRemaining())
+            .setBlockPoolUsed(di.getBlockPoolUsed())
+            .setCacheCapacity(di.getCacheCapacity())
+            .setCacheUsed(di.getCacheUsed()).setLastUpdate(di.getLastUpdate())
+            .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
+            .setXceiverCount(di.getXceiverCount())
+            .setAdminState(convert(di.getAdminState())).setUpgradeDomain(
+                di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
     if (di.hasNonDfsUsed()) {
       dinfo.setNonDfsUsed(di.getNonDfsUsed());
     } else {
@@ -599,7 +605,7 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
       long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
       dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
     }
-    return dinfo;
+    return dinfo.build();
   }
 
   public static StorageType[] convertStorageTypes(
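One compatibility detail worth calling out in the convert() change above: when an older DataNode's protobuf carries no nonDfsUsed field, the value is derived from the other counters and clamped at zero. A hedged standalone restatement of that arithmetic (the helper name is illustrative, not part of the patch):

// Mirrors the else-branch of PBHelperClient#convert above: derive non-DFS
// usage for DataNodes that predate the nonDfsUsed field, never negative.
static long legacyNonDfsUsed(long capacity, long dfsUsed, long remaining) {
  long nonDfsUsed = capacity - dfsUsed - remaining;
  return nonDfsUsed < 0 ? 0 : nonDfsUsed;
}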
"remaining", 0L)) + .setBlockPoolUsed(getLong(m, "blockPoolUsed", 0L)) + .setCacheCapacity(getLong(m, "cacheCapacity", 0L)) + .setCacheUsed(getLong(m, "cacheUsed", 0L)) + .setLastUpdate(getLong(m, "lastUpdate", 0L)) + .setLastUpdateMonotonic(getLong(m, "lastUpdateMonotonic", 0L)) + .setXceiverCount(getInt(m, "xceiverCount", 0)) + .setNetworkLocation(getString(m, "networkLocation", "")).setAdminState( + DatanodeInfo.AdminStates + .valueOf(getString(m, "adminState", "NORMAL"))) + .setUpgradeDomain(getString(m, "upgradeDomain", "")) + .build(); } /** Convert an Object[] to a DatanodeInfo[]. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java index db7a8d2d8e..c7b3e74be0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto; @@ -78,8 +79,9 @@ public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) { @Override public GetBlocksResponseProto getBlocks(RpcController unused, GetBlocksRequestProto request) throws ServiceException { - DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request - .getDatanode())); + DatanodeInfo dnInfo = new DatanodeInfoBuilder() + .setNodeID(PBHelperClient.convert(request.getDatanode())) + .build(); BlocksWithLocations blocks; try { blocks = impl.getBlocks(dnInfo, request.getSize()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index de9e48e990..78a20443dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -2384,7 +2385,8 @@ public void run() { in = new DataInputStream(unbufIn); blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, true, DataNode.this, null, cachingStrategy); - DatanodeInfo srcNode = new DatanodeInfo(bpReg); + DatanodeInfo srcNode = new DatanodeInfoBuilder().setNodeID(bpReg) + .build(); new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken, clientname, targets, targetStorageTypes, srcNode, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java index be46707afc..2946358a5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; @@ -52,7 +53,8 @@ public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, if (bpRegistration == null) { return; } - DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) }; + DatanodeInfo[] dnArr = {new DatanodeInfoBuilder() + .setNodeID(bpRegistration).build()}; String[] uuids = { storageUuid }; StorageType[] types = { storageType }; LocatedBlock[] locatedBlock = { new LocatedBlock(block, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java index 11551e7e43..592be45f9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.DFSPacket; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; @@ -132,7 +133,8 @@ private void init() throws IOException { DFSUtilClient.getSmallBufferSize(conf))); in = new DataInputStream(unbufIn); - DatanodeInfo source = new DatanodeInfo(datanode.getDatanodeId()); + DatanodeInfo source = new DatanodeInfoBuilder() + .setNodeID(datanode.getDatanodeId()).build(); new Sender(out).writeBlock(block, storageType, blockToken, "", new DatanodeInfo[]{target}, new StorageType[]{storageType}, source, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d6ae0fa76f..7e3fabe16c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -181,6 +181,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.EncryptionZone; @@ -4040,7 +4041,8 @@ DatanodeInfo[] datanodeReport(final 
DatanodeReportType type DatanodeInfo[] arr = new DatanodeInfo[results.size()]; for (int i=0; i info) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 4072071295..5cb25711da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -735,7 +736,8 @@ public void testBlockECRecoveryCommand() { @Test public void testDataNodeInfoPBHelper() { DatanodeID id = DFSTestUtil.getLocalDatanodeID(); - DatanodeInfo dnInfos0 = new DatanodeInfo(id); + DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id) + .build(); dnInfos0.setCapacity(3500L); dnInfos0.setDfsUsed(1000L); dnInfos0.setNonDfsUsed(2000L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index c09303f480..f011b9d605 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -69,6 +69,7 @@ import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -510,8 +511,9 @@ public void testRWRReplicas() throws IOException { private Collection initRecoveringBlocks() throws IOException { Collection blocks = new ArrayList(1); DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo(); - DatanodeInfo[] locs = new DatanodeInfo[] { - new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())), + DatanodeInfo[] locs = new DatanodeInfo[] {new DatanodeInfoBuilder() + .setNodeID(dn.getDNRegistrationForBP( + block.getBlockPoolId())).build(), mockOtherDN }; RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID); blocks.add(rBlock); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java index 4f6db24946..86e9f90cc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import 
org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -400,7 +401,8 @@ public void testInterDNProtocolTimeout() throws Throwable { final InetSocketAddress addr = NetUtils.getConnectAddress(server); DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); - DatanodeInfo dInfo = new DatanodeInfo(fakeDnId); + DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId) + .build(); InterDatanodeProtocol proxy = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index 8e217c2219..06c6cf64d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.net.DomainPeer; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; @@ -430,8 +431,9 @@ public void visit(HashMap info) DomainPeer peer = getDomainPeerToDn(conf); MutableBoolean usedPeer = new MutableBoolean(false); ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz"); - final DatanodeInfo datanode = - new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); + final DatanodeInfo datanode = new DatanodeInfoBuilder() + .setNodeID(cluster.getDataNodes().get(0).getDatanodeId()) + .build(); // Allocating the first shm slot requires using up a peer. Slot slot = cache.allocShmSlot(datanode, peer, usedPeer, blockId, "testAllocShm_client"); @@ -571,8 +573,9 @@ public void visit(HashMap info) Assert.assertTrue(Arrays.equals(contents, expected)); // Loading this file brought the ShortCircuitReplica into our local // replica cache. - final DatanodeInfo datanode = - new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); + final DatanodeInfo datanode = new DatanodeInfoBuilder() + .setNodeID(cluster.getDataNodes().get(0).getDatanodeId()) + .build(); cache.getDfsClientShmManager().visit(new Visitor() { @Override public void visit(HashMap info)
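Taken together, the call-site churn in this patch reduces to two mechanical rewrites, sketched below with hypothetical nodeId (a DatanodeID) and other (a DatanodeInfo) variables:

// Before: wrap a DatanodeID (that constructor is now protected).
//   DatanodeInfo info = new DatanodeInfo(nodeId);
DatanodeInfo info = new DatanodeInfoBuilder().setNodeID(nodeId).build();

// Before: copy-construct from an existing DatanodeInfo.
//   DatanodeInfo copy = new DatanodeInfo(other);
DatanodeInfo copy = new DatanodeInfoBuilder().setFrom(other).build();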