HDFS-9482. Replace DatanodeInfo constructors with a builder pattern. Contributed by Brahma Reddy Battula.
parent 62d8c17dfd
commit ed0bebabaa
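For orientation before the per-file hunks: the patch makes the DatanodeInfo constructors protected or private and routes every caller through the new nested DatanodeInfo.DatanodeInfoBuilder. A minimal sketch of the call-site change follows; the wrapper class and method names are illustrative only, not part of the patch.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

class DatanodeInfoMigrationSketch {
  static DatanodeInfo fromId(DatanodeID nodeId) {
    // Before this patch: return new DatanodeInfo(nodeId);
    // That constructor is now protected, so callers build the instance instead.
    return new DatanodeInfoBuilder()
        .setNodeID(nodeId)
        .build();
  }
}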
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -766,7 +767,8 @@ private void updatePipeline(ExtendedBlock newBG) throws IOException {
         newNodes[i] = nodes[0];
         newStorageIDs[i] = storageIDs[0];
       } else {
-        newNodes[i] = new DatanodeInfo(DatanodeID.EMPTY_DATANODE_ID);
+        newNodes[i] = new DatanodeInfoBuilder()
+            .setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
         newStorageIDs[i] = "";
       }
     }

@@ -86,7 +86,7 @@ public static AdminStates fromValue(final String value) {
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
 
-  public DatanodeInfo(DatanodeInfo from) {
+  protected DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
@@ -103,7 +103,7 @@ public DatanodeInfo(DatanodeInfo from) {
     this.upgradeDomain = from.getUpgradeDomain();
   }
 
-  public DatanodeInfo(DatanodeID nodeID) {
+  protected DatanodeInfo(DatanodeID nodeID) {
     super(nodeID);
     this.capacity = 0L;
     this.dfsUsed = 0L;
@@ -118,57 +118,13 @@ public DatanodeInfo(DatanodeID nodeID) {
     this.adminState = null;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location) {
+  protected DatanodeInfo(DatanodeID nodeID, String location) {
     this(nodeID);
     this.location = location;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState,
-      final String upgradeDomain) {
-    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
-        nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
-        nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState, upgradeDomain);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
-        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
-        networkLocation, adminState, null);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState,
-      final String upgradeDomain) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, 0L, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, networkLocation, adminState, upgradeDomain);
-  }
-
   /** Constructor. */
-  public DatanodeInfo(final String ipAddr, final String hostName,
+  private DatanodeInfo(final String ipAddr, final String hostName,
       final String datanodeUuid, final int xferPort, final int infoPort,
       final int infoSecurePort, final int ipcPort, final long capacity,
       final long dfsUsed, final long nonDfsUsed, final long remaining,
@@ -662,4 +618,169 @@ public String getSoftwareVersion() {
   public void setSoftwareVersion(String softwareVersion) {
     this.softwareVersion = softwareVersion;
   }
+
+  /**
+   * Building the DataNodeInfo.
+   */
+  public static class DatanodeInfoBuilder {
+    private String location = NetworkTopology.DEFAULT_RACK;
+    private long capacity;
+    private long dfsUsed;
+    private long remaining;
+    private long blockPoolUsed;
+    private long cacheCapacity;
+    private long cacheUsed;
+    private long lastUpdate;
+    private long lastUpdateMonotonic;
+    private int xceiverCount;
+    private DatanodeInfo.AdminStates adminState;
+    private String upgradeDomain;
+    private String ipAddr;
+    private String hostName;
+    private String datanodeUuid;
+    private int xferPort;
+    private int infoPort;
+    private int infoSecurePort;
+    private int ipcPort;
+    private long nonDfsUsed = 0L;
+
+    public DatanodeInfoBuilder setFrom(DatanodeInfo from) {
+      this.capacity = from.getCapacity();
+      this.dfsUsed = from.getDfsUsed();
+      this.nonDfsUsed = from.getNonDfsUsed();
+      this.remaining = from.getRemaining();
+      this.blockPoolUsed = from.getBlockPoolUsed();
+      this.cacheCapacity = from.getCacheCapacity();
+      this.cacheUsed = from.getCacheUsed();
+      this.lastUpdate = from.getLastUpdate();
+      this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
+      this.xceiverCount = from.getXceiverCount();
+      this.location = from.getNetworkLocation();
+      this.adminState = from.getAdminState();
+      this.upgradeDomain = from.getUpgradeDomain();
+      setNodeID(from);
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNodeID(DatanodeID nodeID) {
+      this.ipAddr = nodeID.getIpAddr();
+      this.hostName = nodeID.getHostName();
+      this.datanodeUuid = nodeID.getDatanodeUuid();
+      this.xferPort = nodeID.getXferPort();
+      this.infoPort = nodeID.getInfoPort();
+      this.infoSecurePort = nodeID.getInfoSecurePort();
+      this.ipcPort = nodeID.getIpcPort();
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCapacity(long capacity) {
+      this.capacity = capacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDfsUsed(long dfsUsed) {
+      this.dfsUsed = dfsUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setRemaining(long remaining) {
+      this.remaining = remaining;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setBlockPoolUsed(long blockPoolUsed) {
+      this.blockPoolUsed = blockPoolUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheCapacity(long cacheCapacity) {
+      this.cacheCapacity = cacheCapacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheUsed(long cacheUsed) {
+      this.cacheUsed = cacheUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdate(long lastUpdate) {
+      this.lastUpdate = lastUpdate;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdateMonotonic(
+        long lastUpdateMonotonic) {
+      this.lastUpdateMonotonic = lastUpdateMonotonic;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXceiverCount(int xceiverCount) {
+      this.xceiverCount = xceiverCount;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setAdminState(
+        DatanodeInfo.AdminStates adminState) {
+      this.adminState = adminState;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setUpgradeDomain(String upgradeDomain) {
+      this.upgradeDomain = upgradeDomain;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpAddr(String ipAddr) {
+      this.ipAddr = ipAddr;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setHostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDatanodeUuid(String datanodeUuid) {
+      this.datanodeUuid = datanodeUuid;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXferPort(int xferPort) {
+      this.xferPort = xferPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoPort(int infoPort) {
+      this.infoPort = infoPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoSecurePort(int infoSecurePort) {
+      this.infoSecurePort = infoSecurePort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpcPort(int ipcPort) {
+      this.ipcPort = ipcPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNetworkLocation(String networkLocation) {
+      this.location = networkLocation;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNonDfsUsed(long nonDfsUsed) {
+      this.nonDfsUsed = nonDfsUsed;
+      return this;
+    }
+
+    public DatanodeInfo build() {
+      return new DatanodeInfo(ipAddr, hostName, datanodeUuid, xferPort,
+          infoPort, infoSecurePort, ipcPort, capacity, dfsUsed, nonDfsUsed,
+          remaining, blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate,
+          lastUpdateMonotonic, xceiverCount, location, adminState,
+          upgradeDomain);
+    }
+  }
 }

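As the hunk above shows, the builder initializes location to NetworkTopology.DEFAULT_RACK and nonDfsUsed to 0L; every other field starts at its Java default (0 for the numeric fields, null for adminState and the string fields), so callers only set what they need. A hypothetical chained use, assuming the same imports as the sketch near the top; the method name and the numbers are illustrative, not part of the patch:

  static DatanodeInfo sampleReport(DatanodeID nodeId) {
    return new DatanodeInfoBuilder()
        .setNodeID(nodeId)                 // copies ip, hostname, uuid and ports from nodeId
        .setCapacity(100L * 1024 * 1024)   // illustrative numbers only
        .setDfsUsed(10L * 1024 * 1024)
        .setRemaining(90L * 1024 * 1024)
        .setXceiverCount(2)
        .setNetworkLocation("/rack1")
        .build();                          // unset fields keep the defaults described above
  }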
@@ -70,6 +70,7 @@
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -585,13 +586,18 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
     if (di == null) {
       return null;
     }
-    DatanodeInfo dinfo = new DatanodeInfo(convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null, di.getCapacity(),
-        di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed(),
-        di.getCacheCapacity(), di.getCacheUsed(), di.getLastUpdate(),
-        di.getLastUpdateMonotonic(), di.getXceiverCount(),
-        convert(di.getAdminState()),
-        di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
+    DatanodeInfoBuilder dinfo =
+        new DatanodeInfoBuilder().setNodeID(convert(di.getId()))
+            .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
+            .setCapacity(di.getCapacity()).setDfsUsed(di.getDfsUsed())
+            .setRemaining(di.getRemaining())
+            .setBlockPoolUsed(di.getBlockPoolUsed())
+            .setCacheCapacity(di.getCacheCapacity())
+            .setCacheUsed(di.getCacheUsed()).setLastUpdate(di.getLastUpdate())
+            .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
+            .setXceiverCount(di.getXceiverCount())
+            .setAdminState(convert(di.getAdminState())).setUpgradeDomain(
+                di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
     if (di.hasNonDfsUsed()) {
       dinfo.setNonDfsUsed(di.getNonDfsUsed());
     } else {
@@ -599,7 +605,7 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
       long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
       dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
     }
-    return dinfo;
+    return dinfo.build();
   }
 
   public static StorageType[] convertStorageTypes(

@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -271,27 +272,26 @@ static DatanodeInfo toDatanodeInfo(final Map<?, ?> m)
     }
 
     // TODO: Fix storageID
-    return new DatanodeInfo(
-        ipAddr,
-        (String)m.get("hostName"),
-        (String)m.get("storageID"),
-        xferPort,
-        ((Number) m.get("infoPort")).intValue(),
-        getInt(m, "infoSecurePort", 0),
-        ((Number) m.get("ipcPort")).intValue(),
-
-        getLong(m, "capacity", 0l),
-        getLong(m, "dfsUsed", 0l),
-        getLong(m, "remaining", 0l),
-        getLong(m, "blockPoolUsed", 0l),
-        getLong(m, "cacheCapacity", 0l),
-        getLong(m, "cacheUsed", 0l),
-        getLong(m, "lastUpdate", 0l),
-        getLong(m, "lastUpdateMonotonic", 0l),
-        getInt(m, "xceiverCount", 0),
-        getString(m, "networkLocation", ""),
-        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")),
-        getString(m, "upgradeDomain", ""));
+    return new DatanodeInfoBuilder().setIpAddr(ipAddr)
+        .setHostName((String) m.get("hostName"))
+        .setDatanodeUuid((String) m.get("storageID")).setXferPort(xferPort)
+        .setInfoPort(((Number) m.get("infoPort")).intValue())
+        .setInfoSecurePort(getInt(m, "infoSecurePort", 0))
+        .setIpcPort(((Number) m.get("ipcPort")).intValue())
+        .setCapacity(getLong(m, "capacity", 0L))
+        .setDfsUsed(getLong(m, "dfsUsed", 0L))
+        .setRemaining(getLong(m, "remaining", 0L))
+        .setBlockPoolUsed(getLong(m, "blockPoolUsed", 0L))
+        .setCacheCapacity(getLong(m, "cacheCapacity", 0L))
+        .setCacheUsed(getLong(m, "cacheUsed", 0L))
+        .setLastUpdate(getLong(m, "lastUpdate", 0L))
+        .setLastUpdateMonotonic(getLong(m, "lastUpdateMonotonic", 0L))
+        .setXceiverCount(getInt(m, "xceiverCount", 0))
+        .setNetworkLocation(getString(m, "networkLocation", "")).setAdminState(
+            DatanodeInfo.AdminStates
+                .valueOf(getString(m, "adminState", "NORMAL")))
+        .setUpgradeDomain(getString(m, "upgradeDomain", ""))
+        .build();
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */

@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
@@ -78,8 +79,9 @@ public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
   @Override
   public GetBlocksResponseProto getBlocks(RpcController unused,
       GetBlocksRequestProto request) throws ServiceException {
-    DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request
-        .getDatanode()));
+    DatanodeInfo dnInfo = new DatanodeInfoBuilder()
+        .setNodeID(PBHelperClient.convert(request.getDatanode()))
+        .build();
     BlocksWithLocations blocks;
     try {
       blocks = impl.getBlocks(dnInfo, request.getSize());

@@ -123,6 +123,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -2384,7 +2385,8 @@ public void run() {
         in = new DataInputStream(unbufIn);
         blockSender = new BlockSender(b, 0, b.getNumBytes(),
             false, false, true, DataNode.this, null, cachingStrategy);
-        DatanodeInfo srcNode = new DatanodeInfo(bpReg);
+        DatanodeInfo srcNode = new DatanodeInfoBuilder().setNodeID(bpReg)
+            .build();
 
         new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
             clientname, targets, targetStorageTypes, srcNode,

@@ -22,6 +22,7 @@
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -52,7 +53,8 @@ public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
     if (bpRegistration == null) {
       return;
     }
-    DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
+    DatanodeInfo[] dnArr = {new DatanodeInfoBuilder()
+        .setNodeID(bpRegistration).build()};
     String[] uuids = { storageUuid };
    StorageType[] types = { storageType };
     LocatedBlock[] locatedBlock = { new LocatedBlock(block,

@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdfs.DFSPacket;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
@@ -132,7 +133,8 @@ private void init() throws IOException {
         DFSUtilClient.getSmallBufferSize(conf)));
     in = new DataInputStream(unbufIn);
 
-    DatanodeInfo source = new DatanodeInfo(datanode.getDatanodeId());
+    DatanodeInfo source = new DatanodeInfoBuilder()
+        .setNodeID(datanode.getDatanodeId()).build();
     new Sender(out).writeBlock(block, storageType,
         blockToken, "", new DatanodeInfo[]{target},
         new StorageType[]{storageType}, source,

@@ -181,6 +181,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -4040,7 +4041,8 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type
 
       DatanodeInfo[] arr = new DatanodeInfo[results.size()];
       for (int i=0; i<arr.length; i++) {
-        arr[i] = new DatanodeInfo(results.get(i));
+        arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i))
+            .build();
       }
       return arr;
     } finally {
@@ -4061,7 +4063,8 @@ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
       DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
       for (int i = 0; i < reports.length; i++) {
         final DatanodeDescriptor d = datanodes.get(i);
-        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+        reports[i] = new DatanodeStorageReport(
+            new DatanodeInfoBuilder().setFrom(d).build(),
             d.getStorageReports());
       }
       return reports;

@@ -110,6 +110,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -1066,34 +1067,42 @@ public static DatanodeDescriptor getLocalDatanodeDescriptor() {
   }
 
   public static DatanodeInfo getLocalDatanodeInfo() {
-    return new DatanodeInfo(getLocalDatanodeID());
+    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID())
+        .build();
   }
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr) {
-    return new DatanodeInfo(getDatanodeID(ipAddr));
+    return new DatanodeInfoBuilder().setNodeID(getDatanodeID(ipAddr))
+        .build();
   }
 
   public static DatanodeInfo getLocalDatanodeInfo(int port) {
-    return new DatanodeInfo(getLocalDatanodeID(port));
+    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID(port))
+        .build();
   }
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr,
       String host, int port) {
-    return new DatanodeInfo(new DatanodeID(ipAddr, host,
-        UUID.randomUUID().toString(), port,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
+    return new DatanodeInfoBuilder().setNodeID(
+        new DatanodeID(ipAddr, host, UUID.randomUUID().toString(), port,
+            DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+            DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
+            DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT)).build();
   }
 
   public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
       String hostname, AdminStates adminState) {
-    return new DatanodeInfo(ipAddr, hostname, "",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
-        1l, 2l, 3l, 4l, 0l, 0l, 0l, 5, 6, "local", adminState);
+    return new DatanodeInfoBuilder().setIpAddr(ipAddr).setHostName(hostname)
+        .setDatanodeUuid("")
+        .setXferPort(DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT)
+        .setInfoPort(DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT)
+        .setInfoSecurePort(DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT)
+        .setIpcPort(DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT).setCapacity(1L)
+        .setDfsUsed(2L).setRemaining(3L).setBlockPoolUsed(4L)
+        .setCacheCapacity(0L).setCacheUsed(0L).setLastUpdate(0L)
+        .setLastUpdateMonotonic(5).setXceiverCount(6)
+        .setNetworkLocation("local").setAdminState(adminState)
+        .build();
   }
 
   public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
@@ -91,7 +91,9 @@ private int getSendBufferSize(Configuration conf) throws IOException {
     cluster.waitActive();
     LOG.info("MiniDFSCluster started.");
     try (Socket socket = DataStreamer.createSocketForPipeline(
-        new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
+        new DatanodeInfoBuilder()
+            .setNodeID(cluster.dataNodes.get(0).datanode.getDatanodeId())
+            .build(),
         1, cluster.getFileSystem().getClient())) {
       return socket.getSendBufferSize();
     }

@@ -20,6 +20,7 @@
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -43,7 +44,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -161,8 +161,9 @@ public void testArrayOutOfBoundsException() throws Exception {
     FSNamesystem ns = cluster.getNamesystem();
     ns.writeLock();
     try {
-      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
-          blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
+      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
+          new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
+          "STORAGE_ID");
     } finally {
       ns.writeUnlock();
     }

@@ -48,6 +48,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
@@ -333,8 +334,9 @@ public void testShortCircuitReadFromServerWithoutShm() throws Exception {
     Assert.assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache();
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)

@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -735,7 +736,8 @@ public void testBlockECRecoveryCommand() {
   @Test
   public void testDataNodeInfoPBHelper() {
     DatanodeID id = DFSTestUtil.getLocalDatanodeID();
-    DatanodeInfo dnInfos0 = new DatanodeInfo(id);
+    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id)
+        .build();
     dnInfos0.setCapacity(3500L);
     dnInfos0.setDfsUsed(1000L);
     dnInfos0.setNonDfsUsed(2000L);

@@ -69,6 +69,7 @@
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -510,8 +511,9 @@ public void testRWRReplicas() throws IOException {
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
     DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
-    DatanodeInfo[] locs = new DatanodeInfo[] {
-        new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
+    DatanodeInfo[] locs = new DatanodeInfo[] {new DatanodeInfoBuilder()
+        .setNodeID(dn.getDNRegistrationForBP(
+            block.getBlockPoolId())).build(),
         mockOtherDN };
     RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
     blocks.add(rBlock);

@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -400,7 +401,8 @@ public void testInterDNProtocolTimeout() throws Throwable {
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
-    DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
+    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId)
+        .build();
     InterDatanodeProtocol proxy = null;
 
     try {

@@ -51,6 +51,7 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DomainPeer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -430,8 +431,9 @@ public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
     DomainPeer peer = getDomainPeerToDn(conf);
     MutableBoolean usedPeer = new MutableBoolean(false);
     ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     // Allocating the first shm slot requires using up a peer.
     Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
         blockId, "testAllocShm_client");
@@ -571,8 +573,9 @@ public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
     Assert.assertTrue(Arrays.equals(contents, expected));
     // Loading this file brought the ShortCircuitReplica into our local
     // replica cache.
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)