diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 43bc3321de..478a931575 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -1,2 +1,10 @@
+
+
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
new file mode 100644
index 0000000000..2817b66c37
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.util.StringUtils;
+
+import java.io.UnsupportedEncodingException;
+
+public class DFSUtilClient {
+ /**
+ * Converts a byte array to a string using UTF8 encoding.
+ */
+ public static String bytes2String(byte[] bytes) {
+ return bytes2String(bytes, 0, bytes.length);
+ }
+
+ /** Return used as percentage of capacity */
+ public static float getPercentUsed(long used, long capacity) {
+ return capacity <= 0 ? 100 : (used * 100.0f)/capacity;
+ }
+
+ /** Return remaining as percentage of capacity */
+ public static float getPercentRemaining(long remaining, long capacity) {
+ return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
+ }
+
+ /** Convert percentage to a string. */
+ public static String percent2String(double percentage) {
+ return StringUtils.format("%.2f%%", percentage);
+ }
+
+ /**
+ * Decode a specific range of bytes of the given byte array to a string
+ * using UTF8.
+ *
+ * @param bytes The bytes to be decoded into characters
+ * @param offset The index of the first byte to decode
+ * @param length The number of bytes to decode
+ * @return The decoded string
+ */
+ private static String bytes2String(byte[] bytes, int offset, int length) {
+ try {
+ return new String(bytes, offset, length, "UTF8");
+ } catch(UnsupportedEncodingException e) {
+ assert false : "UTF8 encoding is not supported ";
+ }
+ return null;
+ }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
index 2dc1d04d74..11daccc7d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.io.*;
/**************************************************
- * A Block is a Hadoop FS primitive, identified by a
+ * A Block is a Hadoop FS primitive, identified by a
* long.
*
**************************************************/
@@ -124,7 +124,7 @@ public void set(long blkid, long len, long genStamp) {
public long getBlockId() {
return blockId;
}
-
+
public void setBlockId(long bid) {
blockId = bid;
}
@@ -147,7 +147,7 @@ public void setNumBytes(long len) {
public long getGenerationStamp() {
return generationStamp;
}
-
+
public void setGenerationStamp(long stamp) {
generationStamp = stamp;
}
@@ -179,13 +179,13 @@ public void write(DataOutput out) throws IOException {
public void readFields(DataInput in) throws IOException {
readHelper(in);
}
-
+
final void writeHelper(DataOutput out) throws IOException {
out.writeLong(blockId);
out.writeLong(numBytes);
out.writeLong(generationStamp);
}
-
+
final void readHelper(DataInput in) throws IOException {
this.blockId = in.readLong();
this.numBytes = in.readLong();
@@ -194,7 +194,7 @@ final void readHelper(DataInput in) throws IOException {
throw new IOException("Unexpected block size: " + numBytes);
}
}
-
+
// write only the identifier part of the block
public void writeId(DataOutput out) throws IOException {
out.writeLong(blockId);
@@ -223,7 +223,7 @@ public boolean equals(Object o) {
}
return compareTo((Block)o) == 0;
}
-
+
/**
* @return true if the two blocks have the same block ID and the same
* generation stamp, or if both blocks are null.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index f91696fb28..6d72285426 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -50,7 +50,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
/**
* UUID identifying a given datanode. For upgraded Datanodes this is the
- * same as the StorageID that was previously used by this Datanode.
+ * same as the StorageID that was previously used by this Datanode.
* For newly formatted Datanodes it is a UUID.
*/
private final String datanodeUuid;
@@ -80,7 +80,7 @@ public DatanodeID(String datanodeUuid, DatanodeID from) {
* e.g. if this is a new datanode. A new UUID will
* be assigned by the namenode.
* @param xferPort data transfer port
- * @param infoPort info server port
+ * @param infoPort info server port
* @param ipcPort ipc server port
*/
public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
@@ -92,7 +92,7 @@ public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
this.infoSecurePort = infoSecurePort;
this.ipcPort = ipcPort;
}
-
+
public void setIpAddr(String ipAddr) {
//updated during registration, preserve former xferPort
setIpAndXferPort(ipAddr, xferPort);
@@ -108,7 +108,7 @@ private void setIpAndXferPort(String ipAddr, int xferPort) {
public void setPeerHostName(String peerHostName) {
this.peerHostName = peerHostName;
}
-
+
/**
* @return data node ID.
*/
@@ -139,12 +139,12 @@ public String getHostName() {
}
/**
- * @return hostname from the actual connection
+ * @return hostname from the actual connection
*/
public String getPeerHostName() {
return peerHostName;
}
-
+
/**
* @return IP:xferPort string
*/
@@ -242,17 +242,17 @@ public boolean equals(Object to) {
return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
datanodeUuid.equals(((DatanodeID)to).getDatanodeUuid()));
}
-
+
@Override
public int hashCode() {
return datanodeUuid.hashCode();
}
-
+
@Override
public String toString() {
return getXferAddr();
}
-
+
/**
* Update fields when a new registration request comes in.
* Note that this does not update storageID.
@@ -265,7 +265,7 @@ public void updateRegInfo(DatanodeID nodeReg) {
infoSecurePort = nodeReg.getInfoSecurePort();
ipcPort = nodeReg.getIpcPort();
}
-
+
/**
* Compare based on data transfer address.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
similarity index 91%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 5ded26b185..3555add7aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -19,8 +19,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -32,9 +31,9 @@
import java.util.LinkedList;
import java.util.List;
-import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
+import static org.apache.hadoop.hdfs.DFSUtilClient.percent2String;
-/**
+/**
* This class extends the primary identifier of a Datanode with ephemeral
* state, eg usage information, current administrative state, and the
* network location that is communicated to clients.
@@ -54,12 +53,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
private String location = NetworkTopology.DEFAULT_RACK;
private String softwareVersion;
private List<String> dependentHostNames = new LinkedList<String>();
-
-
+
+
// Datanode administrative states
public enum AdminStates {
- NORMAL("In Service"),
- DECOMMISSION_INPROGRESS("Decommission In Progress"),
+ NORMAL("In Service"),
+ DECOMMISSION_INPROGRESS("Decommission In Progress"),
DECOMMISSIONED("Decommissioned");
final String value;
@@ -72,7 +71,7 @@ public enum AdminStates {
public String toString() {
return value;
}
-
+
public static AdminStates fromValue(final String value) {
for (AdminStates as : AdminStates.values()) {
if (as.value.equals(value)) return as;
@@ -109,14 +108,14 @@ public DatanodeInfo(DatanodeID nodeID) {
this.lastUpdate = 0L;
this.lastUpdateMonotonic = 0L;
this.xceiverCount = 0;
- this.adminState = null;
+ this.adminState = null;
}
-
+
public DatanodeInfo(DatanodeID nodeID, String location) {
this(nodeID);
this.location = location;
}
-
+
public DatanodeInfo(DatanodeID nodeID, String location,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
@@ -152,16 +151,16 @@ public DatanodeInfo(final String ipAddr, final String hostName,
this.location = networkLocation;
this.adminState = adminState;
}
-
+
/** Network location name */
@Override
public String getName() {
return getXferAddr();
}
-
+
/** The raw capacity. */
public long getCapacity() { return capacity; }
-
+
/** The used space by the data node. */
public long getDfsUsed() { return dfsUsed; }
@@ -169,14 +168,14 @@ public String getName() {
public long getBlockPoolUsed() { return blockPoolUsed; }
/** The used space by the data node. */
- public long getNonDfsUsed() {
+ public long getNonDfsUsed() {
long nonDFSUsed = capacity - dfsUsed - remaining;
return nonDFSUsed < 0 ? 0 : nonDFSUsed;
}
/** The used space by the data node as percentage of present capacity */
- public float getDfsUsedPercent() {
- return DFSUtil.getPercentUsed(dfsUsed, capacity);
+ public float getDfsUsedPercent() {
+ return DFSUtilClient.getPercentUsed(dfsUsed, capacity);
}
/** The raw free space. */
@@ -184,12 +183,12 @@ public float getDfsUsedPercent() {
/** Used space by the block pool as percentage of present capacity */
public float getBlockPoolUsedPercent() {
- return DFSUtil.getPercentUsed(blockPoolUsed, capacity);
+ return DFSUtilClient.getPercentUsed(blockPoolUsed, capacity);
}
-
+
/** The remaining space as percentage of configured capacity. */
- public float getRemainingPercent() {
- return DFSUtil.getPercentRemaining(remaining, capacity);
+ public float getRemainingPercent() {
+ return DFSUtilClient.getPercentRemaining(remaining, capacity);
}
/**
@@ -210,7 +209,7 @@ public long getCacheUsed() {
* @return Cache used as a percentage of the datanode's total cache capacity
*/
public float getCacheUsedPercent() {
- return DFSUtil.getPercentUsed(cacheUsed, cacheCapacity);
+ return DFSUtilClient.getPercentUsed(cacheUsed, cacheCapacity);
}
/**
@@ -225,7 +224,7 @@ public long getCacheRemaining() {
* capacity
*/
public float getCacheRemainingPercent() {
- return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity);
+ return DFSUtilClient.getPercentRemaining(getCacheRemaining(), cacheCapacity);
}
/**
@@ -234,10 +233,10 @@ public float getCacheRemainingPercent() {
*/
public long getLastUpdate() { return lastUpdate; }
- /**
+ /**
* The time when this information was accurate.
* Ps: So return value is ideal for calculation of time differences.
- * Should not be used to convert to Date.
+ * Should not be used to convert to Date.
*/
public long getLastUpdateMonotonic() { return lastUpdateMonotonic;}
@@ -252,23 +251,23 @@ public void setLastUpdateMonotonic(long lastUpdateMonotonic) {
public int getXceiverCount() { return xceiverCount; }
/** Sets raw capacity. */
- public void setCapacity(long capacity) {
- this.capacity = capacity;
+ public void setCapacity(long capacity) {
+ this.capacity = capacity;
}
-
+
/** Sets the used space for the datanode. */
public void setDfsUsed(long dfsUsed) {
this.dfsUsed = dfsUsed;
}
/** Sets raw free space. */
- public void setRemaining(long remaining) {
- this.remaining = remaining;
+ public void setRemaining(long remaining) {
+ this.remaining = remaining;
}
/** Sets block pool used space */
- public void setBlockPoolUsed(long bpUsed) {
- this.blockPoolUsed = bpUsed;
+ public void setBlockPoolUsed(long bpUsed) {
+ this.blockPoolUsed = bpUsed;
}
/** Sets cache capacity. */
@@ -282,40 +281,40 @@ public void setCacheUsed(long cacheUsed) {
}
/** Sets time when this information was accurate. */
- public void setLastUpdate(long lastUpdate) {
- this.lastUpdate = lastUpdate;
+ public void setLastUpdate(long lastUpdate) {
+ this.lastUpdate = lastUpdate;
}
/** Sets number of active connections */
- public void setXceiverCount(int xceiverCount) {
- this.xceiverCount = xceiverCount;
+ public void setXceiverCount(int xceiverCount) {
+ this.xceiverCount = xceiverCount;
}
/** network location */
@Override
public synchronized String getNetworkLocation() {return location;}
-
+
/** Sets the network location */
@Override
public synchronized void setNetworkLocation(String location) {
this.location = NodeBase.normalize(location);
}
-
+
/** Add a hostname to a list of network dependencies */
public void addDependentHostName(String hostname) {
dependentHostNames.add(hostname);
}
-
+
/** List of Network dependencies */
public List<String> getDependentHostNames() {
return dependentHostNames;
}
-
+
/** Sets the network dependencies */
public void setDependentHostNames(List<String> dependencyList) {
dependentHostNames = dependencyList;
}
-
+
/** A formatted string for reporting the status of the DataNode. */
public String getDatanodeReport() {
StringBuilder buffer = new StringBuilder();
@@ -444,14 +443,13 @@ public AdminStates getAdminState() {
}
return adminState;
}
-
+
/**
- * Check if the datanode is in stale state. Here if
- * the namenode has not received heartbeat msg from a
- * datanode for more than staleInterval (default value is
- * {@link DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT}),
+ * Check if the datanode is in stale state. Here if
+ * the namenode has not received heartbeat msg from a
+ * datanode for more than staleInterval,
* the datanode will be treated as stale node.
- *
+ *
* @param staleInterval
* the time interval for marking the node as stale. If the last
* update time is beyond the given time interval, the node will be
@@ -461,7 +459,7 @@ public AdminStates getAdminState() {
public boolean isStale(long staleInterval) {
return (Time.monotonicNow() - lastUpdateMonotonic) >= staleInterval;
}
-
+
/**
* Sets the admin state of this node.
*/
@@ -482,7 +480,7 @@ protected void setAdminState(AdminStates newState) {
public Node getParent() { return parent; }
@Override
public void setParent(Node parent) {this.parent = parent;}
-
+
/** Return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
@@ -496,7 +494,7 @@ public int hashCode() {
// Super implementation is sufficient
return super.hashCode();
}
-
+
@Override
public boolean equals(Object obj) {
// Sufficient to use super equality as datanodes are uniquely identified
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
similarity index 99%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
index 27c1761ad2..af24909f01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
@@ -36,11 +36,11 @@ public ExtendedBlock() {
public ExtendedBlock(final ExtendedBlock b) {
this(b.poolId, new Block(b.block));
}
-
+
public ExtendedBlock(final String poolId, final long blockId) {
this(poolId, blockId, 0, 0);
}
-
+
public ExtendedBlock(String poolId, Block b) {
this.poolId = poolId;
this.block = b;
@@ -76,7 +76,7 @@ public long getGenerationStamp() {
public void setBlockId(final long bid) {
block.setBlockId(bid);
}
-
+
public void setGenerationStamp(final long genStamp) {
block.setGenerationStamp(genStamp);
}
@@ -84,7 +84,7 @@ public void setGenerationStamp(final long genStamp) {
public void setNumBytes(final long len) {
block.setNumBytes(len);
}
-
+
public void set(String poolId, Block blk) {
this.poolId = poolId;
this.block = blk;
@@ -93,11 +93,11 @@ public void set(String poolId, Block blk) {
public static Block getLocalBlock(final ExtendedBlock b) {
return b == null ? null : b.getLocalBlock();
}
-
+
public Block getLocalBlock() {
return block;
}
-
+
@Override // Object
public boolean equals(Object o) {
if (this == o) {
@@ -109,13 +109,13 @@ public boolean equals(Object o) {
ExtendedBlock b = (ExtendedBlock)o;
return b.block.equals(block) && b.poolId.equals(poolId);
}
-
+
@Override // Object
public int hashCode() {
int result = 31 + poolId.hashCode();
return (31 * result + block.hashCode());
}
-
+
@Override // Object
public String toString() {
return poolId + ":" + block;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
index aa5f1f9d12..40c320366c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
@@ -31,4 +31,5 @@ public interface HdfsConstantsClient {
* uses GRANDFATHER_INODE_ID for backward compatibility.
*/
long GRANDFATHER_INODE_ID = 0;
+ byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 94d9a92cfa..34f429a21b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
/** Interface that represents the over the wire information for a file.
*/
@@ -47,11 +47,11 @@ public class HdfsFileStatus {
private final long fileId;
private final FileEncryptionInfo feInfo;
-
+
// Used by dir, not including dot and dotdot. Always zero for a regular file.
private final int childrenNum;
private final byte storagePolicy;
-
+
public static final byte[] EMPTY_NAME = new byte[0];
/**
@@ -80,9 +80,9 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication,
this.blocksize = blocksize;
this.modification_time = modification_time;
this.access_time = access_time;
- this.permission = (permission == null) ?
- ((isdir || symlink!=null) ?
- FsPermission.getDefault() :
+ this.permission = (permission == null) ?
+ ((isdir || symlink!=null) ?
+ FsPermission.getDefault() :
FsPermission.getFileDefault()) :
permission;
this.owner = (owner == null) ? "" : owner;
@@ -118,7 +118,7 @@ public final boolean isDir() {
public boolean isSymlink() {
return symlink != null;
}
-
+
/**
* Get the block size of the file.
* @return the number of bytes
@@ -158,7 +158,7 @@ public final long getAccessTime() {
public final FsPermission getPermission() {
return permission;
}
-
+
/**
* Get the owner of the file.
* @return owner of the file
@@ -166,15 +166,15 @@ public final FsPermission getPermission() {
public final String getOwner() {
return owner;
}
-
+
/**
* Get the group associated with the file.
- * @return group for the file.
+ * @return group for the file.
*/
public final String getGroup() {
return group;
}
-
+
/**
* Check if the local name is empty
* @return true if the name is empty
@@ -188,9 +188,9 @@ public final boolean isEmptyLocalName() {
* @return the local name in string
*/
public final String getLocalName() {
- return DFSUtil.bytes2String(path);
+ return DFSUtilClient.bytes2String(path);
}
-
+
/**
* Get the Java UTF8 representation of the local name
* @return the local name in java UTF8
@@ -208,7 +208,7 @@ public final String getFullName(final String parent) {
if (isEmptyLocalName()) {
return parent;
}
-
+
StringBuilder fullName = new StringBuilder(parent);
if (!parent.endsWith(Path.SEPARATOR)) {
fullName.append(Path.SEPARATOR);
@@ -226,7 +226,7 @@ public final Path getFullPath(final Path parent) {
if (isEmptyLocalName()) {
return parent;
}
-
+
return new Path(parent, getLocalName());
}
@@ -235,17 +235,17 @@ public final Path getFullPath(final Path parent) {
* @return the symlink as a string.
*/
public final String getSymlink() {
- return DFSUtil.bytes2String(symlink);
+ return DFSUtilClient.bytes2String(symlink);
}
-
+
public final byte[] getSymlinkInBytes() {
return symlink;
}
-
+
public final long getFileId() {
return fileId;
}
-
+
public final FileEncryptionInfo getFileEncryptionInfo() {
return feInfo;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
similarity index 92%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index e729869878..cc13f10605 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -24,8 +24,6 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.security.token.Token;
import com.google.common.collect.Lists;
@@ -48,7 +46,7 @@ public class LocatedBlock {
/** Cached storage type for each replica, if reported. */
private final StorageType[] storageTypes;
// corrupt flag is true if all of the replicas of a block are corrupt.
- // else false. If block has few corrupt replicas, they are filtered and
+ // else false. If block has few corrupt replicas, they are filtered and
// their locations are not part of this object
private boolean corrupt;
private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
@@ -71,14 +69,6 @@ public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
}
- public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages,
- long startOffset, boolean corrupt) {
- this(b, DatanodeStorageInfo.toDatanodeInfos(storages),
- DatanodeStorageInfo.toStorageIDs(storages),
- DatanodeStorageInfo.toStorageTypes(storages),
- startOffset, corrupt, EMPTY_LOCS); // startOffset is unknown
- }
-
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
StorageType[] storageTypes, long startOffset,
boolean corrupt, DatanodeInfo[] cachedLocs) {
@@ -132,7 +122,7 @@ public DatanodeInfo[] getLocations() {
public StorageType[] getStorageTypes() {
return storageTypes;
}
-
+
public String[] getStorageIDs() {
return storageIDs;
}
@@ -157,7 +147,7 @@ public void updateCachedStorageInfo() {
public long getStartOffset() {
return offset;
}
-
+
public long getBlockSize() {
return b.getNumBytes();
}
@@ -169,14 +159,14 @@ public void setStartOffset(long value) {
public void setCorrupt(boolean corrupt) {
this.corrupt = corrupt;
}
-
+
public boolean isCorrupt() {
return this.corrupt;
}
/**
* Add a the location of a cached replica of the block.
- *
+ *
* @param loc of datanode with the cached replica
*/
public void addCachedLoc(DatanodeInfo loc) {
@@ -214,4 +204,3 @@ public String toString() {
+ "}";
}
}
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index e35a431074..e48969776a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -57,19 +57,19 @@ public LocatedBlocks(long flength, boolean isUnderConstuction,
this.isLastBlockComplete = isLastBlockCompleted;
this.fileEncryptionInfo = feInfo;
}
-
+
/**
* Get located blocks.
*/
public List<LocatedBlock> getLocatedBlocks() {
return blocks;
}
-
+
/** Get the last located block. */
public LocatedBlock getLastLocatedBlock() {
return lastLocatedBlock;
}
-
+
/** Is the last block completed? */
public boolean isLastBlockComplete() {
return isLastBlockComplete;
@@ -81,7 +81,7 @@ public boolean isLastBlockComplete() {
public LocatedBlock get(int index) {
return blocks.get(index);
}
-
+
/**
* Get number of located blocks.
*/
@@ -90,7 +90,7 @@ public int locatedBlockCount() {
}
/**
- *
+ *
*/
public long getFileLength() {
return this.fileLength;
@@ -113,7 +113,7 @@ public FileEncryptionInfo getFileEncryptionInfo() {
/**
* Find block containing specified offset.
- *
+ *
* @return block if found, or null otherwise.
*/
public int findBlock(long offset) {
@@ -122,7 +122,7 @@ public int findBlock(long offset) {
new ExtendedBlock(), new DatanodeInfo[0]);
key.setStartOffset(offset);
key.getBlock().setNumBytes(1);
- Comparator<LocatedBlock> comp =
+ Comparator<LocatedBlock> comp =
new Comparator<LocatedBlock>() {
// Returns 0 iff a is inside b or b is inside a
@Override
@@ -131,7 +131,7 @@ public int compare(LocatedBlock a, LocatedBlock b) {
long bBeg = b.getStartOffset();
long aEnd = aBeg + a.getBlockSize();
long bEnd = bBeg + b.getBlockSize();
- if(aBeg <= bBeg && bEnd <= aEnd
+ if(aBeg <= bBeg && bEnd <= aEnd
|| bBeg <= aBeg && aEnd <= bEnd)
return 0; // one of the blocks is inside the other
if(aBeg < bBeg)
@@ -141,11 +141,11 @@ public int compare(LocatedBlock a, LocatedBlock b) {
};
return Collections.binarySearch(blocks, key, comp);
}
-
+
public void insertRange(int blockIdx, List<LocatedBlock> newBlocks) {
int oldIdx = blockIdx;
int insStart = 0, insEnd = 0;
- for(int newIdx = 0; newIdx < newBlocks.size() && oldIdx < blocks.size();
+ for(int newIdx = 0; newIdx < newBlocks.size() && oldIdx < blocks.size();
newIdx++) {
long newOff = newBlocks.get(newIdx).getStartOffset();
long oldOff = blocks.get(oldIdx).getStartOffset();
@@ -169,7 +169,7 @@ public void insertRange(int blockIdx, List<LocatedBlock> newBlocks) {
blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd));
}
}
-
+
public static int getInsertIndex(int binSearchResult) {
return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index e293dcc388..3f2c9ca98e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -46,7 +46,7 @@ public enum AccessMode {
private final EnumSet<AccessMode> modes;
private byte [] cache;
-
+
public BlockTokenIdentifier() {
this(null, null, 0, EnumSet.noneOf(AccessMode.class));
}
@@ -129,7 +129,7 @@ public boolean equals(Object obj) {
if (obj instanceof BlockTokenIdentifier) {
BlockTokenIdentifier that = (BlockTokenIdentifier) obj;
return this.expiryDate == that.expiryDate && this.keyId == that.keyId
- && isEqual(this.userId, that.userId)
+ && isEqual(this.userId, that.userId)
&& isEqual(this.blockPoolId, that.blockPoolId)
&& this.blockId == that.blockId
&& isEqual(this.modes, that.modes);
@@ -171,11 +171,11 @@ public void write(DataOutput out) throws IOException {
WritableUtils.writeEnum(out, aMode);
}
}
-
+
@Override
public byte[] getBytes() {
if(cache == null) cache = super.getBytes();
-
+
return cache;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
similarity index 91%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
index 07052ddf76..2a5d5fe54b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
@@ -23,8 +23,7 @@
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -33,7 +32,7 @@
* A delegation token identifier that is specific to HDFS.
*/
@InterfaceAudience.Private
-public class DelegationTokenIdentifier
+public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
public static final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN");
@@ -68,7 +67,7 @@ public String toString() {
public static String stringifyToken(final Token<?> token) throws IOException {
DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
- DataInputStream in = new DataInputStream(buf);
+ DataInputStream in = new DataInputStream(buf);
ident.readFields(in);
if (token.getService().getLength() > 0) {
@@ -77,7 +76,7 @@ public static String stringifyToken(final Token<?> token) throws IOException {
return ident.toString();
}
}
-
+
public static class WebHdfsDelegationTokenIdentifier
extends DelegationTokenIdentifier {
public WebHdfsDelegationTokenIdentifier() {
@@ -85,17 +84,17 @@ public WebHdfsDelegationTokenIdentifier() {
}
@Override
public Text getKind() {
- return WebHdfsFileSystem.TOKEN_KIND;
+ return WebHdfsConstants.WEBHDFS_TOKEN_KIND;
}
}
-
+
public static class SWebHdfsDelegationTokenIdentifier extends WebHdfsDelegationTokenIdentifier {
public SWebHdfsDelegationTokenIdentifier() {
super();
}
@Override
public Text getKind() {
- return SWebHdfsFileSystem.TOKEN_KIND;
+ return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
similarity index 75%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
index 544ffe54e2..25f3cfde08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
@@ -17,9 +17,16 @@
*/
package org.apache.hadoop.hdfs.web;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.io.Text;
+
+@InterfaceAudience.Private
+public class WebHdfsConstants {
+ /** Delegation token kind */
+ public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation");
+ public static final Text SWEBHDFS_TOKEN_KIND = new Text("SWEBHDFS delegation");
-class WebHdfsConstants {
enum PathType {
FILE, DIRECTORY, SYMLINK;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 39ee33e583..9bb1fd42a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -450,6 +450,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)
+ HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 8b3f5121bf..a2c16b713e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1234,21 +1234,6 @@ public static void setGenericConf(Configuration conf,
}
}
}
-
- /** Return used as percentage of capacity */
- public static float getPercentUsed(long used, long capacity) {
- return capacity <= 0 ? 100 : (used * 100.0f)/capacity;
- }
-
- /** Return remaining as percentage of capacity */
- public static float getPercentRemaining(long remaining, long capacity) {
- return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
- }
-
- /** Convert percentage to a string. */
- public static String percent2String(double percentage) {
- return StringUtils.format("%.2f%%", percentage);
- }
/**
* Round bytes to GiB (gibibyte)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 31feb1e53e..518e91a406 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -25,7 +25,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
/**
* Metadata about a snapshottable directory
@@ -62,7 +61,7 @@ public SnapshottableDirectoryStatus(long modification_time, long access_time,
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
access_time, permission, owner, group, null, localName, inodeId,
- childrenNum, null, BlockStoragePolicySuite.ID_UNSPECIFIED);
+ childrenNum, null, HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.parentFullPath = parentFullPath;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index a7872d58a9..b440e60b01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -184,7 +184,6 @@
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -1442,7 +1441,7 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
- : BlockStoragePolicySuite.ID_UNSPECIFIED);
+ : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
}
public static SnapshottableDirectoryStatus convert(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4c9582c0a8..2a7b02a206 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -123,7 +123,7 @@ public class BlockManager {
private final AtomicLong excessBlocksCount = new AtomicLong(0L);
private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
private final long startupDelayBlockDeletionInMs;
-
+
/** Used by metrics */
public long getPendingReplicationBlocksCount() {
return pendingReplicationBlocksCount;
@@ -836,7 +836,7 @@ private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final lon
(BlockInfoContiguousUnderConstruction) blk;
final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
- return new LocatedBlock(eb, storages, pos, false);
+ return newLocatedBlock(eb, storages, pos, false);
}
// get block locations
@@ -868,7 +868,7 @@ private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final lon
" numCorrupt: " + numCorruptNodes +
" numCorruptRepls: " + numCorruptReplicas;
final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
- return new LocatedBlock(eb, machines, pos, isCorrupt);
+ return newLocatedBlock(eb, machines, pos, isCorrupt);
}
/** Create a LocatedBlocks. */
@@ -3723,7 +3723,18 @@ public void clearQueues() {
postponedMisreplicatedBlocks.clear();
postponedMisreplicatedBlocksCount.set(0);
};
-
+
+ public static LocatedBlock newLocatedBlock(
+ ExtendedBlock b, DatanodeStorageInfo[] storages,
+ long startOffset, boolean corrupt) {
+ // startOffset is unknown
+ return new LocatedBlock(
+ b, DatanodeStorageInfo.toDatanodeInfos(storages),
+ DatanodeStorageInfo.toStorageIDs(storages),
+ DatanodeStorageInfo.toStorageTypes(storages),
+ startOffset, corrupt,
+ null);
+ }
private static class ReplicationWork {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 020cb5f065..6c352f3706 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -42,7 +42,6 @@ public class BlockStoragePolicySuite {
public static final XAttr.NameSpace XAttrNS = XAttr.NameSpace.SYSTEM;
public static final int ID_BIT_LENGTH = 4;
- public static final byte ID_UNSPECIFIED = 0;
@VisibleForTesting
public static BlockStoragePolicySuite createDefaultSuite() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index be16a87314..8c752ac37d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -25,6 +25,9 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index b0ab31560f..9017fe1ef9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -115,7 +115,7 @@ public synchronized long getCapacityUsed() {
@Override
public synchronized float getCapacityUsedPercent() {
- return DFSUtil.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
+ return DFSUtilClient.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
}
@Override
@@ -125,8 +125,8 @@ public synchronized long getCapacityRemaining() {
@Override
public synchronized float getCapacityRemainingPercent() {
- return DFSUtil.getPercentRemaining(
- stats.capacityRemaining, stats.capacityTotal);
+ return DFSUtilClient.getPercentRemaining(stats.capacityRemaining,
+ stats.capacityTotal);
}
@Override
@@ -136,7 +136,8 @@ public synchronized long getBlockPoolUsed() {
@Override
public synchronized float getPercentBlockPoolUsed() {
- return DFSUtil.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal);
+ return DFSUtilClient.getPercentUsed(stats.blockPoolUsed,
+ stats.capacityTotal);
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 89487aa68f..f1beb759d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -53,7 +53,6 @@
import java.io.BufferedReader;
import java.io.FileInputStream;
-import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
@@ -331,7 +330,7 @@ private boolean processRecursively(String parent, HdfsFileStatus status) {
private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
final byte policyId = status.getStoragePolicy();
// currently we ignore files with unspecified storage policy
- if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
+ if (policyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
return false;
}
final BlockStoragePolicy policy = blockStoragePolicies[policyId];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 850b3bd6c5..cea2b82f33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -31,12 +31,12 @@
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -140,7 +140,7 @@ static ContentSummary getContentSummary(
}
private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
- return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy :
+ return inodePolicy != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy :
parentPolicy;
}
@@ -176,8 +176,8 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
if (targetNode == null)
return null;
byte parentStoragePolicy = isSuperUser ?
- targetNode.getStoragePolicyID() : BlockStoragePolicySuite
- .ID_UNSPECIFIED;
+ targetNode.getStoragePolicyID() : HdfsConstantsClient
+ .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
if (!targetNode.isDirectory()) {
return new DirectoryListing(
@@ -199,7 +199,7 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
INode cur = contents.get(startChild+i);
byte curPolicy = isSuperUser && !cur.isSymlink()?
cur.getLocalStoragePolicyID():
- BlockStoragePolicySuite.ID_UNSPECIFIED;
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
needLocation, getStoragePolicyID(curPolicy,
parentStoragePolicy), snapshot, isRawPath, iip);
@@ -254,7 +254,7 @@ private static DirectoryListing getSnapshotsListing(
for (int i = 0; i < numOfListing; i++) {
Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
- BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
false, INodesInPath.fromINode(sRoot));
}
return new DirectoryListing(
@@ -277,7 +277,7 @@ static HdfsFileStatus getFileInfo(
try {
final INode i = src.getLastINode();
byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
- i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+ i.getStoragePolicyID() : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
return i == null ? null : createFileStatus(
fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
src.getPathSnapshotId(), isRawPath, src);
@@ -295,7 +295,7 @@ static HdfsFileStatus getFileInfo(
if (fsd.getINode4DotSnapshot(srcs) != null) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
- BlockStoragePolicySuite.ID_UNSPECIFIED);
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
}
return null;
}
@@ -322,7 +322,7 @@ private static HdfsFileStatus getFileInfo4DotSnapshot(
if (fsd.getINode4DotSnapshot(src) != null) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
- BlockStoragePolicySuite.ID_UNSPECIFIED);
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
}
return null;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index f74c42a010..0189f63117 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -49,6 +49,7 @@
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
@@ -61,7 +62,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
import org.apache.hadoop.hdfs.util.ByteArray;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.security.AccessControlException;
@@ -742,7 +742,7 @@ public EnumCounters<StorageType> getStorageTypeDeltas(byte storagePolicyID,
EnumCounters<StorageType> typeSpaceDeltas =
new EnumCounters<StorageType>(StorageType.class);
// Storage type and its quota are only available when storage policy is set
- if (storagePolicyID != BlockStoragePolicySuite.ID_UNSPECIFIED) {
+ if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
BlockStoragePolicy storagePolicy = getBlockManager().getStoragePolicy(storagePolicyID);
if (oldRep != newRep) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index d38ef67a73..293bcbae79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -380,7 +379,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
if (toAddRetryCache) {
HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile,
- BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
false, iip);
fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
addCloseOp.rpcCallId, stat);
@@ -400,7 +399,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
fsNamesys.dir, path,
HdfsFileStatus.EMPTY_NAME, newFile,
- BlockStoragePolicySuite.ID_UNSPECIFIED,
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
Snapshot.CURRENT_STATE_ID, false, iip);
fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat));
@@ -473,7 +472,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
if (toAddRetryCache) {
HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file,
- BlockStoragePolicySuite.ID_UNSPECIFIED,
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
Snapshot.CURRENT_STATE_ID, false, iip);
fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,
appendOp.rpcCallId, new LastBlockWithStatus(lb, stat));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 2b9bbd2df1..0daf12fd82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -108,7 +108,6 @@
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
@@ -434,7 +433,7 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin
private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
- storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
+ storagePolicyId = HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
}
@@ -636,7 +635,7 @@ void readFields(DataInputStream in, int logVersion)
NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
this.storagePolicyId = FSImageSerialization.readByte(in);
} else {
- this.storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
+ this.storagePolicyId = HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
// read clientId and callId
readRpcIds(in, logVersion);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 02a66de1c6..f174a4e470 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3287,7 +3287,7 @@ FileState analyzeFileState(String src,
LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
long offset) throws IOException {
- LocatedBlock lBlk = new LocatedBlock(
+ LocatedBlock lBlk = BlockManager.newLocatedBlock(
getExtendedBlock(blk), locs, offset, false);
getBlockManager().setBlockToken(
lBlk, BlockTokenIdentifier.AccessMode.WRITE);
@@ -3351,7 +3351,8 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(
src, numAdditionalNodes, clientnode, chosen,
excludes, preferredblocksize, storagePolicyID);
- final LocatedBlock lb = new LocatedBlock(blk, targets, -1, false);
+ final LocatedBlock lb = BlockManager.newLocatedBlock(
+ blk, targets, -1, false);
blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
return lb;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index e6294418ee..cf0325e49f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -510,7 +511,7 @@ public final boolean isQuotaSet() {
*/
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
final byte storagePolicyId = isSymlink() ?
- BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
return computeQuotaUsage(bsps, storagePolicyId,
new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
}
@@ -554,7 +555,7 @@ public abstract QuotaCounts computeQuotaUsage(
public final QuotaCounts computeQuotaUsage(
BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache) {
final byte storagePolicyId = isSymlink() ?
- BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
return computeQuotaUsage(bsps, storagePolicyId, counts,
useCache, Snapshot.CURRENT_STATE_ID);
}
@@ -711,7 +712,7 @@ public final INode setAccessTime(long accessTime, int latestSnapshotId) {
/**
* @return the storage policy directly specified on the INode. Return
- * {@link BlockStoragePolicySuite#ID_UNSPECIFIED} if no policy has
+ * {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has
* been specified.
*/
public abstract byte getLocalStoragePolicyID();
@@ -720,13 +721,13 @@ public final INode setAccessTime(long accessTime, int latestSnapshotId) {
* Get the storage policy ID while computing quota usage
* @param parentStoragePolicyId the storage policy ID of the parent directory
* @return the storage policy ID of this INode. Note that for an
- * {@link INodeSymlink} we return {@link BlockStoragePolicySuite#ID_UNSPECIFIED}
+ * {@link INodeSymlink} we return {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}
* instead of throwing Exception
*/
public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) {
byte localId = isSymlink() ?
- BlockStoragePolicySuite.ID_UNSPECIFIED : getLocalStoragePolicyID();
- return localId != BlockStoragePolicySuite.ID_UNSPECIFIED ?
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID();
+ return localId != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ?
localId : parentStoragePolicyId;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 9f55fc464b..12fa7aaee0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -45,7 +45,7 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
-import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
/**
* Directory INode class.
@@ -125,18 +125,17 @@ public byte getLocalStoragePolicyID() {
return (xattr.getValue())[0];
}
}
- return ID_UNSPECIFIED;
+ return BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
@Override
public byte getStoragePolicyID() {
byte id = getLocalStoragePolicyID();
- if (id != ID_UNSPECIFIED) {
+ if (id != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
return id;
}
// if it is unspecified, check its parent
- return getParent() != null ? getParent().getStoragePolicyID() :
- ID_UNSPECIFIED;
+ return getParent() != null ? getParent().getStoragePolicyID() : BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 294dd1443b..e9d3644c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
@@ -393,7 +393,7 @@ public byte getLocalStoragePolicyID() {
@Override
public byte getStoragePolicyID() {
byte id = getLocalStoragePolicyID();
- if (id == ID_UNSPECIFIED) {
+ if (id == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
return this.getParent() != null ?
this.getParent().getStoragePolicyID() : id;
}
@@ -584,7 +584,7 @@ public final QuotaCounts computeQuotaUsage(
counts.addNameSpace(nsDelta);
counts.addStorageSpace(ssDeltaNoReplication * replication);
- if (blockStoragePolicyId != ID_UNSPECIFIED){
+ if (blockStoragePolicyId != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
        List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
for (StorageType t : storageTypes) {
@@ -619,7 +619,7 @@ public final ContentSummaryComputationContext computeContentSummary(
counts.addContent(Content.LENGTH, fileLen);
counts.addContent(Content.DISKSPACE, storagespaceConsumed());
- if (getStoragePolicyID() != ID_UNSPECIFIED){
+ if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
getPolicy(getStoragePolicyID());
        List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index fc1cbec7fa..9a1e1f4ec6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;
@@ -123,12 +124,12 @@ public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
@Override
public byte getStoragePolicyID(){
- return BlockStoragePolicySuite.ID_UNSPECIFIED;
+ return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
@Override
public byte getLocalStoragePolicyID() {
- return BlockStoragePolicySuite.ID_UNSPECIFIED;
+ return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
};
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index abd54a273b..7baf7dcbb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
@@ -150,7 +151,7 @@ public QuotaCounts updateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps, INo
BlockStoragePolicy bsp = null;
     EnumCounters<StorageType> typeSpaces =
         new EnumCounters<StorageType>(StorageType.class);
- if (storagePolicyID != BlockStoragePolicySuite.ID_UNSPECIFIED) {
+ if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
bsp = bsps.getPolicy(file.getStoragePolicyID());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 0a6f133099..2c1148e4d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -73,7 +73,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.ParamFilter;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.io.Text;
@@ -246,8 +246,8 @@ private Token<? extends TokenIdentifier> generateDelegationToken(
return null;
}
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
- Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
- : SWebHdfsFileSystem.TOKEN_KIND;
+ Text kind = request.getScheme().equals("http") ? WebHdfsConstants.WEBHDFS_TOKEN_KIND
+ : WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
t.setKind(kind);
return t;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index d1b6017d7e..ba1fd0f961 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -22,8 +22,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
@@ -151,7 +151,7 @@ public int run(Configuration conf, List<String> args) throws IOException {
return 2;
}
byte storagePolicyId = status.getStoragePolicy();
- if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
+ if (storagePolicyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
System.out.println("The storage policy of " + path + " is unspecified");
return 0;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index ad17a7de82..005607822d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -39,7 +39,6 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -128,7 +127,7 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
final int childrenNum = getInt(m, "childrenNum", -1);
final byte storagePolicy = m.containsKey("storagePolicy") ?
(byte) ((Number) m.get("storagePolicy")).longValue() :
- BlockStoragePolicySuite.ID_UNSPECIFIED;
+ HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
blockSize, mTime, aTime, permission, owner, group,
symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
index fa89ec3551..ffca099d2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
@@ -22,7 +22,6 @@
public class SWebHdfsFileSystem extends WebHdfsFileSystem {
- public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
public static final String SCHEME = "swebhdfs";
@Override
@@ -37,7 +36,7 @@ protected String getTransportScheme() {
@Override
protected Text getTokenKind() {
- return TOKEN_KIND;
+ return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index 164692e8af..e5e755ad4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.web;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
-
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
@@ -57,8 +55,8 @@ public void cancel(Token<?> token, Configuration conf) throws IOException {
@Override
public boolean handleKind(Text kind) {
- return kind.equals(WebHdfsFileSystem.TOKEN_KIND)
- || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
+ return kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)
+ || kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND);
}
@Override
@@ -87,9 +85,9 @@ private TokenManagementDelegator getInstance(Token<?> token,
}
private static String getSchemeByKind(Text kind) {
- if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
+ if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
return WebHdfsFileSystem.SCHEME;
- } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+ } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) {
return SWebHdfsFileSystem.SCHEME;
} else {
throw new IllegalArgumentException("Unsupported scheme");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 4c2122972e..6f944c7534 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -101,9 +101,6 @@ public class WebHdfsFileSystem extends FileSystem
/** Default connection factory may be overridden in tests to use smaller timeout values */
protected URLConnectionFactory connectionFactory;
- /** Delegation token kind */
- public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-
@VisibleForTesting
public static final String CANT_FALLBACK_TO_INSECURE_MSG =
"The client is configured to only allow connecting to secure cluster";
@@ -139,7 +136,7 @@ protected String getTransportScheme() {
}
protected Text getTokenKind() {
- return TOKEN_KIND;
+ return WebHdfsConstants.WEBHDFS_TOKEN_KIND;
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 8f99a850f5..89c8e11945 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import java.io.File;
import java.io.FileNotFoundException;
@@ -853,8 +853,10 @@ public void testSetStoragePolicy() throws Exception {
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
HdfsFileStatus[] barList = fs.getClient().listPaths(barDir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
- checkDirectoryListing(dirList, ID_UNSPECIFIED, ID_UNSPECIFIED);
- checkDirectoryListing(barList, ID_UNSPECIFIED, ID_UNSPECIFIED);
+ checkDirectoryListing(dirList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
+ BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+ checkDirectoryListing(barList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
+ BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
final Path invalidPath = new Path("/invalidPath");
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index 53ddbf77d1..0b814351ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -34,7 +34,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -103,11 +103,12 @@ public void testVolumeSize() throws Exception {
+ " percentRemaining " + percentRemaining);
assertTrue(configCapacity == (used + remaining + nonDFSUsed));
- assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
- assertTrue(percentRemaining == DFSUtil.getPercentRemaining(remaining,
- configCapacity));
- assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed,
- configCapacity));
+ assertTrue(percentUsed == DFSUtilClient.getPercentUsed(used,
+ configCapacity));
+ assertTrue(percentRemaining == DFSUtilClient.getPercentRemaining(
+ remaining, configCapacity));
+ assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
+ configCapacity));
}
DF df = new DF(new File(cluster.getDataDirectory()), conf);
@@ -152,10 +153,12 @@ public void testVolumeSize() throws Exception {
assertTrue(configCapacity == (used + remaining + nonDFSUsed));
// Ensure percent used is calculated based on used and present capacity
- assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
+ assertTrue(percentUsed == DFSUtilClient.getPercentUsed(used,
+ configCapacity));
// Ensure percent used is calculated based on used and present capacity
- assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed, configCapacity));
+ assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
+ configCapacity));
// Ensure percent used is calculated based on used and present capacity
assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index 45cd8fe3af..69d1a0456f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -328,7 +328,7 @@ private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
dtId, dtSecretManager);
SecurityUtil.setTokenService(
token, NetUtils.createSocketAddr(uri.getAuthority()));
- token.setKind(WebHdfsFileSystem.TOKEN_KIND);
+ token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
ugi.addToken(token);
}
return (WebHdfsFileSystem) FileSystem.get(uri, conf);