diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 00216673ac..0f1bf69423 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -9,6 +9,9 @@ Trunk (unreleased changes) HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia) + HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants -> + HdfsConstants. (Harsh J Chouraria via atm) + BUG FIXES HDFS-2287. TestParallelRead has a small off-by-one bug. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml index 2bfa2e0bbd..0d3ed89c7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml @@ -505,7 +505,7 @@ using 'bin/hadoop dfsadmin -safemode' command. NameNode front page shows whether Safemode is on or off. A more detailed description and configuration is maintained as JavaDoc for - setSafeMode(). + setSafeMode().
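For context, after this rename a client queries or toggles safe mode through HdfsConstants.SafeModeAction rather than FSConstants.SafeModeAction; the shell path mentioned above ('bin/hadoop dfsadmin -safemode') is unchanged. The sketch below is not part of the patch. It only uses the DistributedFileSystem.setSafeMode() signature that appears later in this diff, and it assumes a Configuration whose fs.defaultFS points at an HDFS cluster.

// Minimal usage sketch (not part of this patch): query safe mode with the
// renamed HdfsConstants.SafeModeAction enum.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SafeModeStatus {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // SAFEMODE_GET only reports the current state; SAFEMODE_ENTER and
      // SAFEMODE_LEAVE (see the ClientProtocol javadoc below) change it.
      boolean on = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
      System.out.println("Safe mode is " + (on ? "ON" : "OFF"));
    }
  }
}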

fsck diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 642f60be8b..7772ad9792 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -70,9 +70,9 @@ public class Hdfs extends AbstractFileSystem { * @throws IOException */ Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { - super(theUri, FSConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT); + super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT); - if (!theUri.getScheme().equalsIgnoreCase(FSConstants.HDFS_URI_SCHEME)) { + if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) { throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs"); } String host = theUri.getHost(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 237372377e..85639afc1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -60,10 +60,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -156,14 +156,14 @@ static class Conf { DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT); confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsConstants.WRITE_TIMEOUT); + HdfsServerConstants.WRITE_TIMEOUT); 
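The hunks around this point show where constants land after the split: client-facing values (HDFS_URI_SCHEME, SMALL_BUFFER_SIZE, QUOTA_DONT_SET, QUOTA_RESET, LEASE_SOFTLIMIT_PERIOD) stay in org.apache.hadoop.hdfs.protocol.HdfsConstants, while server-side values (READ_TIMEOUT, WRITE_TIMEOUT and their per-node extensions, NodeType, StartupOption, ReplicaState, BlockUCState) move to org.apache.hadoop.hdfs.server.common.HdfsServerConstants. The sketch below is not part of the patch; the class name is made up for illustration, and the two helpers simply mirror the timeout arithmetic from DFSClient.getDatanodeWriteTimeout()/getDatanodeReadTimeout() in the next hunk.

// Illustrative only -- shows the post-rename import split and the per-node
// timeout arithmetic used by the client.
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

public class RenamedConstantsDemo {
  // Pipeline write timeout grows with the number of datanodes involved.
  static int datanodeWriteTimeout(int configuredTimeout, int numNodes) {
    return configuredTimeout > 0
        ? configuredTimeout + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes
        : 0;
  }

  // The read timeout is extended the same way on the read path.
  static int datanodeReadTimeout(int socketTimeout, int numNodes) {
    return socketTimeout > 0
        ? HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes + socketTimeout
        : 0;
  }

  public static void main(String[] args) {
    System.out.println("scheme = " + HdfsConstants.HDFS_URI_SCHEME);
    System.out.println("write timeout, 3-node pipeline = "
        + datanodeWriteTimeout(HdfsServerConstants.WRITE_TIMEOUT, 3));
    System.out.println("read timeout, 3-node pipeline  = "
        + datanodeReadTimeout(HdfsServerConstants.READ_TIMEOUT, 3));
  }
}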
ioBufferSize = conf.getInt( CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT); socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); /** dfs.write.packet.size is an internal config variable */ writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT); @@ -279,12 +279,12 @@ int getMaxBlockAcquireFailures() { */ int getDatanodeWriteTimeout(int numNodes) { return (dfsClientConf.confTime > 0) ? - (dfsClientConf.confTime + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0; + (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0; } int getDatanodeReadTimeout(int numNodes) { return dfsClientConf.socketTimeout > 0 ? - (HdfsConstants.READ_TIMEOUT_EXTENSION * numNodes + + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes + dfsClientConf.socketTimeout) : 0; } @@ -1046,7 +1046,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src, out = new DataOutputStream( new BufferedOutputStream(NetUtils.getOutputStream(sock), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); in = new DataInputStream(NetUtils.getInputStream(sock)); if (LOG.isDebugEnabled()) { @@ -1225,7 +1225,7 @@ public DatanodeInfo[] datanodeReport(DatanodeReportType type) /** * Enter, leave or get safe mode. * - * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction) */ public boolean setSafeMode(SafeModeAction action) throws IOException { return namenode.setSafeMode(action); @@ -1293,7 +1293,7 @@ public void finalizeUpgrade() throws IOException { } /** - * @see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction) + * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction) */ public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) throws IOException { @@ -1392,10 +1392,10 @@ ContentSummary getContentSummary(String src) throws IOException { void setQuota(String src, long namespaceQuota, long diskspaceQuota) throws IOException { // sanity check - if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET && - namespaceQuota != FSConstants.QUOTA_RESET) || - (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET && - diskspaceQuota != FSConstants.QUOTA_RESET)) { + if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET && + namespaceQuota != HdfsConstants.QUOTA_RESET) || + (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET && + diskspaceQuota != HdfsConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Invalid values for quota : " + namespaceQuota + " and " + diskspaceQuota); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 03879338dd..c330297cd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import 
org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -166,7 +166,7 @@ private class Packet { this.seqno = HEART_BEAT_SEQNO; buffer = null; - int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER; + int packetSize = PacketHeader.PKT_HEADER_LEN + HdfsConstants.BYTES_IN_INTEGER; buf = new byte[packetSize]; checksumStart = dataStart = packetSize; @@ -234,12 +234,12 @@ ByteBuffer getBuffer() { dataStart - checksumLen , checksumLen); } - int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen; + int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen; //normally dataStart == checksumPos, i.e., offset is zero. buffer = ByteBuffer.wrap( buf, dataStart - checksumPos, - PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER); + PacketHeader.PKT_HEADER_LEN + pktLen - HdfsConstants.BYTES_IN_INTEGER); buf = null; buffer.mark(); @@ -849,7 +849,7 @@ private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets, final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2); out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(sock, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); //send the TRANSFER_BLOCK request new Sender(out).transferBlock(block, blockToken, dfsClient.clientName, @@ -1023,7 +1023,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS, // out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(s, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); assert null == blockReplyStream : "Previous blockReplyStream unclosed"; blockReplyStream = new DataInputStream(NetUtils.getInputStream(s)); @@ -1173,7 +1173,7 @@ static Socket createSocketForPipeline(final DatanodeInfo first, final int timeout = client.getDatanodeReadTimeout(length); NetUtils.connect(sock, isa, timeout); sock.setSoTimeout(timeout); - sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE); + sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); if(DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 8ccba1f8ae..538f4ba533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -646,7 +646,7 @@ static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr, static ClientProtocol createNamenode(ClientProtocol rpcNamenode) throws IOException { RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( - 5, FSConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS); + 5, 
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS); Map,RetryPolicy> remoteExceptionToPolicyMap = new HashMap, RetryPolicy>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index ef5ad425c9..68f8616941 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -49,9 +49,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -108,7 +108,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority()); this.dfs = new DFSClient(namenode, conf, statistics); - this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority()); + this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority()); this.workingDir = getHomeDirectory(); } @@ -642,9 +642,9 @@ public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type * Enter, leave or get safe mode. * * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode( - * FSConstants.SafeModeAction) + * HdfsConstants.SafeModeAction) */ - public boolean setSafeMode(FSConstants.SafeModeAction action) + public boolean setSafeMode(HdfsConstants.SafeModeAction action) throws IOException { return dfs.setSafeMode(action); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java index ba26ad2c24..35d45bac32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.StringUtils; @@ -162,7 +162,7 @@ private synchronized void remove(final LeaseRenewer r) { /** The time in milliseconds that the map became empty. 
*/ private long emptyTime = Long.MAX_VALUE; /** A fixed lease renewal time period in milliseconds */ - private long renewal = FSConstants.LEASE_SOFTLIMIT_PERIOD/2; + private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2; /** A daemon for renewing lease */ private Daemon daemon = null; @@ -352,7 +352,7 @@ synchronized void closeClient(final DFSClient dfsc) { //update renewal time if (renewal == dfsc.getHdfsTimeout()/2) { - long min = FSConstants.LEASE_SOFTLIMIT_PERIOD; + long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD; for(DFSClient c : dfsclients) { if (c.getHdfsTimeout() > 0) { final long timeout = c.getHdfsTimeout(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index 51311f5216..0be0bb9fb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; @@ -394,7 +394,7 @@ public static RemoteBlockReader newBlockReader( Socket sock, String file, throws IOException { // in and out will be closed when sock is closed (by the caller) final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( - NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT))); + NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT))); new Sender(out).readBlock(block, blockToken, clientName, startOffset, len); // @@ -486,7 +486,7 @@ public boolean hasSentStatusCode() { void sendReadResult(Socket sock, Status statusCode) { assert !sentStatusCode : "already sent status code to " + sock; try { - OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT); + OutputStream out = NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT); ClientReadStatusProto.newBuilder() .setStatus(statusCode) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java index 165096f24b..e1006a65d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 4153110ca9..e2ecbaa46d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; @@ -578,7 +578,7 @@ public void renewLease(String clientName) throws AccessControlException, * Return live datanodes if type is LIVE; dead datanodes if type is DEAD; * otherwise all datanodes if type is ALL. */ - public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type) + public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) throws IOException; /** @@ -601,7 +601,7 @@ public long getPreferredBlockSize(String filename) *

* Safe mode is entered automatically at name node startup. * Safe mode can also be entered manually using - * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}. + * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}. *

* At startup the name node accepts data node reports collecting * information about block locations. @@ -617,11 +617,11 @@ public long getPreferredBlockSize(String filename) * Then the name node leaves safe mode. *

* If safe mode is turned on manually using - * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)} + * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)} * then the name node stays in safe mode until it is manually turned off - * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}. + * using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}. * Current state of the name node can be verified using - * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)} + * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)} *

* Configuration parameters:

* dfs.safemode.threshold.pct is the threshold parameter.
* dfs.safemode.extension is the safe mode extension parameter.
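As a usage note (not part of the patch), the two parameters named above are ordinary Configuration keys. The values in this sketch are illustrative assumptions, not defaults taken from this diff.

// Illustrative values only -- not defaults defined by this patch.
import org.apache.hadoop.conf.Configuration;

public class SafeModeConfig {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Fraction of blocks that must satisfy minimal replication before the
    // name node may leave safe mode.
    conf.setFloat("dfs.safemode.threshold.pct", 0.999f);
    // Extra time (milliseconds) the name node stays in safe mode after the
    // threshold is reached.
    conf.setInt("dfs.safemode.extension", 30000);
    System.out.println(conf.get("dfs.safemode.threshold.pct"));
  }
}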
@@ -644,7 +644,7 @@ public long getPreferredBlockSize(String filename) * * @throws IOException */ - public boolean setSafeMode(FSConstants.SafeModeAction action) + public boolean setSafeMode(HdfsConstants.SafeModeAction action) throws IOException; /** @@ -685,7 +685,7 @@ public boolean setSafeMode(FSConstants.SafeModeAction action) /** * Report distributed upgrade progress or force current upgrade to proceed. * - * @param action {@link FSConstants.UpgradeAction} to perform + * @param action {@link HdfsConstants.UpgradeAction} to perform * @return upgrade status information or null if no upgrades are in progress * @throws IOException */ @@ -777,8 +777,8 @@ public ContentSummary getContentSummary(String path) *

* * The quota can have three types of values : (1) 0 or more will set - * the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies - * the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET} + * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies + * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET} * implies the quota will be reset. Any other value is a runtime error. * * @throws AccessControlException permission denied diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 260cd7600b..2f224409f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -55,15 +55,15 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; @@ -306,13 +306,13 @@ private void dispatch() { DataInputStream in = null; try { sock.connect(NetUtils.createSocketAddr( - target.datanode.getName()), HdfsConstants.READ_TIMEOUT); + target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT); sock.setKeepAlive(true); out = new DataOutputStream( new BufferedOutputStream( - sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE)); + sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE)); sendRequest(out); in = new DataInputStream( new BufferedInputStream( - sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE)); + sock.getInputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE)); receiveResponse(in); bytesMoved.inc(block.getNumBytes()); LOG.info( "Moving block " + block.getBlock().getBlockId() + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index e7c160af25..293d5c5969 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.hdfs.protocol.Block; -import 
org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.util.LightWeightGSet; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java index c3f676e3d2..29565ace47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java @@ -22,8 +22,8 @@ import java.util.List; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.NameNode; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 4e45449b1f..682d272922 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -50,8 +50,8 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 1b483a7537..6455b579a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; @@ -439,7 +439,7 @@ private boolean 
isGoodTarget(DatanodeDescriptor node, long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize); // check the remaining capacity of the target machine - if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) { + if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) { if(LOG.isDebugEnabled()) { threadLocalBuilder.get().append(node.toString()).append(": ") .append("Node ").append(NodeBase.getPath(node)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index a069d761f3..e0c2de955a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java index 990b089fd7..5f0b2604b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; /** * The exception is thrown when external version does not match @@ -34,7 +34,7 @@ public class IncorrectVersionException extends IOException { private static final long serialVersionUID = 1L; public IncorrectVersionException(int versionReported, String ofWhat) { - this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION); + this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION); } public IncorrectVersionException(int versionReported, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 81f182d6ca..6e220d6bd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -166,8 +166,8 @@ public static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom) try { s = new Socket(); - s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); } catch (IOException e) { deadNodes.add(chosenNode); s.close(); @@ -188,8 
+188,8 @@ public static void streamBlockInAscii(InetSocketAddress addr, String poolId, JspWriter out, Configuration conf) throws IOException { if (chunkSizeToView == 0) return; Socket s = new Socket(); - s.connect(addr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(addr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 19ad35bb9a..4c11973d4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -32,11 +32,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.util.VersionInfo; @@ -434,10 +434,10 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage) this.lock(); // lock storage if it exists - if (startOpt == HdfsConstants.StartupOption.FORMAT) + if (startOpt == HdfsServerConstants.StartupOption.FORMAT) return StorageState.NOT_FORMATTED; - if (startOpt != HdfsConstants.StartupOption.IMPORT) { + if (startOpt != HdfsServerConstants.StartupOption.IMPORT) { storage.checkOldLayoutStorage(this); } @@ -866,7 +866,7 @@ public static void deleteDir(File dir) throws IOException { * @throws IOException */ public void writeAll() throws IOException { - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; for (Iterator it = storageDirs.iterator(); it.hasNext();) { writeProperties(it.next()); } @@ -938,7 +938,7 @@ protected void setClusterId(Properties props, int layoutVersion, protected void setLayoutVersion(Properties props, StorageDirectory sd) throws IncorrectVersionException, InconsistentFSStateException { int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion")); - if (lv < FSConstants.LAYOUT_VERSION) { // future version + if (lv < HdfsConstants.LAYOUT_VERSION) { // future version throw new IncorrectVersionException(lv, "storage directory " + sd.root.getAbsolutePath()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java index 911dd407d4..405006bfb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java @@ -21,7 +21,7 @@ import java.util.SortedSet; import 
org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; /** @@ -69,7 +69,7 @@ public synchronized boolean initializeUpgrade() throws IOException { currentUpgrades = getDistributedUpgrades(); if(currentUpgrades == null) { // set new upgrade state - setUpgradeState(false, FSConstants.LAYOUT_VERSION); + setUpgradeState(false, HdfsConstants.LAYOUT_VERSION); return false; } Upgradeable curUO = currentUpgrades.first(); @@ -85,7 +85,7 @@ public synchronized boolean isUpgradeCompleted() { return false; } - public abstract HdfsConstants.NodeType getType(); + public abstract HdfsServerConstants.NodeType getType(); public abstract boolean startUpgrade() throws IOException; public abstract void completeUpgrade() throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java index c2558bed32..b59ef965d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java @@ -22,7 +22,7 @@ import java.util.TreeSet; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.StringUtils; /** @@ -40,7 +40,7 @@ public class UpgradeObjectCollection { static class UOSignature implements Comparable { int version; - HdfsConstants.NodeType type; + HdfsServerConstants.NodeType type; String className; UOSignature(Upgradeable uo) { @@ -53,7 +53,7 @@ int getVersion() { return version; } - HdfsConstants.NodeType getType() { + HdfsServerConstants.NodeType getType() { return type; } @@ -111,13 +111,13 @@ static void registerUpgrade(Upgradeable uo) { } public static SortedSet getDistributedUpgrades(int versionFrom, - HdfsConstants.NodeType type + HdfsServerConstants.NodeType type ) throws IOException { - assert FSConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version " - + versionFrom + ". Expected to be <= " + FSConstants.LAYOUT_VERSION; + assert HdfsConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version " + + versionFrom + ". Expected to be <= " + HdfsConstants.LAYOUT_VERSION; SortedSet upgradeObjects = new TreeSet(); for(UOSignature sig : upgradeTable) { - if(sig.getVersion() < FSConstants.LAYOUT_VERSION) + if(sig.getVersion() < HdfsConstants.LAYOUT_VERSION) continue; if(sig.getVersion() > versionFrom) break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java index 6081c4cfc6..016fd948e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java @@ -42,7 +42,7 @@ public interface Upgradeable extends Comparable { * Get the type of the software component, which this object is upgrading. 
* @return type */ - HdfsConstants.NodeType getType(); + HdfsServerConstants.NodeType getType(); /** * Description of the upgrade object for displaying. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index b547701b85..668b45bff8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -30,14 +30,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.HardLink; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; @@ -89,7 +89,7 @@ public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) { */ void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo, Collection dataDirs, StartupOption startOpt) throws IOException { - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Block-pool and name-node layout versions must be the same."; // 1. 
For each BP data directory analyze the state and @@ -171,7 +171,7 @@ private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOExce LOG.info("Formatting block pool " + blockpoolID + " directory " + bpSdir.getCurrentDir()); bpSdir.clearDirectory(); // create directory - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; this.cTime = nsInfo.getCTime(); this.namespaceID = nsInfo.getNamespaceID(); this.blockpoolID = nsInfo.getBlockPoolID(); @@ -239,7 +239,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, readProperties(sd); checkVersionUpgradable(this.layoutVersion); - assert this.layoutVersion >= FSConstants.LAYOUT_VERSION + assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION : "Future version is not allowed"; if (getNamespaceID() != nsInfo.getNamespaceID()) { throw new IOException("Incompatible namespaceIDs in " @@ -253,7 +253,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, + nsInfo.getBlockPoolID() + "; datanode blockpoolID = " + blockpoolID); } - if (this.layoutVersion == FSConstants.LAYOUT_VERSION + if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION && this.cTime == nsInfo.getCTime()) return; // regular startup @@ -261,7 +261,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, UpgradeManagerDatanode um = datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID()); verifyDistributedUpgradeProgress(um, nsInfo); - if (this.layoutVersion > FSConstants.LAYOUT_VERSION + if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { doUpgrade(sd, nsInfo); // upgrade return; @@ -327,7 +327,7 @@ void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException { // 3. Create new /current with block files hardlinks and VERSION linkAllBlocks(bpTmpDir, bpCurDir); - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; assert this.namespaceID == nsInfo.getNamespaceID() : "Data-node and name-node layout versions must be the same."; this.cTime = nsInfo.getCTime(); @@ -389,7 +389,7 @@ void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo) // the namespace state or can be further upgraded to it. 
// In another word, we can only roll back when ( storedLV >= software LV) // && ( DN.previousCTime <= NN.ctime) - if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION && + if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION && prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback throw new InconsistentFSStateException(bpSd.getRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index b51241ed3f..50e118aaa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; @@ -179,7 +179,7 @@ class BlockReceiver implements Closeable { this.out = streams.dataOut; this.cout = streams.checksumOut; this.checksumOut = new DataOutputStream(new BufferedOutputStream( - streams.checksumOut, FSConstants.SMALL_BUFFER_SIZE)); + streams.checksumOut, HdfsConstants.SMALL_BUFFER_SIZE)); // write data chunk header if creating a new replica if (isCreate) { BlockMetadataHeader.writeHeader(checksumOut, checksum); @@ -398,7 +398,7 @@ private void readNextPacket() throws IOException { buf.limit(bufRead); } - while (buf.remaining() < FSConstants.BYTES_IN_INTEGER) { + while (buf.remaining() < HdfsConstants.BYTES_IN_INTEGER) { if (buf.position() > 0) { shiftBufData(); } @@ -420,7 +420,7 @@ private void readNextPacket() throws IOException { // Subtract BYTES_IN_INTEGER since that accounts for the payloadLen that // we read above. 
int pktSize = payloadLen + PacketHeader.PKT_HEADER_LEN - - FSConstants.BYTES_IN_INTEGER; + - HdfsConstants.BYTES_IN_INTEGER; if (buf.remaining() < pktSize) { //we need to read more data diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index ac194622a3..b9e3858f3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -32,7 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; @@ -155,7 +155,7 @@ class BlockSender implements java.io.Closeable { if ( !corruptChecksumOk || datanode.data.metaFileExists(block) ) { checksumIn = new DataInputStream(new BufferedInputStream(datanode.data - .getMetaDataInputStream(block), FSConstants.IO_FILE_BUFFER_SIZE)); + .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE)); // read and handle the common header here. For now just a version BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); @@ -472,14 +472,14 @@ long sendBlock(DataOutputStream out, OutputStream baseStream, streamForSendChunks = baseStream; // assure a mininum buffer size. - maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE, + maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)/bytesPerChecksum; // allocate smaller buffer while using transferTo(). 
pktSize += checksumSize * maxChunksPerPacket; } else { - maxChunksPerPacket = Math.max(1, (FSConstants.IO_FILE_BUFFER_SIZE + maxChunksPerPacket = Math.max(1, (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum); pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index a9c29cc821..edc57fd797 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; @@ -83,9 +83,9 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.Storage; @@ -438,9 +438,9 @@ private static String getHostName(Configuration config) private void initConfig(Configuration conf) { this.socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); this.socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsConstants.WRITE_TIMEOUT); + HdfsServerConstants.WRITE_TIMEOUT); /* Based on results on different platforms, we might need set the default * to false on some of them. */ this.transferToAllowed = conf.getBoolean( @@ -623,7 +623,7 @@ private void initDataXceiver(Configuration conf) throws IOException { } else { ss = secureResources.getStreamingSocket(); } - ss.setReceiveBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE); + ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); // adjust machine name with the actual port int tmpPort = ss.getLocalPort(); selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), @@ -768,9 +768,9 @@ private NamespaceInfo handshake() throws IOException { } catch (InterruptedException ie) {} } - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Data-node and name-node layout versions must be the same." 
- + "Expected: "+ FSConstants.LAYOUT_VERSION + + "Expected: "+ HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } @@ -814,7 +814,7 @@ void setupBPStorage() throws IOException { if (simulatedFSDataset) { initFsDataSet(conf, dataDirs); bpRegistration.setStorageID(getStorageId()); //same as DN - bpRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION; + bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION; bpRegistration.storageInfo.namespaceID = bpNSInfo.namespaceID; bpRegistration.storageInfo.clusterID = bpNSInfo.clusterID; } else { @@ -1162,9 +1162,9 @@ void register() throws IOException { throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer); } - if (FSConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) { + if (HdfsConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) { LOG.warn("Data-node and name-node layout versions must be " + - "the same. Expected: "+ FSConstants.LAYOUT_VERSION + + "the same. Expected: "+ HdfsConstants.LAYOUT_VERSION + " actual "+ bpNSInfo.getLayoutVersion()); throw new IncorrectVersionException (bpNSInfo.getLayoutVersion(), "namenode"); @@ -1995,10 +1995,10 @@ public void run() { sock.setSoTimeout(targets.length * socketTimeout); long writeTimeout = socketWriteTimeout + - HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout); out = new DataOutputStream(new BufferedOutputStream(baseStream, - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, DataNode.this); DatanodeInfo srcNode = new DatanodeInfo(bpReg); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 784ab949ec..488c0188c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -43,15 +43,15 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Daemon; @@ -137,8 +137,8 @@ synchronized void recoverTransitionRead(DataNode datanode, // DN storage has been initialized, no need to do anything return; } - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : - "Data-node 
version " + FSConstants.LAYOUT_VERSION + + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + "Data-node version " + HdfsConstants.LAYOUT_VERSION + " and name-node layout version " + nsInfo.getLayoutVersion() + " must be the same."; @@ -268,7 +268,7 @@ static void makeBlockPoolDataDir(Collection dataDirs, void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { sd.clearDirectory(); // create directory - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; this.clusterID = nsInfo.getClusterID(); this.namespaceID = nsInfo.getNamespaceID(); this.cTime = 0; @@ -374,7 +374,7 @@ private void doTransition( DataNode datanode, } readProperties(sd); checkVersionUpgradable(this.layoutVersion); - assert this.layoutVersion >= FSConstants.LAYOUT_VERSION : + assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION : "Future version is not allowed"; boolean federationSupported = @@ -397,7 +397,7 @@ private void doTransition( DataNode datanode, } // regular start up - if (this.layoutVersion == FSConstants.LAYOUT_VERSION + if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION && this.cTime == nsInfo.getCTime()) return; // regular startup // verify necessity of a distributed upgrade @@ -406,7 +406,7 @@ private void doTransition( DataNode datanode, verifyDistributedUpgradeProgress(um, nsInfo); // do upgrade - if (this.layoutVersion > FSConstants.LAYOUT_VERSION + if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { doUpgrade(sd, nsInfo); // upgrade return; @@ -482,7 +482,7 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT)); // 4. Write version file under /current - layoutVersion = FSConstants.LAYOUT_VERSION; + layoutVersion = HdfsConstants.LAYOUT_VERSION; clusterID = nsInfo.getClusterID(); writeProperties(sd); @@ -542,7 +542,7 @@ void doRollback( StorageDirectory sd, // We allow rollback to a state, which is either consistent with // the namespace state or can be further upgraded to it. 
- if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION + if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION && prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback throw new InconsistentFSStateException(sd.getRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 374d309504..8d7d95f8aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.Op; @@ -53,7 +53,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.io.IOUtils; @@ -86,7 +86,7 @@ class DataXceiver extends Receiver implements Runnable { public DataXceiver(Socket s, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { super(new DataInputStream(new BufferedInputStream( - NetUtils.getInputStream(s), FSConstants.SMALL_BUFFER_SIZE))); + NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE))); this.s = s; this.isLocal = s.getInetAddress().equals(s.getLocalAddress()); @@ -203,7 +203,7 @@ public void readBlock(final ExtendedBlock block, OutputStream baseStream = NetUtils.getOutputStream(s, datanode.socketWriteTimeout); DataOutputStream out = new DataOutputStream(new BufferedOutputStream( - baseStream, FSConstants.SMALL_BUFFER_SIZE)); + baseStream, HdfsConstants.SMALL_BUFFER_SIZE)); checkAccess(out, true, block, blockToken, Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ); @@ -329,7 +329,7 @@ public void writeBlock(final ExtendedBlock block, final DataOutputStream replyOut = new DataOutputStream( new BufferedOutputStream( NetUtils.getOutputStream(s, datanode.socketWriteTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE); @@ -364,16 +364,16 @@ public void writeBlock(final ExtendedBlock block, mirrorSock = datanode.newSocket(); try { int timeoutValue = datanode.socketTimeout - + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length); + + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length); int writeTimeout = datanode.socketWriteTimeout + - (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length); + (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length); NetUtils.connect(mirrorSock, 
mirrorTarget, timeoutValue); mirrorSock.setSoTimeout(timeoutValue); - mirrorSock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE); + mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); mirrorOut = new DataOutputStream( new BufferedOutputStream( NetUtils.getOutputStream(mirrorSock, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock)); new Sender(mirrorOut).writeBlock(originalBlock, blockToken, @@ -524,7 +524,7 @@ public void blockChecksum(final ExtendedBlock block, final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block); final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream( - metadataIn, FSConstants.IO_FILE_BUFFER_SIZE)); + metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); updateCurrentThreadName("Getting checksum for block " + block); try { @@ -603,7 +603,7 @@ public void copyBlock(final ExtendedBlock block, OutputStream baseStream = NetUtils.getOutputStream( s, datanode.socketWriteTimeout); reply = new DataOutputStream(new BufferedOutputStream( - baseStream, FSConstants.SMALL_BUFFER_SIZE)); + baseStream, HdfsConstants.SMALL_BUFFER_SIZE)); // send status first writeResponse(SUCCESS, reply); @@ -682,14 +682,14 @@ public void replaceBlock(final ExtendedBlock block, OutputStream baseStream = NetUtils.getOutputStream(proxySock, datanode.socketWriteTimeout); proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream, - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); /* send request to the proxy */ new Sender(proxyOut).copyBlock(block, blockToken); // receive the response from the proxy proxyReply = new DataInputStream(new BufferedInputStream( - NetUtils.getInputStream(proxySock), FSConstants.IO_FILE_BUFFER_SIZE)); + NetUtils.getInputStream(proxySock), HdfsConstants.IO_FILE_BUFFER_SIZE)); BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom( HdfsProtoUtil.vintPrefixed(proxyReply)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index 89928a2971..f192747db5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.balancer.Balancer; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index 8348c8f9c9..5ecdca7b79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -53,10 +53,10 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; @@ -465,7 +465,7 @@ private long validateIntegrity(File blockFile, long genStamp) { } checksumIn = new DataInputStream( new BufferedInputStream(new FileInputStream(metaFile), - FSConstants.IO_FILE_BUFFER_SIZE)); + HdfsConstants.IO_FILE_BUFFER_SIZE)); // read and handle the common header here. For now just a version BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java index 76b0bba209..d0fc32c769 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java @@ -20,7 +20,7 @@ import java.io.File; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java index d2ab20e914..bd0485394a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; /** * This represents block replicas which are stored in DataNode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java index 921437df20..d2a6f46c2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java @@ -20,7 +20,7 @@ import java.io.File; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** This class represents replicas being written. 
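The replica-class hunks above are purely mechanical: the nested server-side enum previously reached as HdfsConstants.ReplicaState is now HdfsServerConstants.ReplicaState, while the client-facing constants live in org.apache.hadoop.hdfs.protocol.HdfsConstants. As a rough illustration of code compiled against the renamed server enum, a minimal sketch (the helper class itself is hypothetical and not part of this patch):

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

// Hypothetical helper, not in this patch: maps the renamed server-side
// replica lifecycle enum to a readable label.
class ReplicaStateLabeler {
  static String label(ReplicaState state) {
    switch (state) {
      case FINALIZED: return "finalized replica";
      case RBW:       return "replica being written";
      case RWR:       return "replica waiting to be recovered";
      case RUR:       return "replica under recovery";
      case TEMPORARY: return "temporary replica";
      default:        return "unknown";
    }
  }
}
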
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java index d246f6f8dc..447b9337ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java @@ -23,7 +23,7 @@ import java.io.RandomAccessFile; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java index c2cb5cfc40..972353962c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java @@ -19,7 +19,7 @@ import java.io.File; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java index 86bef1ea38..91045b7ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java @@ -20,7 +20,7 @@ import java.io.File; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index d92b5913da..c6744f9317 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.http.HttpServer; import org.mortbay.jetty.nio.SelectChannelConnector; @@ -71,7 +71,7 @@ public void init(DaemonContext context) throws Exception { // Obtain secure port for data 
streaming to datanode InetSocketAddress socAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsConstants.WRITE_TIMEOUT); + HdfsServerConstants.WRITE_TIMEOUT); ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java index 5fc2f2b5d6..478fb5660d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java @@ -19,8 +19,8 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeManager; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; @@ -47,8 +47,8 @@ class UpgradeManagerDatanode extends UpgradeManager { this.bpid = bpid; } - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.DATA_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.DATA_NODE; } synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException { @@ -57,7 +57,7 @@ synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException { DataNode.LOG.info("\n Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is initialized."); + + HdfsConstants.LAYOUT_VERSION + " is initialized."); UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); curUO.setDatanode(dataNode, this.bpid); upgradeState = curUO.preUpgradeAction(nsInfo); @@ -102,7 +102,7 @@ public synchronized boolean startUpgrade() throws IOException { if(currentUpgrades == null) { DataNode.LOG.info("\n Distributed upgrade for DataNode version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " cannot be started. " + + HdfsConstants.LAYOUT_VERSION + " cannot be started. " + "The upgrade object is not defined."); return false; } @@ -115,7 +115,7 @@ public synchronized boolean startUpgrade() throws IOException { DataNode.LOG.info("\n Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is started."); + + HdfsConstants.LAYOUT_VERSION + " is started."); return true; } @@ -130,7 +130,7 @@ synchronized void processUpgradeCommand(UpgradeCommand command throw new IOException( "Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " cannot be started. " + + HdfsConstants.LAYOUT_VERSION + " cannot be started. 
" + "The upgrade object is not defined."); } @@ -145,7 +145,7 @@ public synchronized void completeUpgrade() throws IOException { DataNode.LOG.info("\n Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is complete."); + + HdfsConstants.LAYOUT_VERSION + " is complete."); } synchronized void shutdownUpgrade() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java index 9e51f230f9..ddb1d6029f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeObject; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; @@ -36,8 +36,8 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run private DataNode dataNode = null; private String bpid = null; - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.DATA_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.DATA_NODE; } protected DataNode getDatanode() { @@ -118,7 +118,7 @@ public void run() { if(getUpgradeStatus() < 100) { DataNode.LOG.info("\n Distributed upgrade for DataNode version " + getVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " cannot be completed."); + + HdfsConstants.LAYOUT_VERSION + " cannot be completed."); } // Complete the upgrade by calling the manager method diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index d72509cee2..ce2a79b488 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageState; @@ -106,7 +106,7 @@ void recoverCreateRead() throws IOException { StorageDirectory sd = it.next(); StorageState curState; try { - curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage); + curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index ad575b9e27..1efd4fae3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -26,8 +26,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; @@ -134,7 +134,7 @@ protected void initialize(Configuration conf) throws IOException { // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( - FSConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); + HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); clusterId = nsInfo.getClusterID(); blockPoolId = nsInfo.getBlockPoolID(); @@ -356,9 +356,9 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode) LOG.fatal(errorMsg); throw new IOException(errorMsg); } - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Active and backup node layout versions must be the same. 
Expected: " - + FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); + + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index f75410031d..5e73d93ab3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 532b2f2dcf..4d86fd4b30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -24,7 +24,7 @@ import java.io.BufferedInputStream; import java.io.EOFException; import java.io.DataInputStream; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.io.IOUtils; @@ -143,11 +143,11 @@ static int readLogVersion(DataInputStream in) throw new LogHeaderCorruptException( "Reached EOF when reading log header"); } - if (logVersion < FSConstants.LAYOUT_VERSION) { // future version + if (logVersion < HdfsConstants.LAYOUT_VERSION) { // future version throw new LogHeaderCorruptException( "Unexpected version of the file system log file: " + logVersion + ". 
Current version = " - + FSConstants.LAYOUT_VERSION + "."); + + HdfsConstants.LAYOUT_VERSION + "."); } assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION : "Unsupported version " + logVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index f79f44266e..be75f637a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.IOUtils; import com.google.common.annotations.VisibleForTesting; @@ -109,7 +109,7 @@ void writeRaw(byte[] bytes, int offset, int length) throws IOException { void create() throws IOException { fc.truncate(0); fc.position(0); - doubleBuf.getCurrentBuf().writeInt(FSConstants.LAYOUT_VERSION); + doubleBuf.getCurrentBuf().writeInt(HdfsConstants.LAYOUT_VERSION); setReadyToFlush(); flush(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index 0dd90588f4..5312b145ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.io.OutputStream; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; @@ -129,7 +129,7 @@ public TxnBuffer(int initBufferSize) { } public void writeOp(FSEditLogOp op) throws IOException { - if (firstTxId == FSConstants.INVALID_TXID) { + if (firstTxId == HdfsConstants.INVALID_TXID) { firstTxId = op.txid; } else { assert op.txid > firstTxId; @@ -141,7 +141,7 @@ public void writeOp(FSEditLogOp op) throws IOException { @Override public DataOutputBuffer reset() { super.reset(); - firstTxId = FSConstants.INVALID_TXID; + firstTxId = HdfsConstants.INVALID_TXID; numTxns = 0; return this; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 4ad7c7e451..9cd1799ab3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.FSLimitException; import 
org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; @@ -55,8 +55,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.util.ByteArray; /************************************************* @@ -1876,10 +1876,10 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) UnresolvedLinkException { assert hasWriteLock(); // sanity check - if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET && - nsQuota < FSConstants.QUOTA_RESET) || - (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET && - dsQuota < FSConstants.QUOTA_RESET)) { + if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET && + nsQuota < HdfsConstants.QUOTA_RESET) || + (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET && + dsQuota < HdfsConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Illegal value for nsQuota or " + "dsQuota : " + nsQuota + " and " + dsQuota); @@ -1893,16 +1893,16 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) throw new FileNotFoundException("Directory does not exist: " + srcs); } else if (!targetNode.isDirectory()) { throw new FileNotFoundException("Cannot set quota on a file: " + srcs); - } else if (targetNode.isRoot() && nsQuota == FSConstants.QUOTA_RESET) { + } else if (targetNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) { throw new IllegalArgumentException("Cannot clear namespace quota on root."); } else { // a directory inode INodeDirectory dirNode = (INodeDirectory)targetNode; long oldNsQuota = dirNode.getNsQuota(); long oldDsQuota = dirNode.getDsQuota(); - if (nsQuota == FSConstants.QUOTA_DONT_SET) { + if (nsQuota == HdfsConstants.QUOTA_DONT_SET) { nsQuota = oldNsQuota; } - if (dsQuota == FSConstants.QUOTA_DONT_SET) { + if (dsQuota == HdfsConstants.QUOTA_DONT_SET) { dsQuota = oldDsQuota; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 495c42e45a..82ad9d7ec7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -29,9 +29,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import static org.apache.hadoop.hdfs.server.common.Util.now; import 
org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; @@ -91,7 +91,7 @@ private enum State { // the first txid of the log that's currently open for writing. // If this value is N, we are currently writing to edits_inprogress_N - private long curSegmentTxId = FSConstants.INVALID_TXID; + private long curSegmentTxId = HdfsConstants.INVALID_TXID; // the time of printing the statistics to the log file. private long lastPrintTime; @@ -904,7 +904,7 @@ public void purgeLogsOlderThan(final long minTxIdToKeep) { // synchronized to prevent findbugs warning about inconsistent // synchronization. This will be JIT-ed out if asserts are // off. - assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op + assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op minTxIdToKeep <= curSegmentTxId : "cannot purge logs older than txid " + minTxIdToKeep + " when current segment starts at " + curSegmentTxId; @@ -1078,7 +1078,7 @@ private void disableAndReportErrorOnJournals(List badJournals) static class JournalAndStream { private final JournalManager manager; private EditLogOutputStream stream; - private long segmentStartsAtTxId = FSConstants.INVALID_TXID; + private long segmentStartsAtTxId = HdfsConstants.INVALID_TXID; private JournalAndStream(JournalManager manager) { this.manager = manager; @@ -1110,7 +1110,7 @@ void abort() { LOG.error("Unable to abort stream " + stream, ioe); } stream = null; - segmentStartsAtTxId = FSConstants.INVALID_TXID; + segmentStartsAtTxId = HdfsConstants.INVALID_TXID; } private boolean isActive() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index db985691f6..51865c82de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -27,7 +27,7 @@ import java.util.EnumMap; import org.apache.hadoop.fs.permission.PermissionStatus; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; @@ -274,14 +274,14 @@ int loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit, SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op; fsDir.unprotectedSetQuota(setNSQuotaOp.src, setNSQuotaOp.nsQuota, - FSConstants.QUOTA_DONT_SET); + HdfsConstants.QUOTA_DONT_SET); break; } case OP_CLEAR_NS_QUOTA: { ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op; fsDir.unprotectedSetQuota(clearNSQuotaOp.src, - FSConstants.QUOTA_RESET, - FSConstants.QUOTA_DONT_SET); + HdfsConstants.QUOTA_RESET, + HdfsConstants.QUOTA_DONT_SET); break; } @@ -435,7 +435,7 @@ private void check203UpgradeFailure(int logVersion, IOException ex) // The editlog must be emptied by restarting the namenode, before proceeding // with the upgrade. if (Storage.is203LayoutVersion(logVersion) - && logVersion != FSConstants.LAYOUT_VERSION) { + && logVersion != HdfsConstants.LAYOUT_VERSION) { String msg = "During upgrade failed to load the editlog version " + logVersion + " from release 0.20.203. Please go back to the old " + " release and restart the namenode. 
This empties the editlog " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 6529c876c0..25f99b4081 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 8b259018f1..0ad96e0868 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -35,7 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; @@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageState; import org.apache.hadoop.hdfs.server.common.Util; import static org.apache.hadoop.hdfs.server.common.Util.now; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; @@ -227,11 +227,11 @@ boolean recoverTransitionRead(StartupOption startOpt) } if (startOpt != StartupOption.UPGRADE && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION - && layoutVersion != FSConstants.LAYOUT_VERSION) { + && layoutVersion != HdfsConstants.LAYOUT_VERSION) { throw new IOException( "\nFile system image contains an old layout version " + storage.getLayoutVersion() + ".\nAn upgrade to version " - + FSConstants.LAYOUT_VERSION + " is required.\n" + + HdfsConstants.LAYOUT_VERSION + " is required.\n" + "Please restart NameNode with -upgrade option."); } @@ -349,7 +349,7 @@ private void doUpgrade() throws IOException { long oldCTime = storage.getCTime(); storage.cTime = now(); // generate new cTime for the state int oldLV = storage.getLayoutVersion(); - storage.layoutVersion = FSConstants.LAYOUT_VERSION; + storage.layoutVersion = HdfsConstants.LAYOUT_VERSION; List errorSDs = 
Collections.synchronizedList(new ArrayList()); @@ -423,7 +423,7 @@ private void doRollback() throws IOException { // Directories that don't have previous state do not rollback boolean canRollback = false; FSImage prevState = new FSImage(conf, getFSNamesystem()); - prevState.getStorage().layoutVersion = FSConstants.LAYOUT_VERSION; + prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION; for (Iterator it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); File prevDir = sd.getPreviousDir(); @@ -438,12 +438,12 @@ private void doRollback() throws IOException { // read and verify consistency of the prev dir prevState.getStorage().readPreviousVersionProperties(sd); - if (prevState.getLayoutVersion() != FSConstants.LAYOUT_VERSION) { + if (prevState.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) { throw new IOException( "Cannot rollback to storage version " + prevState.getLayoutVersion() + " using this version of the NameNode, which uses storage version " + - FSConstants.LAYOUT_VERSION + ". " + + HdfsConstants.LAYOUT_VERSION + ". " + "Please use the previous version of HDFS to perform the rollback."); } canRollback = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 453985d917..8579764419 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -39,7 +39,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; @@ -555,7 +555,7 @@ void save(File newFile, DigestOutputStream fos = new DigestOutputStream(fout, digester); DataOutputStream out = new DataOutputStream(fos); try { - out.writeInt(FSConstants.LAYOUT_VERSION); + out.writeInt(HdfsConstants.LAYOUT_VERSION); out.writeInt(sourceNamesystem.getFSImage() .getStorage().getNamespaceID()); // TODO bad dependency out.writeLong(fsDir.rootDir.numItemsInTree()); @@ -568,7 +568,7 @@ void save(File newFile, " using " + compression); - byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH]; + byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH]; ByteBuffer strbuf = ByteBuffer.wrap(byteStore); // save the root FSImageSerialization.saveINode2Image(fsDir.rootDir, out); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 277fac0eb9..3ed8513636 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; 
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java index 0814a140b5..c3a7694789 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java @@ -35,7 +35,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; @@ -224,7 +224,7 @@ LogLoadPlan createLogLoadPlan(long sinceTxId, long maxStartTxId) throws IOExcept logGroup.planRecovery(); - if (expectedTxId != FSConstants.INVALID_TXID && logStartTxId != expectedTxId) { + if (expectedTxId != HdfsConstants.INVALID_TXID && logStartTxId != expectedTxId) { throw new IOException("Expected next log group would start at txid " + expectedTxId + " but starts at txid " + logStartTxId); } @@ -239,7 +239,7 @@ LogLoadPlan createLogLoadPlan(long sinceTxId, long maxStartTxId) throws IOExcept } else { // the log group was in-progress so we don't know what ID // the next group should start from. 
- expectedTxId = FSConstants.INVALID_TXID; + expectedTxId = HdfsConstants.INVALID_TXID; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ceb557b4e6..0f67d7cb99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -78,10 +78,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -99,9 +99,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.Util; @@ -2756,7 +2756,7 @@ void finalizeUpgrade() throws IOException { * not tracked because the name node is not intended to leave safe mode * automatically in the case. 
* - * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction) * @see SafeModeMonitor */ class SafeModeInfo { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java index b7587c0dd1..d8bd502597 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper; @@ -120,7 +120,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response new HdfsConfiguration(datanode.getConf()); final int socketTimeout = conf.getInt( DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java index 3831b4580f..8476e27cdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java index 7663ecff76..2440c4dd12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; /** * I-node for file being written. 
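The LeaseManager hunk below splits constants along the same line as the rest of the patch: the lease soft/hard limit periods are client-visible and move to org.apache.hadoop.hdfs.protocol.HdfsConstants, while the namenode-only lease holder name and re-check interval stay server-side in HdfsServerConstants. A minimal sketch using only those renamed constants (the sanity-check class itself is hypothetical, not part of this patch):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

// Hypothetical sanity check, not in this patch: the soft limit should expire
// before the hard limit, and the namenode should re-check leases more often
// than the soft limit period.
class LeaseTimingSanityCheck {
  static boolean timingsAreConsistent() {
    return HdfsConstants.LEASE_SOFTLIMIT_PERIOD < HdfsConstants.LEASE_HARDLIMIT_PERIOD
        && HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL
            < HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
  }
}
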
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 257d37e0cb..44857739b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -32,8 +32,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import static org.apache.hadoop.hdfs.server.common.Util.now; @@ -65,8 +65,8 @@ public class LeaseManager { private final FSNamesystem fsnamesystem; - private long softLimit = FSConstants.LEASE_SOFTLIMIT_PERIOD; - private long hardLimit = FSConstants.LEASE_HARDLIMIT_PERIOD; + private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD; + private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD; // // Used for handling lock-leases @@ -379,7 +379,7 @@ public void run() { try { - Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL); + Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL); } catch(InterruptedException ie) { if (LOG.isDebugEnabled()) { LOG.debug(name + " is interrupted", ie); @@ -409,7 +409,7 @@ private synchronized void checkLeases() { oldest.getPaths().toArray(leasePaths); for(String p : leasePaths) { try { - if(fsnamesystem.internalReleaseLease(oldest, p, HdfsConstants.NAMENODE_LEASE_HOLDER)) { + if(fsnamesystem.internalReleaseLease(oldest, p, HdfsServerConstants.NAMENODE_LEASE_HOLDER)) { LOG.info("Lease recovery for file " + p + " is complete. File closed."); removing.add(p); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index 00461e2fb3..869922abb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -42,11 +42,11 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.UpgradeManager; @@ -126,7 +126,7 @@ public boolean isOfType(StorageDirType type) { * recent fsimage file. This does not include any transactions * that have since been written to the edit log. 
*/ - protected long mostRecentCheckpointTxId = FSConstants.INVALID_TXID; + protected long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID; /** * list of failed (and thus removed) storages @@ -501,7 +501,7 @@ private void format(StorageDirectory sd) throws IOException { * Format all available storage directories. */ public void format(String clusterId) throws IOException { - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; this.namespaceID = newNamespaceID(); this.clusterID = clusterId; this.blockpoolID = newBlockPoolID(); @@ -574,7 +574,7 @@ private void setDeprecatedPropertiesForUpgrade(Properties props) { * This should only be used during upgrades. */ String getDeprecatedProperty(String prop) { - assert getLayoutVersion() > FSConstants.LAYOUT_VERSION : + assert getLayoutVersion() > HdfsConstants.LAYOUT_VERSION : "getDeprecatedProperty should only be done when loading " + "storage from past versions during upgrade."; return deprecatedProperties.get(prop); @@ -764,7 +764,7 @@ void verifyDistributedUpgradeProgress(StartupOption startOpt if(upgradeManager.getDistributedUpgrades() != null) throw new IOException("\n Distributed upgrade for NameNode version " + upgradeManager.getUpgradeVersion() - + " to current LV " + FSConstants.LAYOUT_VERSION + + " to current LV " + HdfsConstants.LAYOUT_VERSION + " is required.\n Please restart NameNode" + " with -upgrade option."); } @@ -780,7 +780,7 @@ void initializeDistributedUpgrade() throws IOException { writeAll(); LOG.info("\n Distributed upgrade for NameNode version " + upgradeManager.getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is initialized."); + + HdfsConstants.LAYOUT_VERSION + " is initialized."); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 52f576484a..5c481f7652 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -55,12 +55,12 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; -import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_LENGTH; -import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_DEPTH; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -68,8 +68,8 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import 
org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; @@ -308,12 +308,12 @@ static InetSocketAddress getAddress(URI filesystemURI) { "Invalid URI for NameNode address (check %s): %s has no authority.", FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString())); } - if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase( + if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase( filesystemURI.getScheme())) { throw new IllegalArgumentException(String.format( "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.", FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(), - FSConstants.HDFS_URI_SCHEME)); + HdfsConstants.HDFS_URI_SCHEME)); } return getAddress(authority); } @@ -321,7 +321,7 @@ static InetSocketAddress getAddress(URI filesystemURI) { public static URI getUri(InetSocketAddress namenode) { int port = namenode.getPort(); String portString = port == DEFAULT_PORT ? "" : (":"+port); - return URI.create(FSConstants.HDFS_URI_SCHEME + "://" + return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + namenode.getHostName()+portString); } @@ -1279,7 +1279,7 @@ void verifyRequest(NodeRegistration nodeReg) throws IOException { * @throws IOException */ void verifyVersion(int version) throws IOException { - if (version != FSConstants.LAYOUT_VERSION) + if (version != HdfsConstants.LAYOUT_VERSION) throw new IncorrectVersionException(version, "data node"); } @@ -1573,7 +1573,7 @@ public static void initializeGenericKeys(Configuration conf) { DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS); if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) { - URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY)); conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 26376d476f..e74859af18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NodeBase; @@ -502,8 +502,8 @@ private void copyBlock(DFSClient dfs, LocatedBlock lblock, } try { s = new Socket(); - s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - 
s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index f50e1f8b9f..de94cbeba0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index f126f17eeb..f8339eb69d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -45,8 +45,8 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -456,7 +456,7 @@ InetSocketAddress getNameNodeAddress() { */ private String getInfoServer() throws IOException { URI fsName = FileSystem.getDefaultUri(conf); - if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) { + if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) { throw new IOException("This is not a DFS"); } @@ -793,7 +793,7 @@ void recoverCreate(boolean format) throws IOException { StorageState curState; try { - curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage); + curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 944e998ecf..cc8dccaf1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -27,7 +27,7 @@ 
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.util.DataTransferThrottler; @@ -124,7 +124,7 @@ static void uploadImageFromStorage(String fsName, static void getFileServer(OutputStream outstream, File localfile, DataTransferThrottler throttler) throws IOException { - byte buf[] = new byte[FSConstants.IO_FILE_BUFFER_SIZE]; + byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; FileInputStream infile = null; try { infile = new FileInputStream(localfile); @@ -139,7 +139,7 @@ static void getFileServer(OutputStream outstream, File localfile, && localfile.getAbsolutePath().contains("fsimage")) { // Test sending image shorter than localfile long len = localfile.length(); - buf = new byte[(int)Math.min(len/2, FSConstants.IO_FILE_BUFFER_SIZE)]; + buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)]; // This will read at most half of the image // and the rest of the image will be sent over the wire infile.read(buf); @@ -179,7 +179,7 @@ static void getFileServer(OutputStream outstream, File localfile, static MD5Hash getFileClient(String nnHostPort, String queryString, List localPaths, NNStorage dstStorage, boolean getChecksum) throws IOException { - byte[] buf = new byte[FSConstants.IO_FILE_BUFFER_SIZE]; + byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://"; StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?"); str.append(queryString); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java index b4e89e3fa1..a46efae8a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java @@ -19,9 +19,9 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.UpgradeManager; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; @@ -38,8 +38,8 @@ * and updates its status. 
*/ class UpgradeManagerNamenode extends UpgradeManager { - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.NAME_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.NAME_NODE; } private final FSNamesystem namesystem; @@ -66,7 +66,7 @@ public synchronized boolean startUpgrade() throws IOException { this.broadcastCommand = currentUpgrades.first().startUpgrade(); NameNode.LOG.info("\n Distributed upgrade for NameNode version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is started."); + + HdfsConstants.LAYOUT_VERSION + " is started."); return true; } @@ -75,7 +75,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command if(NameNode.LOG.isDebugEnabled()) { NameNode.LOG.debug("\n Distributed upgrade for NameNode version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is processing upgrade command: " + + HdfsConstants.LAYOUT_VERSION + " is processing upgrade command: " + command.getAction() + " status = " + getUpgradeStatus() + "%"); } if(currentUpgrades == null) { @@ -96,7 +96,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command curUO.completeUpgrade(); NameNode.LOG.info("\n Distributed upgrade for NameNode version " + curUO.getVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is complete."); + + HdfsConstants.LAYOUT_VERSION + " is complete."); // proceede with the next one currentUpgrades.remove(curUO); if(currentUpgrades.isEmpty()) { // all upgrades are done @@ -110,7 +110,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command public synchronized void completeUpgrade() throws IOException { // set and write new upgrade state into disk - setUpgradeState(false, FSConstants.LAYOUT_VERSION); + setUpgradeState(false, HdfsConstants.LAYOUT_VERSION); namesystem.getFSImage().getStorage().writeAll(); currentUpgrades = null; broadcastCommand = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java index 0872eb22c0..5a75554544 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java @@ -20,7 +20,7 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeObject; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; @@ -44,8 +44,8 @@ public abstract class UpgradeObjectNamenode extends UpgradeObject { public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command ) throws IOException; - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.NAME_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.NAME_NODE; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index 2ee1866617..a75701ef86 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java index 80426605a0..aa98ab19b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java @@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; /** * Information sent by a subordinate name-node to the active name-node diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index e847cfc371..cc33a04d1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NNStorage; @@ -53,7 +53,7 @@ public NamespaceInfo() { public NamespaceInfo(int nsID, String clusterID, String bpID, long cT, int duVersion) { - super(FSConstants.LAYOUT_VERSION, nsID, clusterID, cT); + super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT); blockPoolID = bpID; buildVersion = Storage.getBuildVersion(); this.distributedUpgradeVersion = duVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java index 5b8ac59f37..c82494d5ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java @@ -22,15 +22,15 @@ import java.io.IOException; import java.util.Comparator; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.Writable; import 
com.google.common.base.Function; import com.google.common.collect.ComparisonChain; public class RemoteEditLog implements Writable, Comparable { - private long startTxId = FSConstants.INVALID_TXID; - private long endTxId = FSConstants.INVALID_TXID; + private long startTxId = HdfsConstants.INVALID_TXID; + private long endTxId = HdfsConstants.INVALID_TXID; public RemoteEditLog() { } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java index cdf1d791d2..bf9b68b1b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableFactories; import org.apache.hadoop.io.WritableFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 91cf9eec58..b4f4e7c4d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -40,9 +40,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.RPC; @@ -115,7 +115,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(path, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET); } } @@ -161,7 +161,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, quota, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(path, quota, HdfsConstants.QUOTA_DONT_SET); } } @@ -200,7 +200,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET); + dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET); } } @@ -250,7 +250,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, quota); + dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, quota); } } @@ -288,7 +288,7 @@ public void report() throws 
IOException { long used = ds.getUsed(); long remaining = ds.getRemaining(); long presentCapacity = used + remaining; - boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET); + boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET); UpgradeStatusReport status = dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS); @@ -361,17 +361,17 @@ public void setSafeMode(String[] argv, int idx) throws IOException { printUsage("-safemode"); return; } - FSConstants.SafeModeAction action; + HdfsConstants.SafeModeAction action; Boolean waitExitSafe = false; if ("leave".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_LEAVE; + action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE; } else if ("enter".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_ENTER; + action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER; } else if ("get".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_GET; + action = HdfsConstants.SafeModeAction.SAFEMODE_GET; } else if ("wait".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_GET; + action = HdfsConstants.SafeModeAction.SAFEMODE_GET; waitExitSafe = true; } else { printUsage("-safemode"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java index c68cef6a1d..617b90026c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import static org.apache.hadoop.fs.FileContextTestHelper.*; import org.apache.hadoop.ipc.RemoteException; import static org.junit.Assert.*; @@ -212,7 +212,7 @@ public void testSetReplication() throws IOException { public void testCreateLinkMaxPathLink() throws IOException { Path dir = new Path(testBaseDir1()); Path file = new Path(testBaseDir1(), "file"); - final int maxPathLen = FSConstants.MAX_PATH_LENGTH; + final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH; final int dirLen = dir.toString().length() + 1; int len = maxPathLen - dirLen; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index a932f881a2..a437fffadd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; import org.junit.AfterClass; @@ -108,11 +108,11 @@ public void testOldRenameWithQuota() throws Exception { Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2"); createFile(src1); createFile(src2); 
- fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET, - FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET, + HdfsConstants.QUOTA_DONT_SET); fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true); - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); /* * Test1: src does not exceed quota and dst has no quota check and hence * accommodates rename @@ -130,7 +130,7 @@ public void testOldRenameWithQuota() throws Exception { * Test3: src exceeds quota and dst has *no* quota to accommodate rename */ // src1 has no quota to accommodate new rename node - fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET); oldRename(dst1, src1, false, true); } @@ -143,11 +143,11 @@ public void testRenameWithQuota() throws Exception { Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2"); createFile(src1); createFile(src2); - fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET, - FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET, + HdfsConstants.QUOTA_DONT_SET); fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true); - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); /* * Test1: src does not exceed quota and dst has no quota check and hence * accommodates rename @@ -170,7 +170,7 @@ public void testRenameWithQuota() throws Exception { * rename to a destination that does not exist */ // src1 has no quota to accommodate new rename node - fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET); rename(dst1, src1, false, false, true, Rename.NONE); /* @@ -179,9 +179,9 @@ public void testRenameWithQuota() throws Exception { * is same as quota needed by src. 
*/ // src1 has no quota to accommodate new rename node - fs.setQuota(src1.getParent(), 100, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET); createFile(src1); - fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET); rename(dst1, src1, true, true, false, Rename.OVERWRITE); } @@ -208,7 +208,7 @@ public void testEditsLogOldRename() throws Exception { createFile(dst1); // Set quota so that dst1 parent cannot allow under it new files/directories - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); // Free up quota for a subsequent rename fs.delete(dst1, true); oldRename(src1, dst1, true, false); @@ -237,7 +237,7 @@ public void testEditsLogRename() throws Exception { createFile(dst1); // Set quota so that dst1 parent cannot allow under it new files/directories - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); // Free up quota for a subsequent rename fs.delete(dst1, true); rename(src1, dst1, true, true, false, Rename.OVERWRITE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index 25585cecbb..c61e65b6c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; @@ -140,8 +140,8 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR DatanodeInfo[] nodes = testBlock.getLocations(); targetAddr = NetUtils.createSocketAddr(nodes[0].getName()); sock = new Socket(); - sock.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsConstants.READ_TIMEOUT); + sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); return BlockReaderFactory.newBlockReader( sock, targetAddr.toString()+ ":" + block.getBlockId(), block, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 3d8b6f29f5..c7566d2c62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; @@ -63,7 +63,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -670,7 +670,7 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b, final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length); final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(s, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s)); // send the request diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java index 8e19f45641..f82986f331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java @@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog; import org.apache.hadoop.net.DNS; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 89627b71b9..16d2bc2112 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -47,8 +47,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java index d06d9766c3..1613e82ca2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java @@ -23,7 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; -import 
org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; @@ -85,7 +85,7 @@ public void testAbandonBlock() throws IOException { public void testQuotaUpdatedWhenBlockAbandoned() throws IOException { DistributedFileSystem dfs = (DistributedFileSystem)fs; // Setting diskspace quota to 3MB - dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024); + dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024); // Start writing a file with 2 replicas to ensure each datanode has one. // Block Size is 1MB. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index 1d43ea7e6b..1407fd46a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index 5a778049d5..7a93226e29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -25,8 +25,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import com.google.common.collect.Lists; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 511e9c1b92..cdf3665af1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import java.io.File; import java.io.IOException; @@ -32,8 +32,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import 
org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java index 0455366e80..f0c20a1ca9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import java.io.File; @@ -27,11 +27,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; /** * This test ensures the appropriate response (successful or failure) from @@ -198,7 +198,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) { return false; } // check #3 - int softwareLV = FSConstants.LAYOUT_VERSION; // will also be Namenode's LV + int softwareLV = HdfsConstants.LAYOUT_VERSION; // will also be Namenode's LV int storedLV = datanodeVer.getLayoutVersion(); if (softwareLV == storedLV && datanodeVer.getCTime() == namenodeVer.getCTime()) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index 020bdcfd0d..ebfe785fa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -24,11 +24,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; +import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; /** * This test ensures the appropriate response (successful or failure) from diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index 58d3f3386f..251f23dee7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import java.io.File; import java.io.IOException; @@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index ec33f769ce..6ad08cd2aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -30,8 +30,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; @@ -290,7 +290,7 @@ private void upgradeAndVerify() throws IOException { DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); DFSClient dfsClient = dfs.dfs; //Safemode will be off only after upgrade is complete. Wait for it. 
- while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) { + while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) { LOG.info("Waiting for SafeMode to be OFF."); try { Thread.sleep(1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 24c3bc48b1..72faa319b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.Op; @@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; @@ -97,8 +97,8 @@ private void sendRecvData(String testDescription, StringUtils.byteToHexString(sendBuf.toByteArray())); sock = new Socket(); - sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsConstants.READ_TIMEOUT); + sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT); + sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); OutputStream out = sock.getOutputStream(); // Should we excuse diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index a0da70c4ef..b6ecb0e4d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java index 58bd57a68a..62565170bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java @@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 5a542a263e..2cde7ed476 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -19,7 +19,7 @@ import java.net.InetSocketAddress; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.DFSClient; import junit.framework.TestCase; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index 61d1adc6f1..06194b8e43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.DataNode; import static org.apache.hadoop.test.MetricsAsserts.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 2a8383d31b..15d7378b9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index 7a013d485d..4425fcfbf0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -38,7 +38,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import 
org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -111,7 +111,7 @@ private void recoverFile(final FileSystem fs) throws Exception { // set the soft limit to be 1 second so that the // namenode triggers lease recovery upon append request - cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD); + cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD); // Trying recovery int tries = 60; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index d2dfd7fc65..1d7ff4e6e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -301,7 +301,7 @@ public void testFileCreationError1() throws IOException { // wait for the datanode to be declared dead while (true) { DatanodeInfo[] info = client.datanodeReport( - FSConstants.DatanodeReportType.LIVE); + HdfsConstants.DatanodeReportType.LIVE); if (info.length == 0) { break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java index d4d66b4c7f..56cb4506c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -91,7 +91,7 @@ private static void writeFile(FileSystem fileSys, Path name, int repl, int fileSize, int blockSize) throws IOException { // Create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.create(name, true, - FSConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize); + HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 995006279c..736b765933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import static 
org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.BackupNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index 3e084e1547..7bcc7d796e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol; @@ -129,14 +129,14 @@ public void testBlockSynchronization() throws Exception { filestr = "/foo.safemode"; filepath = new Path(filestr); dfs.create(filepath, (short)1); - cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER); + cluster.getNameNode().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); assertTrue(dfs.dfs.exists(filestr)); DFSTestUtil.waitReplication(dfs, filepath, (short)1); waitLeaseRecovery(cluster); // verify that we still cannot recover the lease LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem()); assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1); - cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + cluster.getNameNode().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); } finally { if (cluster != null) {cluster.shutdown();} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 8e720fd172..21d7f2dd81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -36,9 +36,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -318,8 +318,8 @@ public void testSoftLeaseRecovery() throws Exception { DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map); // Reset default lease periods - cluster.setLeasePeriod(FSConstants.LEASE_SOFTLIMIT_PERIOD, - FSConstants.LEASE_HARDLIMIT_PERIOD); + cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, + 
+       HdfsConstants.LEASE_HARDLIMIT_PERIOD);
    //create a file
    // create a random file name
    String filestr = "/foo" + AppendTestUtil.nextInt();
@@ -432,7 +432,7 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
      cluster.getNameNode(), fileStr);
    assertFalse("original lease holder should not be the NN",
-       originalLeaseHolder.equals(HdfsConstants.NAMENODE_LEASE_HOLDER));
+       originalLeaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
    // hflush file
    AppendTestUtil.LOG.info("hflush");
@@ -459,15 +459,15 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
    cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
    // Make sure lease recovery begins.
-   Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
+   Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
-   assertEquals("lease holder should now be the NN", HdfsConstants.NAMENODE_LEASE_HOLDER,
+   assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
    cluster.restartNameNode(false);
    assertEquals("lease holder should still be the NN after restart",
-       HdfsConstants.NAMENODE_LEASE_HOLDER,
+       HdfsServerConstants.NAMENODE_LEASE_HOLDER,
        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
    // Let the DNs send heartbeats again.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
index 9f915b4506..abe9036bb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
@@ -23,7 +23,7 @@
 import java.net.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index 3a3dde8304..11b7f49d9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
@@ -111,7 +111,7 @@ public void pipeline_01() throws IOException {
      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
      assertEquals("Should be RBW replica on " + dn
          + " after sequence of calls append()/write()/hflush()",
-         HdfsConstants.ReplicaState.RBW, r.getState());
+         HdfsServerConstants.ReplicaState.RBW, r.getState());
    }
    ofs.close();
  }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index fbc84f9864..a0727a6c90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
@@ -370,14 +370,14 @@ public void testNamespaceCommands() throws Exception {
    // 2: set the quota of /nqdir0/qdir1 to be 6
    final Path quotaDir1 = new Path("/nqdir0/qdir1");
-   dfs.setQuota(quotaDir1, 6, FSConstants.QUOTA_DONT_SET);
+   dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
    ContentSummary c = dfs.getContentSummary(quotaDir1);
    assertEquals(c.getDirectoryCount(), 3);
    assertEquals(c.getQuota(), 6);
    // 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
    final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
-   dfs.setQuota(quotaDir2, 7, FSConstants.QUOTA_DONT_SET);
+   dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
    c = dfs.getContentSummary(quotaDir2);
    assertEquals(c.getDirectoryCount(), 2);
    assertEquals(c.getQuota(), 7);
@@ -385,7 +385,7 @@ public void testNamespaceCommands() throws Exception {
    // 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
    final Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
    assertTrue(dfs.mkdirs(quotaDir3));
-   dfs.setQuota(quotaDir3, 2, FSConstants.QUOTA_DONT_SET);
+   dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
    c = dfs.getContentSummary(quotaDir3);
    assertEquals(c.getDirectoryCount(), 1);
    assertEquals(c.getQuota(), 2);
@@ -547,13 +547,13 @@ public void testSpaceCommands() throws Exception {
    // set the quota of /nqdir0/qdir1 to 4 * fileSpace
    final Path quotaDir1 = new Path("/nqdir0/qdir1");
-   dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 4 * fileSpace);
+   dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
    ContentSummary c = dfs.getContentSummary(quotaDir1);
    assertEquals(c.getSpaceQuota(), 4 * fileSpace);
    // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
    final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
-   dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 6 * fileSpace);
+   dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceQuota(), 6 * fileSpace);
@@ -561,7 +561,7 @@ public void testSpaceCommands() throws Exception {
    // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
    final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
    assertTrue(dfs.mkdirs(quotaDir21));
-   dfs.setQuota(quotaDir21, FSConstants.QUOTA_DONT_SET, 2 * fileSpace);
+   dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    assertEquals(c.getSpaceQuota(), 2 * fileSpace);
@@ -661,7 +661,7 @@ public void testSpaceCommands() throws Exception {
    assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
    // now increase the quota for quotaDir1
-   dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 5 * fileSpace);
+   dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace);
    // Now, appending more than 1 fileLen should result in an error
    out = dfs.append(file2);
    hasException =
    false;
@@ -704,8 +704,8 @@ public void testSpaceCommands() throws Exception {
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
    // now increase the quota for quotaDir1 and quotaDir20
-   dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
-   dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
+   dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
+   dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
    // then increasing replication should be ok.
    dfs.setReplication(file2, (short)(replication+1));
@@ -735,7 +735,7 @@ public void testSpaceCommands() throws Exception {
    int sizeFactorC = 4;
    // Set space quota for subdirectory C
-   dfs.setQuota(quotaDir2053_C, FSConstants.QUOTA_DONT_SET,
+   dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET,
        (sizeFactorC + 1) * fileSpace);
    c = dfs.getContentSummary(quotaDir2053_C);
    assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index af2339dac3..eef83e4174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 8c98a20528..73adf8efcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

 import static org.junit.Assert.*;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index e08d9f76ea..b230391dd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -23,7 +23,7 @@
 import java.net.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 5771b22abf..2225449f19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -35,15 +35,15 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;

 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -504,7 +504,7 @@ public static void corruptFile(File file) throws IOException {
   * of the Namenode, whether it is running or not.
   */
  public static int getCurrentLayoutVersion() {
-   return FSConstants.LAYOUT_VERSION;
+   return HdfsConstants.LAYOUT_VERSION;
  }

 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 5b0ac31559..34cd784bd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index 9ee296c17c..8afedc8bb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -39,7 +39,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import
 org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index e8193b56d5..565a765b1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -89,8 +89,8 @@ private void addNodes(Iterable nodesToAdd) {
    for (DatanodeDescriptor dn : nodesToAdd) {
      cluster.add(dn);
      dn.updateHeartbeat(
-         2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-         2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+         2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+         2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
    }
  }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 25a486b166..1eef522f83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -49,7 +49,7 @@
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
@@ -137,8 +137,8 @@ private static void tryRead(Configuration conf, LocatedBlock lblock,
      DatanodeInfo[] nodes = lblock.getLocations();
      targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
      s = new Socket();
-     s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
-     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+     s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+     s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
      String file = BlockReaderFactory.getFileName(targetAddr,
          "test-blockpoolid", block.getBlockId());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index ee84feca81..794b23c652 100644
---
 a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -31,7 +31,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -76,8 +76,8 @@ public class TestReplicationPolicy extends TestCase {
    }
    for(int i=0; i args) {
  void generateInputs(int[] opsPerThread) throws IOException {
    assert opsPerThread.length == numThreads : "Error opsPerThread.length";
-   nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+   nameNode.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    // int generatedFileIdx = 0;
    LOG.info("Generate " + numOpsRequired + " intputs for " + getOpName());
    fileNames = new String[numThreads][];
@@ -969,7 +969,7 @@ void generateInputs(int[] ignore) throws IOException {
    FileNameGenerator nameGenerator;
    nameGenerator = new FileNameGenerator(getBaseDir(), 100);
    String clientName = getClientName(007);
-   nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+   nameNode.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    for(int idx=0; idx < nrFiles; idx++) {
      String fileName = nameGenerator.getNextFileName("ThroughputBench");
      nameNode.create(fileName, FsPermission.getDefault(), clientName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index 433670ba4c..708a048b35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -38,7 +38,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -172,7 +172,7 @@ private CheckpointSignature runOperations() throws IOException {
    long atime = mtime;
    dfs.setTimes(pathFileCreate, mtime, atime);
    // OP_SET_QUOTA 14
-   dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET);
+   dfs.setQuota(pathDirectoryMkdir, 1000L, HdfsConstants.QUOTA_DONT_SET);
    // OP_RENAME 15
    fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
    // OP_CONCAT_DELETE 16
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 72e3bf7230..1e07e50208 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++
 b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -31,7 +31,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index beb70bd9b1..8f1af840c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
index 4f698c08d3..3454f7f0af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 96d46783fe..2a1551c6f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -45,8 +45,8 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import
 org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index 94b733cefe..5a4dd660e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -34,7 +34,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.junit.After;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index be78c0df3a..0d2ea934e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -36,7 +36,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.junit.AfterClass;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 88d57da32a..b305296492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -36,9 +36,9 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.log4j.Level;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 4005579386..390894cc71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.StringUtils;

 /**
@@ -148,7 +148,7 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
        0f);
      cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
      cluster.getNameNode().
-       setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+       setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
      FileSystem fs = cluster.getFileSystem();
      // create two files with one block each
@@ -245,7 +245,7 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
      // now leave safe mode so that we can clean up
      cluster.getNameNode().
-       setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+       setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
      util.cleanup(fs, "/srcdat10");
    } catch (Exception e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index 498e815733..2c369d9e4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -26,7 +26,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.junit.Test;

 import com.google.common.base.Joiner;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
index 4719467f92..ed7d3fbd03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;

 import java.util.Collections;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
index 1f953bbf92..da2bf4e22e 100644
---
 a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
@@ -21,7 +21,7 @@
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index c68f7eaab6..349fe1ecac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -41,8 +41,8 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.log4j.Level;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 2734cd5db7..6ba2e81869 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.IMPORT;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;

 import java.io.File;
@@ -46,8 +46,8 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
index 8cbcb21839..1a40159bc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
+++
 b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 05311f5359..bad53880eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -39,7 +39,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index ac36029373..7dc5e86e68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -31,7 +31,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
index 11f695d8f4..b2533a0960 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
@@ -47,8 +47,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import
 org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
@@ -159,7 +159,7 @@ public void testInternalReleaseLease_UNKNOWN_COMM () throws IOException {
      new PermissionStatus("test", "test", new FsPermission((short)0777));

    mockFileBlocks(2, null,
-       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
+       HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);

    releaseLease(fsn, lm, file);
    fail("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -184,8 +184,8 @@ public void testInternalReleaseLease_COMM_COMM () throws IOException {
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.COMMITTED, file, dnd, ps, false);

    releaseLease(fsn, lm, file);
    fail("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -232,7 +232,7 @@ public void testInternalReleaseLease_1blocks () throws IOException {
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(1, null, HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
+   mockFileBlocks(1, null, HdfsServerConstants.BlockUCState.COMMITTED, file, dnd, ps, false);

    releaseLease(fsn, lm, file);
    fail("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -257,8 +257,8 @@ public void testInternalReleaseLease_COMM_CONSTRUCTION () throws IOException {
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);

    assertFalse("False is expected in return in this case",
        releaseLease(fsn, lm, file));
@@ -278,8 +278,8 @@ public void testCommitBlockSynchronization_BlockNotFound ()
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);

    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
    try {
@@ -304,8 +304,8 @@ public void testCommitBlockSynchronization_notUR ()
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.COMPLETE, file, dnd, ps, true);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.COMPLETE, file, dnd, ps, true);

    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
    when(lastBlock.isComplete()).thenReturn(true);
@@ -332,8 +332,8 @@ public void testCommitBlockSynchronization_WrongGreaterRecoveryID()
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId-100);
@@ -360,8 +360,8 @@ public void testCommitBlockSynchronization_WrongLesserRecoveryID()
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);

    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId+100);
@@ -388,8 +388,8 @@ public void testCommitBlockSynchronization_EqualRecoveryID()
    PermissionStatus ps =
      new PermissionStatus("test", "test", new FsPermission((short)0777));

-   mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED,
-       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+   mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED,
+       HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);

    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId);
@@ -407,8 +407,8 @@ public void testCommitBlockSynchronization_EqualRecoveryID()
  }

  private void mockFileBlocks(int fileBlocksNumber,
-     HdfsConstants.BlockUCState penUltState,
-     HdfsConstants.BlockUCState lastState,
+     HdfsServerConstants.BlockUCState penUltState,
+     HdfsServerConstants.BlockUCState lastState,
      Path file, DatanodeDescriptor dnd,
      PermissionStatus ps,
      boolean setStoredBlock) throws IOException {