From 6ae2a0d048e133b43249c248a75a4d77d9abb80d Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Fri, 1 May 2015 15:27:28 -0700
Subject: [PATCH] HDFS-8249. Separate HdfsConstants into the client and the
 server side class. Contributed by Haohui Mai.

---
 .../org/apache/hadoop/hdfs/HAUtilClient.java       |   2 +-
 .../apache/hadoop/hdfs/protocol/Block.java         |   4 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java        | 109 +++++++++++
 .../hdfs/protocol/HdfsConstantsClient.java         |  45 -----
 .../hadoop/hdfs/web/JsonUtilClient.java            |   6 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java         |   4 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java       |   4 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |   3 +
 .../bkjournal/BookKeeperJournalManager.java        |  14 +-
 .../bkjournal/EditLogLedgerMetadata.java           |   8 +-
 .../TestBookKeeperEditLogStreams.java              |   6 +-
 .../org/apache/hadoop/hdfs/DFSClient.java          |   8 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java       |   8 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java       |   5 +-
 .../apache/hadoop/hdfs/NameNodeProxies.java        |   3 +-
 .../hadoop/hdfs/client/impl/LeaseRenewer.java      |   6 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java       |   4 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java        | 175 ------------------
 .../SnapshottableDirectoryStatus.java              |   2 +-
 .../protocolPB/ClientNamenodeProtocolPB.java       |   2 +-
 ...amenodeProtocolServerSideTranslatorPB.java      |   4 +-
 .../hadoop/hdfs/protocolPB/PBHelper.java           |   6 +-
 .../qjournal/client/IPCLoggerChannel.java          |   4 +-
 .../hdfs/qjournal/protocol/RequestInfo.java        |   4 +-
 ...JournalProtocolServerSideTranslatorPB.java      |   4 +-
 .../hadoop/hdfs/qjournal/server/Journal.java       |  22 +--
 .../hdfs/server/balancer/Dispatcher.java           |   5 +-
 .../blockmanagement/BlockIdManager.java            |  10 +-
 .../BlockPlacementPolicyDefault.java               |   4 +-
 .../BlockStoragePolicySuite.java                   |  19 +-
 .../server/common/HdfsServerConstants.java         | 109 ++++++++---
 .../hdfs/server/common/StorageInfo.java            |   5 +-
 .../server/datanode/BlockMetadataHeader.java       |   4 +-
 .../datanode/BlockPoolSliceStorage.java            |  20 +-
 .../hdfs/server/datanode/BlockReceiver.java        |   4 +-
 .../hdfs/server/datanode/BlockSender.java          |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java      |   2 +-
 .../hdfs/server/datanode/DataStorage.java          |  35 ++--
 .../hdfs/server/datanode/DataXceiver.java          |  16 +-
 .../server/datanode/DirectoryScanner.java          |   4 +-
 .../fsdataset/impl/BlockPoolSlice.java             |   4 +-
 .../fsdataset/impl/FsDatasetImpl.java              |   8 +-
 .../fsdataset/impl/FsDatasetUtil.java              |   4 +-
 .../hadoop/hdfs/server/mover/Mover.java            |   2 +-
 .../hdfs/server/namenode/BackupNode.java           |   8 +-
 .../namenode/EditLogBackupInputStream.java         |   6 +-
 .../namenode/EditLogFileInputStream.java           |  22 +--
 .../server/namenode/EditLogInputStream.java        |   4 +-
 .../server/namenode/EditsDoubleBuffer.java         |   6 +-
 .../namenode/FSDirStatAndListingOp.java            |  16 +-
 .../hdfs/server/namenode/FSDirectory.java          |   6 +-
 .../hdfs/server/namenode/FSEditLog.java            |   8 +-
 .../hdfs/server/namenode/FSEditLogLoader.java      |  24 +--
 .../hdfs/server/namenode/FSEditLogOp.java          |  28 +--
 .../hadoop/hdfs/server/namenode/FSImage.java       |  18 +-
 .../hdfs/server/namenode/FSImageFormat.java        |   7 +-
 .../namenode/FSImageFormatProtobuf.java            |   8 +-
 ...ImagePreTransactionalStorageInspector.java      |   4 +-
 .../namenode/FSImageStorageInspector.java          |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java         |  19 +-
 .../server/namenode/FileJournalManager.java        |  22 +--
 .../hadoop/hdfs/server/namenode/INode.java         |  13 +-
 .../hdfs/server/namenode/INodeDirectory.java       |   2 +-
 .../hdfs/server/namenode/INodeFile.java            |   2 +-
 .../hadoop/hdfs/server/namenode/INodeId.java       |   4 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java      |   6 +-
 .../hdfs/server/namenode/INodesInPath.java         |   3 +-
 .../hdfs/server/namenode/LeaseManager.java         |   5 +-
 .../hdfs/server/namenode/NNStorage.java            |  11 +-
 .../hadoop/hdfs/server/namenode/NameNode.java      |   2 +-
 .../server/namenode/NameNodeRpcServer.java         |  10 +-
 .../namenode/RedundantEditLogInputStream.java      |  12 +-
 .../hdfs/server/namenode/TransferFsImage.java      |   8 +-
 .../server/namenode/ha/BootstrapStandby.java       |   6 +-
 .../server/namenode/ha/EditLogTailer.java          |   6 +-
 .../snapshot/FileWithSnapshotFeature.java          |   4 +-
 .../server/protocol/NNHAStatusHeartbeat.java       |   4 +-
 .../hdfs/server/protocol/NamespaceInfo.java        |   4 +-
 .../hdfs/server/protocol/RemoteEditLog.java        |  11 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java      |   4 +-
 .../OfflineEditsLoader.java                        |   7 +-
 .../ImageLoaderCurrent.java                        |   4 +-
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java      |   3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java        |   8 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java        |  39 ++--
 .../apache/hadoop/hdfs/TestDFSRollback.java        |   6 +-
 .../hadoop/hdfs/TestDFSStartupVersions.java        |   8 +-
 .../apache/hadoop/hdfs/TestDFSUpgrade.java         |   4 +-
 .../hadoop/hdfs/TestDatanodeRegistration.java      |   9 +-
 .../apache/hadoop/hdfs/TestFileAppend4.java        |   5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java       |   5 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java      |   4 +-
 .../org/apache/hadoop/hdfs/TestLease.java          |  11 +-
 .../hadoop/hdfs/TestLeaseRecovery2.java            |   5 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java       |   4 +-
 .../hdfs/qjournal/server/TestJournalNode.java      |   4 +-
 .../blockmanagement/TestBlockManager.java          |   9 +-
 .../TestReplicationPolicy.java                     |  42 ++---
 .../TestReplicationPolicyConsiderLoad.java         |   6 +-
 .../TestReplicationPolicyWithNodeGroup.java        |  46 ++---
 .../server/datanode/TestDatanodeRegister.java      |   8 +-
 .../server/datanode/TestDirectoryScanner.java      |   6 +-
 .../hdfs/server/mover/TestStorageMover.java        |   8 +-
 .../namenode/NNThroughputBenchmark.java            |   7 +-
 .../server/namenode/TestAddBlockRetry.java         |  12 +-
 .../hdfs/server/namenode/TestEditLog.java          |   6 +-
 .../namenode/TestEditLogFileInputStream.java       |   4 +-
 .../server/namenode/TestFSEditLogLoader.java       |   6 +-
 .../namenode/TestFSPermissionChecker.java          |   6 +-
 .../server/namenode/TestFileTruncate.java          |   5 +-
 .../hdfs/server/namenode/TestINodeFile.java        |  16 +-
 .../namenode/TestMetadataVersionOutput.java        |   4 +-
 .../namenode/TestNameNodeOptionParsing.java        |   6 +-
 .../namenode/TestTruncateQuotaUpdate.java          |   4 +-
 .../namenode/ha/TestDFSUpgradeWithHA.java          |   4 +-
 .../snapshot/TestOpenFilesWithSnapshot.java        |   4 +-
 .../namenode/snapshot/TestSnapshot.java            |   1 -
 .../apache/hadoop/hdfs/web/TestJsonUtil.java       |   4 +-
 118 files changed, 664 insertions(+), 728 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
index 7956838500..9f28cfcde7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
@@ -25,7 +25,7 @@
 import java.net.URI;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.HA_DT_SERVICE_PREFIX;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
 
 @InterfaceAudience.Private
 public class HAUtilClient {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
index 11daccc7d6..0dac290cb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
@@ -78,7 +78,7 @@ public static File metaToBlockFile(File metaFile) {
   public static long getGenerationStamp(String metaFile) {
     Matcher m = metaFilePattern.matcher(metaFile);
     return m.matches() ? Long.parseLong(m.group(2))
-        : HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
+        : HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
   /**
@@ -100,7 +100,7 @@ public Block(final long blkid, final long len, final long generationStamp) {
   }
 
   public Block(final long blkid) {
-    this(blkid, 0, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
+    this(blkid, 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
   }
 
   public Block(Block blk) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
new file mode 100644
index 0000000000..58c7ea1b89
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public final class HdfsConstants {
+  // Long that indicates "leave current quota unchanged"
+  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
+  public static final long QUOTA_RESET = -1L;
+  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
+  /**
+   * URI Scheme for hdfs://namenode/ URIs.
+   */
+  public static final String HDFS_URI_SCHEME = "hdfs";
+  public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+  public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
+  public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+  // TODO should be conf injected?
+  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
+  /**
+   * A special path component contained in the path for a snapshot file/dir
+   */
+  public static final String DOT_SNAPSHOT_DIR = ".snapshot";
+  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
+      = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
+  public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
+      = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
+
+  /**
+   * Generation stamp of blocks that pre-date the introduction
+   * of a generation stamp.
+   */
+  public static final long GRANDFATHER_GENERATION_STAMP = 0;
+  /**
+   * The inode id validation of lease check will be skipped when the request
+   * uses GRANDFATHER_INODE_ID for backward compatibility.
+   */
+  public static final long GRANDFATHER_INODE_ID = 0;
+  public static final byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
+  /**
+   * A prefix put before the namenode URI inside the "service" field
+   * of a delgation token, indicating that the URI is a logical (HA)
+   * URI.
+   */
+  public static final String HA_DT_SERVICE_PREFIX = "ha-";
+  // The name of the SafeModeException. FileSystem should retry if it sees
+  // the below exception in RPC
+  public static final String SAFEMODE_EXCEPTION_CLASS_NAME =
+      "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
+  /**
+   * HDFS Protocol Names:
+   */
+  public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol";
+
+  // SafeMode actions
+  public enum SafeModeAction {
+    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
+  }
+
+  public enum RollingUpgradeAction {
+    QUERY, PREPARE, FINALIZE;
+
+    private static final Map<String, RollingUpgradeAction> MAP
+        = new HashMap<>();
+    static {
+      MAP.put("", QUERY);
+      for(RollingUpgradeAction a : values()) {
+        MAP.put(a.name(), a);
+      }
+    }
+
+    /** Covert the given String to a RollingUpgradeAction. */
+    public static RollingUpgradeAction fromString(String s) {
+      return MAP.get(StringUtils.toUpperCase(s));
+    }
+  }
+
+  // type of the datanode report
+  public enum DatanodeReportType {
+    ALL, LIVE, DEAD, DECOMMISSIONING
+  }
+
+  /* Hidden constructor */
+  protected HdfsConstants() {
+  }
+}
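[Note on the new client-side class above: `RollingUpgradeAction.fromString` upper-cases its input before the map lookup, and the static block pre-registers the empty string. A minimal usage sketch, not part of the patch — the demo class name is made up:]

```java
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;

public class RollingUpgradeActionDemo {
  public static void main(String[] args) {
    // Input is upper-cased before the lookup, so matching is case-insensitive.
    System.out.println(RollingUpgradeAction.fromString("prepare")); // PREPARE
    // The empty string is explicitly mapped to QUERY by the static block.
    System.out.println(RollingUpgradeAction.fromString(""));        // QUERY
    // Unknown strings miss the map and yield null; callers must check.
    System.out.println(RollingUpgradeAction.fromString("bogus"));   // null
  }
}
```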
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
deleted file mode 100644
index 00f07e88eb..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public interface HdfsConstantsClient {
-  /**
-   * Generation stamp of blocks that pre-date the introduction
-   * of a generation stamp.
-   */
-  long GRANDFATHER_GENERATION_STAMP = 0;
-  /**
-   * The inode id validation of lease check will be skipped when the request
-   * uses GRANDFATHER_INODE_ID for backward compatibility.
-   */
-  long GRANDFATHER_INODE_ID = 0;
-  byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
-  /**
-   * A prefix put before the namenode URI inside the "service" field
-   * of a delgation token, indicating that the URI is a logical (HA)
-   * URI.
-   */
-  String HA_DT_SERVICE_PREFIX = "ha-";
-  // The name of the SafeModeException. FileSystem should retry if it sees
-  // the below exception in RPC
-  String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
-      ".namenode.SafeModeException";
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index e263a0a74a..ca94840973 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -123,11 +123,11 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
     final long blockSize = ((Number) m.get("blockSize")).longValue();
     final short replication = ((Number) m.get("replication")).shortValue();
     final long fileId = m.containsKey("fileId") ?
-        ((Number) m.get("fileId")).longValue() : HdfsConstantsClient.GRANDFATHER_INODE_ID;
+        ((Number) m.get("fileId")).longValue() : HdfsConstants.GRANDFATHER_INODE_ID;
     final int childrenNum = getInt(m, "childrenNum", -1);
     final byte storagePolicy = m.containsKey("storagePolicy") ?
         (byte) ((Number) m.get("storagePolicy")).longValue() :
-        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
         replication, blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtilClient.string2Bytes(localName),
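[Note on the `toFileStatus` hunk above: absent JSON keys fall back to the `GRANDFATHER_*` defaults so a newer client can parse responses from an older WebHDFS server. A sketch of that optional-field pattern as a hypothetical helper — `OptionalJson` and `getLong` are illustrative names, not Hadoop APIs:]

```java
import java.util.Map;

final class OptionalJson {
  private OptionalJson() {}

  // Returns the numeric value under 'key', or 'dflt' when the server
  // (e.g. an older WebHDFS endpoint) did not emit the field at all.
  static long getLong(Map<?, ?> m, String key, long dflt) {
    Object v = m.get(key);
    return v instanceof Number ? ((Number) v).longValue() : dflt;
  }
}
```

A call such as `OptionalJson.getLong(m, "fileId", HdfsConstants.GRANDFATHER_INODE_ID)` would express the same compatibility default as the ternary in the hunk.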
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index d28f571e18..aebd25a5ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -59,7 +59,7 @@
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.*;
@@ -171,7 +171,7 @@ public synchronized void initialize(URI uri, Configuration conf
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
-          HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
+          HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME);
     } else {
 
       int maxFailoverAttempts = conf.getInt(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 161f3e91d4..05d806aed1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -44,8 +44,8 @@
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNS;
@@ -2031,7 +2031,7 @@ PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
       }
       return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0,
-          HdfsConstants.MAX_PATH_LENGTH, true, false, false, true);
+          HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
     } catch (IOException e) {
       LOG.warn("Exception ", e);
       int status = mapErrorStatus(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1882df589b..71873a42e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -497,6 +497,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8086. Move LeaseRenewer to the hdfs.client.impl package. (Takanobu
     Asanuma via szetszwo)
 
+    HDFS-8249. Separate HdfsConstants into the client and the server side
+    class. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 89fa84c61f..21fa4f291a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
@@ -568,7 +568,7 @@ public void selectInputStreams(Collection<EditLogInputStream> streams,
         return;
       }
       streams.add(elis);
-      if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
+      if (elis.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
         return;
       }
       fromTxId = elis.getLastTxId() + 1;
@@ -589,7 +589,7 @@ long getNumberOfTransactions(long fromTxId, boolean inProgressOk)
       long lastTxId = l.getLastTxId();
       if (l.isInProgress()) {
         lastTxId = recoverLastTxId(l, false);
-        if (lastTxId == HdfsConstants.INVALID_TXID) {
+        if (lastTxId == HdfsServerConstants.INVALID_TXID) {
           break;
         }
       }
@@ -634,7 +634,7 @@ public void recoverUnfinalizedSegments() throws IOException {
         EditLogLedgerMetadata l = EditLogLedgerMetadata.read(zkc, znode);
         try {
           long endTxId = recoverLastTxId(l, true);
-          if (endTxId == HdfsConstants.INVALID_TXID) {
+          if (endTxId == HdfsServerConstants.INVALID_TXID) {
             LOG.error("Unrecoverable corruption has occurred in segment "
                 + l.toString() + " at path " + znode
                 + ". Unable to continue recovery.");
@@ -788,10 +788,10 @@ private long recoverLastTxId(EditLogLedgerMetadata l, boolean fence)
 
       in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);
 
-      long endTxId = HdfsConstants.INVALID_TXID;
+      long endTxId = HdfsServerConstants.INVALID_TXID;
       FSEditLogOp op = in.readOp();
       while (op != null) {
-        if (endTxId == HdfsConstants.INVALID_TXID
+        if (endTxId == HdfsServerConstants.INVALID_TXID
             || op.getTransactionId() == endTxId+1) {
           endTxId = op.getTransactionId();
         }
@@ -827,7 +827,7 @@ private List<EditLogLedgerMetadata> getLedgerList(long fromTxId,
       try {
         EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
             .read(zkc, legderMetadataPath);
-        if (editLogLedgerMetadata.getLastTxId() != HdfsConstants.INVALID_TXID
+        if (editLogLedgerMetadata.getLastTxId() != HdfsServerConstants.INVALID_TXID
             && editLogLedgerMetadata.getLastTxId() < fromTxId) {
           // exclude already read closed edits, but include inprogress edits
           // as this will be handled in caller
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
index 039cb3a612..2d1f8b95c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
@@ -19,13 +19,13 @@
 
 import java.io.IOException;
 import java.util.Comparator;
+
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.KeeperException;
 
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -67,7 +67,7 @@ public int compare(EditLogLedgerMetadata o1,
     this.dataLayoutVersion = dataLayoutVersion;
     this.ledgerId = ledgerId;
     this.firstTxId = firstTxId;
-    this.lastTxId = HdfsConstants.INVALID_TXID;
+    this.lastTxId = HdfsServerConstants.INVALID_TXID;
     this.inprogress = true;
   }
 
@@ -107,7 +107,7 @@ int getDataLayoutVersion() {
   }
 
   void finalizeLedger(long newLastTxId) {
-    assert this.lastTxId == HdfsConstants.INVALID_TXID;
+    assert this.lastTxId == HdfsServerConstants.INVALID_TXID;
     this.lastTxId = newLastTxId;
     this.inprogress = false;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
index ec9692b96d..52e4568e30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
@@ -27,7 +27,7 @@
 import org.apache.bookkeeper.conf.ClientConfiguration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.zookeeper.ZooKeeper;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -68,7 +68,7 @@ public void testEmptyInputStream() throws Exception {
       lh.close();
 
       EditLogLedgerMetadata metadata = new EditLogLedgerMetadata("/foobar",
-          HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
+          HdfsServerConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
       try {
         new BookKeeperEditLogInputStream(lh, metadata, -1);
         fail("Shouldn't get this far, should have thrown");
@@ -77,7 +77,7 @@ public void testEmptyInputStream() throws Exception {
       }
 
       metadata = new EditLogLedgerMetadata("/foobar",
-          HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
+          HdfsServerConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
       try {
         new BookKeeperEditLogInputStream(lh, metadata, 0);
         fail("Shouldn't get this far, should have thrown");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index aaba543e63..99b8d2ce95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -543,10 +543,10 @@ public boolean renewLease() throws IOException {
       } catch (IOException e) {
         // Abort if the lease has already expired.
         final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-        if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+        if (elapsed > HdfsServerConstants.LEASE_HARDLIMIT_PERIOD) {
           LOG.warn("Failed to renew lease for " + clientName + " for "
               + (elapsed/1000) + " seconds (>= hard-limit ="
-              + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
+              + (HdfsServerConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
              + "Closing all files being written ...", e);
           closeAllFilesBeingWritten(true);
         } else {
@@ -1902,7 +1902,7 @@ public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
         //connect to a datanode
         IOStreamPair pair = connectToDN(datanodes[j], timeout, lb);
         out = new DataOutputStream(new BufferedOutputStream(pair.out,
-            HdfsConstants.SMALL_BUFFER_SIZE));
+            HdfsServerConstants.SMALL_BUFFER_SIZE));
         in = new DataInputStream(pair.in);
 
         if (LOG.isDebugEnabled()) {
@@ -2067,7 +2067,7 @@ private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
     try {
       DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
-          HdfsConstants.SMALL_BUFFER_SIZE));
+          HdfsServerConstants.SMALL_BUFFER_SIZE));
       DataInputStream in = new DataInputStream(pair.in);
 
       new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 2ec1c80d48..37f8c72f6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -72,7 +72,6 @@
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -80,20 +79,17 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -262,7 +258,7 @@ public static boolean isValidNameForComponent(String component) {
    * @return true, if the component is reserved
    */
   public static boolean isReservedPathComponent(String component) {
-    for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
+    for (String reserved : HdfsServerConstants.RESERVED_PATH_COMPONENTS) {
       if (component.equals(reserved)) {
         return true;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 8e874ebfb7..43787ab774 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -71,6 +71,7 @@
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
@@ -1144,7 +1145,7 @@ private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
       unbufOut = saslStreams.out;
       unbufIn = saslStreams.in;
       out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-          HdfsConstants.SMALL_BUFFER_SIZE));
+          HdfsServerConstants.SMALL_BUFFER_SIZE));
       in = new DataInputStream(unbufIn);
 
       //send the TRANSFER_BLOCK request
@@ -1424,7 +1425,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes,
         unbufOut = saslStreams.out;
         unbufIn = saslStreams.in;
         out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-            HdfsConstants.SMALL_BUFFER_SIZE));
+            HdfsServerConstants.SMALL_BUFFER_SIZE));
         blockReplyStream = new DataInputStream(unbufIn);
 
         //
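[Note on the `SMALL_BUFFER_SIZE` call sites in the DFSClient and DataStreamer hunks above: short protocol headers are written through a small `BufferedOutputStream` so each header goes out as one small write rather than many tiny ones. A self-contained sketch of the pattern under the assumption that `rawSocketOut` stands in for the stream obtained from the DataNode socket; `HeaderStreams` is a made-up name:]

```java
import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.OutputStream;

final class HeaderStreams {
  private HeaderStreams() {}

  // Mirrors SMALL_BUFFER_SIZE = min(ioFileBufferSize / 2, 512): big enough
  // for a request header, far smaller than the data-transfer buffer.
  static DataOutputStream wrap(OutputStream rawSocketOut, int ioFileBufferSize) {
    int smallBufferSize = Math.min(ioFileBufferSize / 2, 512);
    return new DataOutputStream(
        new BufferedOutputStream(rawSocketOut, smallBufferSize));
  }
}
```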
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index d826755991..77262bcaf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
@@ -417,7 +418,7 @@ private static ClientProtocol createNNProxyWithClientProtocol(
 
     RetryPolicy createPolicy = RetryPolicies
         .retryUpToMaximumCountWithFixedSleep(5,
-            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+            HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
 
     Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
         = new HashMap<Class<? extends Exception>, RetryPolicy>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 4cdf1685d6..99323bb5ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
@@ -165,7 +165,7 @@ private synchronized void remove(final LeaseRenewer r) {
   /** The time in milliseconds that the map became empty. */
   private long emptyTime = Long.MAX_VALUE;
   /** A fixed lease renewal time period in milliseconds */
-  private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2;
+  private long renewal = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD/2;
 
   /** A daemon for renewing lease */
   private Daemon daemon = null;
@@ -372,7 +372,7 @@ public synchronized void closeClient(final DFSClient dfsc) {
 
     //update renewal time
     if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
-      long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
+      long min = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
       for(DFSClient c : dfsclients) {
         final int timeout = c.getConf().getHdfsTimeout();
         if (timeout > 0 && timeout < min) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bafb02b8d4..55faf1662c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -967,8 +967,8 @@ public ContentSummary getContentSummary(String path)
    * <br><br>
    *
    * The quota can have three types of values : (1) 0 or more will set
-   * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET}  implies
-   * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
+   * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
+   * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
    * implies the quota will be reset. Any other value is a runtime error.
    *
    * @throws AccessControlException permission denied
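[Note on the quota javadoc above: the three-valued contract is easiest to see at a call site. A hedged sketch using `DistributedFileSystem#setQuota(Path, long, long)` — the path and the quota figures are invented for illustration:]

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class QuotaExamples {
  static void demo(DistributedFileSystem dfs) throws java.io.IOException {
    Path dir = new Path("/user/alice");
    // (1) A value >= 0 sets that quota (here: at most 10,000 names).
    dfs.setQuota(dir, 10_000, HdfsConstants.QUOTA_DONT_SET);
    // (2) QUOTA_DONT_SET (Long.MAX_VALUE) leaves the other quota untouched.
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 1L << 40);
    // (3) QUOTA_RESET (-1) removes a previously set quota.
    dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }
}
```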
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
deleted file mode 100644
index 52259b04fa..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
-import org.apache.hadoop.util.StringUtils;
-
-/************************************
- * Some handy constants
- *
- ************************************/
-@InterfaceAudience.Private
-public class HdfsConstants {
-  /* Hidden constructor */
-  protected HdfsConstants() {
-  }
-
-  /**
-   * HDFS Protocol Names:
-   */
-  public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol";
-  public static final String CLIENT_DATANODE_PROTOCOL_NAME =
-      "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
-
-
-  public static final int MIN_BLOCKS_FOR_WRITE = 1;
-
-  // Long that indicates "leave current quota unchanged"
-  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
-  public static final long QUOTA_RESET = -1L;
-
-  //
-  // Timeouts, constants
-  //
-  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
-  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
-  public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
-
-  // We need to limit the length and depth of a path in the filesystem.
-  // HADOOP-438
-  // Currently we set the maximum length to 8k characters and the maximum depth
-  // to 1k.
-  public static final int MAX_PATH_LENGTH = 8000;
-  public static final int MAX_PATH_DEPTH = 1000;
-
-  // TODO should be conf injected?
-  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
-  public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
-      DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
-      DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
-  // Used for writing header etc.
-  public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
-      512);
-
-  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
-
-  // SafeMode actions
-  public static enum SafeModeAction {
-    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
-  }
-
-  public static enum RollingUpgradeAction {
-    QUERY, PREPARE, FINALIZE;
-
-    private static final Map<String, RollingUpgradeAction> MAP
-        = new HashMap<String, RollingUpgradeAction>();
-    static {
-      MAP.put("", QUERY);
-      for(RollingUpgradeAction a : values()) {
-        MAP.put(a.name(), a);
-      }
-    }
-
-    /** Covert the given String to a RollingUpgradeAction. */
-    public static RollingUpgradeAction fromString(String s) {
-      return MAP.get(StringUtils.toUpperCase(s));
-    }
-  }
-
-  // type of the datanode report
-  public static enum DatanodeReportType {
-    ALL, LIVE, DEAD, DECOMMISSIONING
-  }
-
-  // An invalid transaction ID that will never be seen in a real namesystem.
-  public static final long INVALID_TXID = -12345;
-
-  // Number of generation stamps reserved for legacy blocks.
-  public static final long RESERVED_GENERATION_STAMPS_V1 =
-      1024L * 1024 * 1024 * 1024;
-
-  /**
-   * URI Scheme for hdfs://namenode/ URIs.
-   */
-  public static final String HDFS_URI_SCHEME = "hdfs";
-
-
-  /**
-   * Current layout version for NameNode.
-   * Please see {@link NameNodeLayoutVersion.Feature} on adding new layout version.
-   */
-  public static final int NAMENODE_LAYOUT_VERSION
-      = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-
-  /**
-   * Current layout version for DataNode.
-   * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version.
-   */
-  public static final int DATANODE_LAYOUT_VERSION
-      = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-
-  /**
-   * Path components that are reserved in HDFS.
-   * <p>
-   * .reserved is only reserved under root ("/").
-   */
-  public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
-    HdfsConstants.DOT_SNAPSHOT_DIR,
-    FSDirectory.DOT_RESERVED_STRING
-  };
-
-  /**
-   * A special path component contained in the path for a snapshot file/dir
-   */
-  public static final String DOT_SNAPSHOT_DIR = ".snapshot";
-
-  public static final byte[] DOT_SNAPSHOT_DIR_BYTES
-      = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);
-
-  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
-      = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
-
-  public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
-      = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
-
-  public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
-  public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
-  public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
-  public static final String HOT_STORAGE_POLICY_NAME = "HOT";
-  public static final String WARM_STORAGE_POLICY_NAME = "WARM";
-  public static final String COLD_STORAGE_POLICY_NAME = "COLD";
-
-  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
-  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
-  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
-  public static final byte HOT_STORAGE_POLICY_ID = 7;
-  public static final byte WARM_STORAGE_POLICY_ID = 5;
-  public static final byte COLD_STORAGE_POLICY_ID = 2;
-}
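[Note on the lease timeouts being deleted above (they move to HdfsServerConstants): after the 60-second soft limit another client may force lease recovery, and after the one-hour hard limit the NameNode may close the file on the writer's behalf — which is the condition `DFSClient#renewLease` checks in an earlier hunk. A small sketch restating those values; `LeaseLimits` is an illustrative name:]

```java
final class LeaseLimits {
  static final long SOFT_MS = 60 * 1000L;   // LEASE_SOFTLIMIT_PERIOD: 60 s
  static final long HARD_MS = 60 * SOFT_MS; // LEASE_HARDLIMIT_PERIOD: 1 h

  // Mirrors the abort condition in DFSClient#renewLease.
  static boolean pastHardLimit(long lastRenewalMs, long nowMs) {
    return nowMs - lastRenewalMs > HARD_MS;
  }

  private LeaseLimits() {}
}
```

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 518e91a406..3067696eca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -61,7 +61,7 @@ public SnapshottableDirectoryStatus(long modification_time, long access_time,
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
index 4f25694095..0d3796c63c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
@@ -33,7 +33,7 @@
 @KerberosInfo(
     serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(DelegationTokenSelector.class)
-@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME, 
+@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
     protocolVersion = 1)
 /**
  * Protocol that a clients use to communicate with the NameNode.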
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 563d10b88f..480b3d9bf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -36,7 +36,7 @@
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -533,7 +533,7 @@ public CompleteResponseProto complete(RpcController controller,
       boolean result = server.complete(req.getSrc(), req.getClientName(),
           req.hasLast() ? PBHelper.convert(req.getLast()) : null,
-          req.hasFileId() ? req.getFileId() : HdfsConstantsClient.GRANDFATHER_INODE_ID);
+          req.hasFileId() ? req.getFileId() : HdfsConstants.GRANDFATHER_INODE_ID);
       return CompleteResponseProto.newBuilder().setResult(result).build();
     } catch (IOException e) {
       throw new ServiceException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b440e60b01..ba6670c0d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -73,10 +73,10 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1436,12 +1436,12 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
         fs.getFileType().equals(FileType.IS_SYMLINK) ?
             fs.getSymlink().toByteArray() : null,
         fs.getPath().toByteArray(),
-        fs.hasFileId()? fs.getFileId(): HdfsConstantsClient.GRANDFATHER_INODE_ID,
+        fs.hasFileId()? fs.getFileId(): HdfsConstants.GRANDFATHER_INODE_ID,
         fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
         fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
         fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
         fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
-            : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+            : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
 
   public static SnapshottableDirectoryStatus convert(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 6938f571e5..cbfe5b8d48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -33,7 +33,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
@@ -46,6 +45,7 @@
 import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB;
 import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.qjournal.server.GetJournalEditServlet;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -92,7 +92,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   private final ListeningExecutorService parallelExecutor;
   private long ipcSerial = 0;
   private long epoch = -1;
-  private long committedTxId = HdfsConstants.INVALID_TXID;
+  private long committedTxId = HdfsServerConstants.INVALID_TXID;
 
   private final String journalId;
   private final NamespaceInfo nsInfo;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
index dfd1e4de26..2569aadd46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.qjournal.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 @InterfaceAudience.Private
 public class RequestInfo {
@@ -60,6 +60,6 @@ public long getCommittedTxId() {
   }
 
   public boolean hasCommittedTxId() {
-    return (committedTxId != HdfsConstants.INVALID_TXID);
+    return (committedTxId != HdfsServerConstants.INVALID_TXID);
   }
 }
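[Note on `RequestInfo.hasCommittedTxId` above and the journal hunks that follow: `INVALID_TXID` (-12345) can never occur as a real transaction ID, so `field != INVALID_TXID` doubles as a presence check without boxing to an optional type. A standalone sketch of the convention; `TxIdSentinel` is a made-up class:]

```java
final class TxIdSentinel {
  // Mirrors HdfsServerConstants.INVALID_TXID: never seen in a real namesystem.
  static final long INVALID_TXID = -12345;

  private long committedTxId = INVALID_TXID; // "not known yet"

  boolean hasCommittedTxId() {
    return committedTxId != INVALID_TXID;
  }

  void setCommittedTxId(long txId) {
    this.committedTxId = txId;
  }
}
```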
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
index 9ff63d790a..28f77f26fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
@@ -21,7 +21,6 @@
 import java.net.URL;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
@@ -66,6 +65,7 @@
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
@@ -263,7 +263,7 @@ private RequestInfo convert(
         reqInfo.getEpoch(),
         reqInfo.getIpcSerialNumber(),
         reqInfo.hasCommittedTxId() ?
-            reqInfo.getCommittedTxId() : HdfsConstants.INVALID_TXID);
+            reqInfo.getCommittedTxId() : HdfsServerConstants.INVALID_TXID);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 9d11ca59e9..29530554ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -34,7 +34,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
@@ -44,6 +43,7 @@
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -81,8 +81,8 @@ public class Journal implements Closeable {
 
   // Current writing state
   private EditLogOutputStream curSegment;
-  private long curSegmentTxId = HdfsConstants.INVALID_TXID;
-  private long nextTxId = HdfsConstants.INVALID_TXID;
+  private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+  private long nextTxId = HdfsServerConstants.INVALID_TXID;
   private long highestWrittenTxId = 0;
 
   private final String journalId;
@@ -170,7 +170,7 @@ private synchronized void refreshCachedData() {
         new File(currentDir, LAST_WRITER_EPOCH), 0);
     this.committedTxnId = new BestEffortLongFile(
         new File(currentDir, COMMITTED_TXID_FILENAME),
-        HdfsConstants.INVALID_TXID);
+        HdfsServerConstants.INVALID_TXID);
   }
 
   /**
@@ -191,7 +191,7 @@ private synchronized EditLogFile scanStorageForLatestEdits() throws IOException
       EditLogFile latestLog = files.remove(files.size() - 1);
       latestLog.scanLog();
       LOG.info("Latest log is " + latestLog);
-      if (latestLog.getLastTxId() == HdfsConstants.INVALID_TXID) {
+      if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
         // the log contains no transactions
         LOG.warn("Latest log " + latestLog + " has no transactions. " +
             "moving it aside and looking for previous log");
@@ -327,7 +327,7 @@ private void abortCurSegment() throws IOException {
 
     curSegment.abort();
     curSegment = null;
-    curSegmentTxId = HdfsConstants.INVALID_TXID;
+    curSegmentTxId = HdfsServerConstants.INVALID_TXID;
   }
 
   /**
@@ -565,7 +565,7 @@ public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId,
     if (curSegment != null) {
       curSegment.close();
       curSegment = null;
-      curSegmentTxId = HdfsConstants.INVALID_TXID;
+      curSegmentTxId = HdfsServerConstants.INVALID_TXID;
     }
 
     checkSync(nextTxId == endTxId + 1,
@@ -677,7 +677,7 @@ SegmentStateProto getSegmentInfo(long segmentTxId)
     if (elf.isInProgress()) {
       elf.scanLog();
     }
-    if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
+    if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
       LOG.info("Edit log file " + elf + " appears to be empty. " +
           "Moving it aside...");
       elf.moveAsideEmptyFile();
@@ -727,7 +727,7 @@ public synchronized PrepareRecoveryResponseProto prepareRecovery(
     }
     builder.setLastWriterEpoch(lastWriterEpoch.get());
-    if (committedTxnId.get() != HdfsConstants.INVALID_TXID) {
+    if (committedTxnId.get() != HdfsServerConstants.INVALID_TXID) {
       builder.setLastCommittedTxId(committedTxnId.get());
     }
 
@@ -1021,7 +1021,7 @@ public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
         new File(previousDir, LAST_WRITER_EPOCH), 0);
     BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
         new File(previousDir, COMMITTED_TXID_FILENAME),
-        HdfsConstants.INVALID_TXID);
+        HdfsServerConstants.INVALID_TXID);
 
     lastPromisedEpoch = new PersistentLongFile(
         new File(currentDir, LAST_PROMISED_FILENAME), 0);
@@ -1029,7 +1029,7 @@ public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
     lastWriterEpoch = new PersistentLongFile(
         new File(currentDir, LAST_WRITER_EPOCH), 0);
     committedTxnId = new BestEffortLongFile(
         new File(currentDir, COMMITTED_TXID_FILENAME),
-        HdfsConstants.INVALID_TXID);
+        HdfsServerConstants.INVALID_TXID);
 
     try {
       lastPromisedEpoch.set(prevLastPromisedEpoch.get());
HdfsServerConstants.IO_FILE_BUFFER_SIZE)); sendRequest(out, eb, accessToken); receiveResponse(in); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java index eac6ed295e..feebd87fa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java @@ -21,8 +21,8 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import java.io.IOException; @@ -56,7 +56,7 @@ public class BlockIdManager { private final SequentialBlockIdGenerator blockIdGenerator; public BlockIdManager(BlockManager blockManager) { - this.generationStampV1Limit = HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP; + this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP; this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager); } @@ -70,7 +70,7 @@ public long upgradeGenerationStampToV2() { Preconditions.checkState(generationStampV2.getCurrentValue() == GenerationStamp.LAST_RESERVED_STAMP); generationStampV2.skipTo(generationStampV1.getCurrentValue() + - HdfsConstants.RESERVED_GENERATION_STAMPS_V1); + HdfsServerConstants.RESERVED_GENERATION_STAMPS_V1); generationStampV1Limit = generationStampV2.getCurrentValue(); return generationStampV2.getCurrentValue(); @@ -83,7 +83,7 @@ public long upgradeGenerationStampToV2() { * @param stamp set generation stamp limit to this value */ public void setGenerationStampV1Limit(long stamp) { - Preconditions.checkState(generationStampV1Limit == HdfsConstantsClient + Preconditions.checkState(generationStampV1Limit == HdfsConstants .GRANDFATHER_GENERATION_STAMP); generationStampV1Limit = stamp; } @@ -204,6 +204,6 @@ public void clear() { generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP); getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator .LAST_RESERVED_BLOCK_ID); - generationStampV1Limit = HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP; + generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP; } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index c2752acc3b..620d2a6504 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; import 
org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; @@ -792,7 +792,7 @@ private boolean isGoodTarget(DatanodeStorageInfo storage, } } - final long requiredSize = blockSize * HdfsConstants.MIN_BLOCKS_FOR_WRITE; + final long requiredSize = blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE; final long scheduledSize = blockSize * node.getBlocksScheduled(storage.getStorageType()); final long remaining = node.getRemaining(storage.getStorageType()); if (requiredSize > remaining - scheduledSize) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java index 6c352f3706..2a71c2973f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,39 +48,39 @@ public class BlockStoragePolicySuite { public static BlockStoragePolicySuite createDefaultSuite() { final BlockStoragePolicy[] policies = new BlockStoragePolicy[1 << ID_BIT_LENGTH]; - final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID; + final byte lazyPersistId = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID; policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, HdfsConstants.MEMORY_STORAGE_POLICY_NAME, new StorageType[]{StorageType.RAM_DISK, StorageType.DISK}, new StorageType[]{StorageType.DISK}, new StorageType[]{StorageType.DISK}, true); // Cannot be changed on regular files, but inherited. 
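As an aside, a minimal sketch of how the suite assembled in this hunk is consumed; it leans only on createDefaultSuite(), getPolicy(byte) and BlockStoragePolicy.chooseStorageTypes(short), and PolicySketch is a hypothetical class:

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

class PolicySketch {
  // HOT keeps every replica on DISK, per the policy definitions below.
  static List<StorageType> hotTypesForThreeReplicas() {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy hot =
        suite.getPolicy(HdfsServerConstants.HOT_STORAGE_POLICY_ID);
    return hot.chooseStorageTypes((short) 3);
  }
}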
- final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID; + final byte allssdId = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID; policies[allssdId] = new BlockStoragePolicy(allssdId, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME, new StorageType[]{StorageType.SSD}, new StorageType[]{StorageType.DISK}, new StorageType[]{StorageType.DISK}); - final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID; + final byte onessdId = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID; policies[onessdId] = new BlockStoragePolicy(onessdId, HdfsConstants.ONESSD_STORAGE_POLICY_NAME, new StorageType[]{StorageType.SSD, StorageType.DISK}, new StorageType[]{StorageType.SSD, StorageType.DISK}, new StorageType[]{StorageType.SSD, StorageType.DISK}); - final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID; + final byte hotId = HdfsServerConstants.HOT_STORAGE_POLICY_ID; policies[hotId] = new BlockStoragePolicy(hotId, - HdfsConstants.HOT_STORAGE_POLICY_NAME, + HdfsServerConstants.HOT_STORAGE_POLICY_NAME, new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY, new StorageType[]{StorageType.ARCHIVE}); - final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID; + final byte warmId = HdfsServerConstants.WARM_STORAGE_POLICY_ID; policies[warmId] = new BlockStoragePolicy(warmId, - HdfsConstants.WARM_STORAGE_POLICY_NAME, + HdfsServerConstants.WARM_STORAGE_POLICY_NAME, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}); - final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID; + final byte coldId = HdfsServerConstants.COLD_STORAGE_POLICY_ID; policies[coldId] = new BlockStoragePolicy(coldId, - HdfsConstants.COLD_STORAGE_POLICY_NAME, + HdfsServerConstants.COLD_STORAGE_POLICY_NAME, new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY, StorageType.EMPTY_ARRAY); return new BlockStoragePolicySuite(hotId, policies); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 2d267cee04..47b0818956 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -24,9 +24,16 @@ import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.util.StringUtils; /************************************ @@ -35,21 +42,75 @@ ************************************/ @InterfaceAudience.Private -public final class HdfsServerConstants { - /* Hidden constructor */ - private HdfsServerConstants() { } - +public interface HdfsServerConstants { + int MIN_BLOCKS_FOR_WRITE = 1; + // + // Timeouts, constants + // + long LEASE_SOFTLIMIT_PERIOD = 60 * 1000; + long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD; + 
long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms + // We need to limit the length and depth of a path in the filesystem. + // HADOOP-438 + // Currently we set the maximum length to 8k characters and the maximum depth + // to 1k. + int MAX_PATH_LENGTH = 8000; + int MAX_PATH_DEPTH = 1000; + int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt( + CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, + CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); + // Used for writing header etc. + int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2, + 512); + // An invalid transaction ID that will never be seen in a real namesystem. + long INVALID_TXID = -12345; + // Number of generation stamps reserved for legacy blocks. + long RESERVED_GENERATION_STAMPS_V1 = + 1024L * 1024 * 1024 * 1024; + /** + * Current layout version for NameNode. + * Please see {@link NameNodeLayoutVersion.Feature} on adding new layout version. + */ + int NAMENODE_LAYOUT_VERSION + = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION; + /** + * Current layout version for DataNode. + * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version. + */ + int DATANODE_LAYOUT_VERSION + = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION; + /** + * Path components that are reserved in HDFS. + *
<p>
+ * .reserved is only reserved under root ("/"). + */ + String[] RESERVED_PATH_COMPONENTS = new String[] { + HdfsConstants.DOT_SNAPSHOT_DIR, + FSDirectory.DOT_RESERVED_STRING + }; + byte[] DOT_SNAPSHOT_DIR_BYTES + = DFSUtil.string2Bytes(HdfsConstants.DOT_SNAPSHOT_DIR); + String HOT_STORAGE_POLICY_NAME = "HOT"; + String WARM_STORAGE_POLICY_NAME = "WARM"; + String COLD_STORAGE_POLICY_NAME = "COLD"; + byte MEMORY_STORAGE_POLICY_ID = 15; + byte ALLSSD_STORAGE_POLICY_ID = 12; + byte ONESSD_STORAGE_POLICY_ID = 10; + byte HOT_STORAGE_POLICY_ID = 7; + byte WARM_STORAGE_POLICY_ID = 5; + byte COLD_STORAGE_POLICY_ID = 2; + /** * Type of the node */ - static public enum NodeType { + enum NodeType { NAME_NODE, DATA_NODE, - JOURNAL_NODE; + JOURNAL_NODE } /** Startup options for rolling upgrade. */ - public static enum RollingUpgradeStartupOption{ + enum RollingUpgradeStartupOption{ ROLLBACK, STARTED; public String getOptionString() { @@ -93,7 +154,7 @@ public static String getAllOptionString() { } /** Startup options */ - static public enum StartupOption{ + enum StartupOption{ FORMAT ("-format"), CLUSTERID ("-clusterid"), GENCLUSTERID ("-genclusterid"), @@ -137,7 +198,7 @@ static public enum StartupOption{ // Used only with recovery option private int force = 0; - private StartupOption(String arg) {this.name = arg;} + StartupOption(String arg) {this.name = arg;} public String getName() {return name;} public NamenodeRole toNodeRole() { switch(this) { @@ -221,21 +282,21 @@ static public StartupOption getEnum(String value) { } // Timeouts for communicating with DataNode for streaming writes/reads - public static final int READ_TIMEOUT = 60 * 1000; - public static final int READ_TIMEOUT_EXTENSION = 5 * 1000; - public static final int WRITE_TIMEOUT = 8 * 60 * 1000; - public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline + int READ_TIMEOUT = 60 * 1000; + int READ_TIMEOUT_EXTENSION = 5 * 1000; + int WRITE_TIMEOUT = 8 * 60 * 1000; + int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline /** * Defines the NameNode role. */ - static public enum NamenodeRole { + enum NamenodeRole { NAMENODE ("NameNode"), BACKUP ("Backup Node"), CHECKPOINT("Checkpoint Node"); private String description = null; - private NamenodeRole(String arg) {this.description = arg;} + NamenodeRole(String arg) {this.description = arg;} @Override public String toString() { @@ -246,7 +307,7 @@ public String toString() { /** * Block replica states, which it can go through while being constructed. */ - static public enum ReplicaState { + enum ReplicaState { /** Replica is finalized. The state when replica is not modified. */ FINALIZED(0), /** Replica is being written to. */ @@ -260,7 +321,7 @@ static public enum ReplicaState { private final int value; - private ReplicaState(int v) { + ReplicaState(int v) { value = v; } @@ -286,7 +347,7 @@ public void write(DataOutput out) throws IOException { /** * States, which a block can go through while it is under construction. */ - static public enum BlockUCState { + enum BlockUCState { /** * Block construction completed.
* The block has at least the configured minimal replication number @@ -315,16 +376,16 @@ static public enum BlockUCState { * {@link ReplicaState#FINALIZED} * replicas has yet been reported by data-nodes themselves. */ - COMMITTED; + COMMITTED } - public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode"; - public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000; + String NAMENODE_LEASE_HOLDER = "HDFS_NameNode"; + long NAMENODE_LEASE_RECHECK_INTERVAL = 2000; - public static final String CRYPTO_XATTR_ENCRYPTION_ZONE = + String CRYPTO_XATTR_ENCRYPTION_ZONE = "raw.hdfs.crypto.encryption.zone"; - public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO = + String CRYPTO_XATTR_FILE_ENCRYPTION_INFO = "raw.hdfs.crypto.file.encryption.info"; - public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER = + String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER = "security.hdfs.unreadable.by.superuser"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java index f40b079ffd..50363c9f63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java @@ -26,7 +26,6 @@ import java.util.SortedSet; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature; @@ -222,8 +221,8 @@ public void setServiceLayoutVersion(int lv) { } public int getServiceLayoutVersion() { - return storageType == NodeType.DATA_NODE ? HdfsConstants.DATANODE_LAYOUT_VERSION - : HdfsConstants.NAMENODE_LAYOUT_VERSION; + return storageType == NodeType.DATA_NODE ? 
HdfsServerConstants.DATANODE_LAYOUT_VERSION + : HdfsServerConstants.NAMENODE_LAYOUT_VERSION; } public Map<Integer, SortedSet<LayoutFeature>> getServiceLayoutFeatureMap() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java index 94493aa1d9..04700b88b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java @@ -33,7 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DataChecksum; @@ -85,7 +85,7 @@ public static DataChecksum readDataChecksum(File metaFile) throws IOException { DataInputStream in = null; try { in = new DataInputStream(new BufferedInputStream( - new FileInputStream(metaFile), HdfsConstants.IO_FILE_BUFFER_SIZE)); + new FileInputStream(metaFile), HdfsServerConstants.IO_FILE_BUFFER_SIZE)); return readDataChecksum(in, metaFile); } finally { IOUtils.closeStream(in); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index d26a9a591b..09a2d8bb50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -24,8 +24,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.HardLink; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; @@ -267,7 +267,7 @@ private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOExce LOG.info("Formatting block pool " + blockpoolID + " directory " + bpSdir.getCurrentDir()); bpSdir.clearDirectory(); // create directory - this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; + this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION; this.cTime = nsInfo.getCTime(); this.namespaceID = nsInfo.getNamespaceID(); this.blockpoolID = nsInfo.getBlockPoolID(); @@ -361,7 +361,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, } readProperties(sd); checkVersionUpgradable(this.layoutVersion); - assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION + assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION : "Future version is not allowed"; if (getNamespaceID() != nsInfo.getNamespaceID()) { throw new IOException("Incompatible namespaceIDs in " @@ -375,17 +375,17 @@ private void doTransition(DataNode datanode, StorageDirectory sd, + nsInfo.getBlockPoolID() + "; datanode
blockpoolID = " + blockpoolID); } - if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION + if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION && this.cTime == nsInfo.getCTime()) { return; // regular startup } - if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) { + if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) { int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd)); LOG.info("Restored " + restored + " block files from trash " + "before the layout upgrade. These blocks will be moved to " + "the previous directory during the upgrade"); } - if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION + if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { doUpgrade(datanode, sd, nsInfo); // upgrade return; @@ -425,7 +425,7 @@ void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo) } LOG.info("Upgrading block pool storage directory " + bpSd.getRoot() + ".\n old LV = " + this.getLayoutVersion() + "; old CTime = " - + this.getCTime() + ".\n new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION + + this.getCTime() + ".\n new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION + "; new CTime = " + nsInfo.getCTime()); // get /previous directory String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath()); @@ -454,7 +454,7 @@ void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo) // 3. Create new /current with block files hardlinks and VERSION linkAllBlocks(datanode, bpTmpDir, bpCurDir); - this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; + this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION; assert this.namespaceID == nsInfo.getNamespaceID() : "Data-node and name-node layout versions must be the same."; this.cTime = nsInfo.getCTime(); @@ -563,13 +563,13 @@ void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo) // the namespace state or can be further upgraded to it. 
// In another word, we can only roll back when ( storedLV >= software LV) // && ( DN.previousCTime <= NN.ctime) - if (!(prevInfo.getLayoutVersion() >= HdfsConstants.DATANODE_LAYOUT_VERSION && + if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION && prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback throw new InconsistentFSStateException(bpSd.getRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = " + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() + " is newer than the namespace state: LV = " - + HdfsConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime()); + + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime()); } LOG.info("Rolling back storage directory " + bpSd.getRoot() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index c0be956e8e..10692d4dd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -41,13 +41,13 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver; import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -246,7 +246,7 @@ class BlockReceiver implements Closeable { out.getClass()); } this.checksumOut = new DataOutputStream(new BufferedOutputStream( - streams.getChecksumOut(), HdfsConstants.SMALL_BUFFER_SIZE)); + streams.getChecksumOut(), HdfsServerConstants.SMALL_BUFFER_SIZE)); // write data chunk header if creating a new replica if (isCreate) { BlockMetadataHeader.writeHeader(checksumOut, diskChecksum); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index e76b93a5fd..6b958a2acd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -35,8 +35,8 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference; import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream; import org.apache.hadoop.hdfs.util.DataTransferThrottler; @@ -105,7 +105,7 @@ class BlockSender implements java.io.Closeable { */ private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024; private static final int TRANSFERTO_BUFFER_SIZE = Math.max( - HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO); + HdfsServerConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO); /** the block to read from */ private final ExtendedBlock block; @@ -298,7 +298,7 @@ class BlockSender implements java.io.Closeable { // storage and computes the checksum. if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) { checksumIn = new DataInputStream(new BufferedInputStream( - metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); + metaIn, HdfsServerConstants.IO_FILE_BUFFER_SIZE)); csum = BlockMetadataHeader.readDataChecksum(checksumIn, block); keepMetaInOpen = true; @@ -747,7 +747,7 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream, pktBufSize += checksumSize * maxChunksPerPacket; } else { maxChunksPerPacket = Math.max(1, - numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE)); + numberOfChunks(HdfsServerConstants.IO_FILE_BUFFER_SIZE)); // Packet size includes both checksum and data pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index f042dff6d2..22b1520c7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -2156,7 +2156,7 @@ public void run() { unbufIn = saslStreams.in; out = new DataOutputStream(new BufferedOutputStream(unbufOut, - HdfsConstants.SMALL_BUFFER_SIZE)); + HdfsServerConstants.SMALL_BUFFER_SIZE)); in = new DataInputStream(unbufIn); blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, true, DataNode.this, null, cachingStrategy); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 77fcfedb96..089e0329ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; @@ -60,7 +60,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -466,7 +465,7 @@ synchronized void removeVolumes(final Set<File> dirsToRemove) void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<StorageLocation> dataDirs, StartupOption startOpt) throws IOException { if (this.initialized) { - LOG.info("DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION + LOG.info("DataNode version: " + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " and NameNode layout version: " + nsInfo.getLayoutVersion()); this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size()); // mark DN storage is initialized @@ -509,7 +508,7 @@ static void makeBlockPoolDataDir(Collection<File> dataDirs, void format(StorageDirectory sd, NamespaceInfo nsInfo, String datanodeUuid) throws IOException { sd.clearDirectory(); // create directory - this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; + this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION; this.clusterID = nsInfo.getClusterID(); this.namespaceID = nsInfo.getNamespaceID(); this.cTime = 0; @@ -664,7 +663,7 @@ private void doTransition( DataNode datanode, } readProperties(sd); checkVersionUpgradable(this.layoutVersion); - assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION : + assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION : "Future version is not allowed"; boolean federationSupported = @@ -696,13 +695,13 @@ private void doTransition( DataNode datanode, DatanodeStorage.isValidStorageId(sd.getStorageUuid()); // regular start up. - if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION) { + if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION) { createStorageID(sd, !haveValidStorageId); return; // regular startup } // do upgrade - if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) { + if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) { doUpgrade(datanode, sd, nsInfo); // upgrade createStorageID(sd, !haveValidStorageId); return; @@ -714,7 +713,7 @@ private void doTransition( DataNode datanode, // failed. throw new IOException("BUG: The stored LV = " + this.getLayoutVersion() + " is newer than the supported LV = " - + HdfsConstants.DATANODE_LAYOUT_VERSION); + + HdfsServerConstants.DATANODE_LAYOUT_VERSION); } /** @@ -749,9 +748,9 @@ void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo) // field and overwrite the file. The upgrade work is handled by // {@link BlockPoolSliceStorage#doUpgrade} LOG.info("Updating layout version from " + layoutVersion + " to " - + HdfsConstants.DATANODE_LAYOUT_VERSION + " for storage " + + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " for storage " + sd.getRoot()); - layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; + layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION; writeProperties(sd); return; } @@ -759,7 +758,7 @@ void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo) LOG.info("Upgrading storage directory " + sd.getRoot() + ".\n old LV = " + this.getLayoutVersion() + "; old CTime = " + this.getCTime() - + ".\n new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION + + ".\n new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION + "; new CTime = " + nsInfo.getCTime()); File curDir = sd.getCurrentDir(); @@ -790,7 +789,7 @@ void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo) STORAGE_DIR_CURRENT)); // 4.
Write version file under /current - layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; + layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION; clusterID = nsInfo.getClusterID(); writeProperties(sd); @@ -848,11 +847,11 @@ void doRollback( StorageDirectory sd, // This is a regular startup or a post-federation rollback if (!prevDir.exists()) { if (DataNodeLayoutVersion.supports(LayoutVersion.Feature.FEDERATION, - HdfsConstants.DATANODE_LAYOUT_VERSION)) { - readProperties(sd, HdfsConstants.DATANODE_LAYOUT_VERSION); + HdfsServerConstants.DATANODE_LAYOUT_VERSION)) { + readProperties(sd, HdfsServerConstants.DATANODE_LAYOUT_VERSION); writeProperties(sd); LOG.info("Layout version rolled back to " - + HdfsConstants.DATANODE_LAYOUT_VERSION + " for storage " + + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " for storage " + sd.getRoot()); } return; @@ -862,16 +861,16 @@ void doRollback( StorageDirectory sd, // We allow rollback to a state, which is either consistent with // the namespace state or can be further upgraded to it. - if (!(prevInfo.getLayoutVersion() >= HdfsConstants.DATANODE_LAYOUT_VERSION + if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION && prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback throw new InconsistentFSStateException(sd.getRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = " + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() + " is newer than the namespace state: LV = " - + HdfsConstants.DATANODE_LAYOUT_VERSION + " CTime = " + + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime()); LOG.info("Rolling back storage directory " + sd.getRoot() - + ".\n target LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION + + ".\n target LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION + "; target CTime = " + nsInfo.getCTime()); File tmpDir = sd.getRemovedTmp(); assert !tmpDir.exists() : "removed.tmp directory must not exist."; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 3b014e8525..de25579b83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -191,7 +191,7 @@ public void run() { socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId()); input = new BufferedInputStream(saslStreams.in, - HdfsConstants.SMALL_BUFFER_SIZE); + HdfsServerConstants.SMALL_BUFFER_SIZE); socketOut = saslStreams.out; } catch (InvalidMagicNumberException imne) { if (imne.isHandshake4Encryption()) { @@ -514,7 +514,7 @@ public void readBlock(final ExtendedBlock block, long read = 0; OutputStream baseStream = getOutputStream(); DataOutputStream out = new DataOutputStream(new BufferedOutputStream( - baseStream, HdfsConstants.SMALL_BUFFER_SIZE)); + baseStream, HdfsServerConstants.SMALL_BUFFER_SIZE)); checkAccess(out, true, block, blockToken, Op.READ_BLOCK, BlockTokenIdentifier.AccessMode.READ); @@ -658,7 +658,7 @@ public void writeBlock(final ExtendedBlock block, final DataOutputStream replyOut = new DataOutputStream( new BufferedOutputStream( getOutputStream(), - HdfsConstants.SMALL_BUFFER_SIZE)); + HdfsServerConstants.SMALL_BUFFER_SIZE)); checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE); 
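As an aside, the buffering idiom these DataXceiver hunks repeat, sketched once; StreamSketch, rawIn and rawOut are placeholders, not names from the patch:

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

class StreamSketch {
  // Control traffic (headers, acks) gets the small buffer; bulk data paths
  // get the full io.file.buffer.size, both read from HdfsServerConstants.
  static DataOutputStream wrapControl(OutputStream rawOut) {
    return new DataOutputStream(new BufferedOutputStream(
        rawOut, HdfsServerConstants.SMALL_BUFFER_SIZE));
  }
  static DataInputStream wrapBulk(InputStream rawIn) {
    return new DataInputStream(new BufferedInputStream(
        rawIn, HdfsServerConstants.IO_FILE_BUFFER_SIZE));
  }
}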
@@ -717,7 +717,7 @@ public void writeBlock(final ExtendedBlock block, unbufMirrorOut = saslStreams.out; unbufMirrorIn = saslStreams.in; mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, - HdfsConstants.SMALL_BUFFER_SIZE)); + HdfsServerConstants.SMALL_BUFFER_SIZE)); mirrorIn = new DataInputStream(unbufMirrorIn); // Do not propagate allowLazyPersist to downstream DataNodes. @@ -932,7 +932,7 @@ public void blockChecksum(final ExtendedBlock block, .getMetaDataInputStream(block); final DataInputStream checksumIn = new DataInputStream( - new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); + new BufferedInputStream(metadataIn, HdfsServerConstants.IO_FILE_BUFFER_SIZE)); updateCurrentThreadName("Getting checksum for block " + block); try { //read metadata file @@ -1024,7 +1024,7 @@ public void copyBlock(final ExtendedBlock block, // set up response stream OutputStream baseStream = getOutputStream(); reply = new DataOutputStream(new BufferedOutputStream( - baseStream, HdfsConstants.SMALL_BUFFER_SIZE)); + baseStream, HdfsServerConstants.SMALL_BUFFER_SIZE)); // send status first writeSuccessWithChecksumInfo(blockSender, reply); @@ -1132,9 +1132,9 @@ public void replaceBlock(final ExtendedBlock block, unbufProxyIn = saslStreams.in; proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut, - HdfsConstants.SMALL_BUFFER_SIZE)); + HdfsServerConstants.SMALL_BUFFER_SIZE)); proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn, - HdfsConstants.IO_FILE_BUFFER_SIZE)); + HdfsServerConstants.IO_FILE_BUFFER_SIZE)); /* send request to the proxy */ IoeDuringCopyBlockOperation = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index f6b058b053..62885a9127 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.util.Daemon; @@ -305,7 +305,7 @@ public int hashCode() { public long getGenStamp() { return metaSuffix != null ? 
Block.getGenerationStamp( getMetaFile().getName()) : - HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP; + HdfsConstants.GRANDFATHER_GENERATION_STAMP; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 6daf03944e..414d07857d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; @@ -612,7 +612,7 @@ private long validateIntegrityAndSetLength(File blockFile, long genStamp) { } checksumIn = new DataInputStream( new BufferedInputStream(new FileInputStream(metaFile), - HdfsConstants.IO_FILE_BUFFER_SIZE)); + HdfsServerConstants.IO_FILE_BUFFER_SIZE)); // read and handle the common header here. For now just a version final DataChecksum checksum = BlockMetadataHeader.readDataChecksum( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index b87daecefc..d72b8bfc63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -65,8 +65,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; @@ -957,7 +957,7 @@ private static void computeChecksum(File srcMeta, File dstMeta, File blockFile) } } metaOut = new DataOutputStream(new BufferedOutputStream( - new FileOutputStream(dstMeta), HdfsConstants.SMALL_BUFFER_SIZE)); + new FileOutputStream(dstMeta), HdfsServerConstants.SMALL_BUFFER_SIZE)); BlockMetadataHeader.writeHeader(metaOut, checksum); int offset = 0; @@ -2128,7 +2128,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile, final long diskGS = diskMetaFile != null && diskMetaFile.exists() ? 
Block.getGenerationStamp(diskMetaFile.getName()) : - HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP; + HdfsConstants.GRANDFATHER_GENERATION_STAMP; if (diskFile == null || !diskFile.exists()) { if (memBlockInfo == null) { @@ -2229,7 +2229,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile, // as the block file, then use the generation stamp from it long gs = diskMetaFile != null && diskMetaFile.exists() && diskMetaFile.getParent().equals(memFile.getParent()) ? diskGS - : HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP; + : HdfsConstants.GRANDFATHER_GENERATION_STAMP; LOG.warn("Updating generation stamp for block " + blockId + " from " + memBlockInfo.getGenerationStamp() + " to " + gs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java index 193270601c..8c5b4a13b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; /** Utility methods. */ @@ -88,7 +88,7 @@ static long getGenerationStampFromFile(File[] listdir, File blockFile) { return Block.getGenerationStamp(listdir[j].getName()); } FsDatasetImpl.LOG.warn("Block " + blockFile + " does not have a metafile!"); - return HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP; + return HdfsConstants.GRANDFATHER_GENERATION_STAMP; } /** Find the corresponding meta data file from a given block file */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index f1beb759d0..b32220c31e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -330,7 +330,7 @@ private boolean processRecursively(String parent, HdfsFileStatus status) { private boolean processFile(String fullPath, HdfsLocatedFileStatus status) { final byte policyId = status.getStoragePolicy(); // currently we ignore files with unspecified storage policy - if (policyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { + if (policyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { return false; } final BlockStoragePolicy policy = blockStoragePolicies[policyId]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 430f00c881..e6560ee429 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -29,12 +29,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.NameNodeProxies; 
-import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; @@ -156,7 +156,7 @@ protected void initialize(Configuration conf) throws IOException { // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( - HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); + HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); // register with the active name-node registerWith(nsInfo); @@ -414,9 +414,9 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode) LOG.error(errorMsg); throw new IOException(errorMsg); } - assert HdfsConstants.NAMENODE_LAYOUT_VERSION == nsInfo.getLayoutVersion() : + assert HdfsServerConstants.NAMENODE_LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Active and backup node layout versions must be the same. Expected: " - + HdfsConstants.NAMENODE_LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); + + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java index 36494374cc..689caccdc7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java @@ -21,8 +21,8 @@ import java.io.ByteArrayInputStream; import java.io.IOException; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** * An implementation of the abstract class {@link EditLogInputStream}, @@ -130,12 +130,12 @@ void clear() throws IOException { @Override public long getFirstTxId() { - return HdfsConstants.INVALID_TXID; + return HdfsServerConstants.INVALID_TXID; } @Override public long getLastTxId() { - return HdfsConstants.INVALID_TXID; + return HdfsServerConstants.INVALID_TXID; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 974860caf5..3e21c24289 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -33,9 +33,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; -import 
org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.hdfs.protocol.LayoutVersion; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException; @@ -85,7 +85,7 @@ static private enum State { */ EditLogFileInputStream(File name) throws LogHeaderCorruptException, IOException { - this(name, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false); + this(name, HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false); } /** @@ -203,7 +203,7 @@ private FSEditLogOp nextOpImpl(boolean skipBrokenEdits) throws IOException { if ((op != null) && (op.hasTransactionId())) { long txId = op.getTransactionId(); if ((txId >= lastTxId) && - (lastTxId != HdfsConstants.INVALID_TXID)) { + (lastTxId != HdfsServerConstants.INVALID_TXID)) { // // Sometimes, the NameNode crashes while it's writing to the // edit log. In that case, you can end up with an unfinalized edit log @@ -310,7 +310,7 @@ static FSEditLogLoader.EditLogValidation validateEditLog(File file) // If the header is malformed or the wrong value, this indicates a corruption LOG.warn("Log file " + file + " has no valid header", e); return new FSEditLogLoader.EditLogValidation(0, - HdfsConstants.INVALID_TXID, true); + HdfsServerConstants.INVALID_TXID, true); } try { @@ -331,18 +331,18 @@ static FSEditLogLoader.EditLogValidation scanEditLog(File file) } catch (LogHeaderCorruptException e) { LOG.warn("Log file " + file + " has no valid header", e); return new FSEditLogLoader.EditLogValidation(0, - HdfsConstants.INVALID_TXID, true); + HdfsServerConstants.INVALID_TXID, true); } long lastPos = 0; - long lastTxId = HdfsConstants.INVALID_TXID; + long lastTxId = HdfsServerConstants.INVALID_TXID; long numValid = 0; try { while (true) { - long txid = HdfsConstants.INVALID_TXID; + long txid = HdfsServerConstants.INVALID_TXID; lastPos = in.getPosition(); try { - if ((txid = in.scanNextOp()) == HdfsConstants.INVALID_TXID) { + if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) { break; } } catch (Throwable t) { @@ -354,7 +354,7 @@ static FSEditLogLoader.EditLogValidation scanEditLog(File file) FSImage.LOG.warn("After resync, position is " + in.getPosition()); continue; } - if (lastTxId == HdfsConstants.INVALID_TXID || txid > lastTxId) { + if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) { lastTxId = txid; } numValid++; @@ -382,12 +382,12 @@ static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion) "Reached EOF when reading log header"); } if (verifyLayoutVersion && - (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version + (logVersion < HdfsServerConstants.NAMENODE_LAYOUT_VERSION || // future version logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported throw new LogHeaderCorruptException( "Unexpected version of the file system log file: " + logVersion + ". 
Current version = " - + HdfsConstants.NAMENODE_LAYOUT_VERSION + "."); + + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "."); } return logVersion; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java index ac58616592..a4377cddcd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java @@ -19,7 +19,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import java.io.Closeable; import java.io.IOException; @@ -112,7 +112,7 @@ public void resync() { */ protected long scanNextOp() throws IOException { FSEditLogOp next = readOp(); - return next != null ? next.txid : HdfsConstants.INVALID_TXID; + return next != null ? next.txid : HdfsServerConstants.INVALID_TXID; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index f1da3fb9b0..76a9eacc55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -21,7 +21,7 @@ import java.io.OutputStream; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; @@ -141,7 +141,7 @@ public TxnBuffer(int initBufferSize) { } public void writeOp(FSEditLogOp op) throws IOException { - if (firstTxId == HdfsConstants.INVALID_TXID) { + if (firstTxId == HdfsServerConstants.INVALID_TXID) { firstTxId = op.txid; } else { assert op.txid > firstTxId; @@ -153,7 +153,7 @@ public void writeOp(FSEditLogOp op) throws IOException { @Override public DataOutputBuffer reset() { super.reset(); - firstTxId = HdfsConstants.INVALID_TXID; + firstTxId = HdfsServerConstants.INVALID_TXID; numTxns = 0; return this; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index e77ddaeb35..c636d933ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import 
org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -139,7 +138,7 @@ static ContentSummary getContentSummary( } private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) { - return inodePolicy != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy : + return inodePolicy != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy : parentPolicy; } @@ -175,7 +174,7 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip, if (targetNode == null) return null; byte parentStoragePolicy = isSuperUser ? - targetNode.getStoragePolicyID() : HdfsConstantsClient + targetNode.getStoragePolicyID() : HdfsConstants .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; if (!targetNode.isDirectory()) { @@ -202,7 +201,7 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip, INode cur = contents.get(startChild+i); byte curPolicy = isSuperUser && !cur.isSymlink()? cur.getLocalStoragePolicyID(): - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; INodeAttributes nodeAttrs = getINodeAttributes( fsd, src, cur.getLocalNameBytes(), cur, snapshot); @@ -265,7 +264,7 @@ private static DirectoryListing getSnapshotsListing( listing[i] = createFileStatus( fsd, sRoot.getLocalNameBytes(), sRoot, nodeAttrs, - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, INodesInPath.fromINode(sRoot)); } @@ -293,8 +292,8 @@ static HdfsFileStatus getFileInfo( } byte policyId = includeStoragePolicy && !i.isSymlink() ? - i.getStoragePolicyID() : HdfsConstantsClient - .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + i.getStoragePolicyID() : + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; INodeAttributes nodeAttrs = getINodeAttributes( fsd, path, HdfsFileStatus.EMPTY_NAME, i, src.getPathSnapshotId()); return createFileStatus( @@ -316,7 +315,7 @@ static HdfsFileStatus getFileInfo( if (fsd.getINode4DotSnapshot(srcs) != null) { return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null, - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED); + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED); } return null; } @@ -330,7 +329,6 @@ static HdfsFileStatus getFileInfo( } } - /** * create an hdfs file status from an inode * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 0189f63117..bf538ed21c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; @@ -60,6 +59,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.util.ByteArray; @@ -742,7 +742,7 @@ public EnumCounters getStorageTypeDeltas(byte storagePolicyID, EnumCounters typeSpaceDeltas = new EnumCounters(StorageType.class); // Storage type and its quota are only available when storage policy is set - if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { + if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { BlockStoragePolicy storagePolicy = getBlockManager().getStoragePolicy(storagePolicyID); if (oldRep != newRep) { @@ -884,7 +884,7 @@ static void verifyQuota(INodesInPath iip, int pos, QuotaCounts deltas, /** Verify if the inode name is legal. */ void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException { - if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) { + if (Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) { String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name."; if (!namesystem.isImageLoaded()) { s += " Please rename it before upgrade."; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index df9c58528f..bda827aee1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -43,9 +43,9 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -159,7 +159,7 @@ private enum State { // the first txid of the log that's currently open for writing. // If this value is N, we are currently writing to edits_inprogress_N - private long curSegmentTxId = HdfsConstants.INVALID_TXID; + private long curSegmentTxId = HdfsServerConstants.INVALID_TXID; // the time of printing the statistics to the log file. 
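// These FSEditLog hunks move the transaction-id sentinel from the
// client-facing HdfsConstants to the server-side HdfsServerConstants.
// A minimal sketch of the sentinel pattern the call sites above and
// below rely on (isSegmentOpen is a hypothetical helper, not part of
// this patch):
//
//   private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
//
//   boolean isSegmentOpen() {
//     // INVALID_TXID means no edits_inprogress_N segment is open yet.
//     return curSegmentTxId != HdfsServerConstants.INVALID_TXID;
//   }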
private long lastPrintTime; @@ -1338,7 +1338,7 @@ public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) { return; } - assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op + assert curSegmentTxId == HdfsServerConstants.INVALID_TXID || // on format this is no-op minTxIdToKeep <= curSegmentTxId : "cannot purge logs older than txid " + minTxIdToKeep + " when current segment starts at " + curSegmentTxId; @@ -1602,7 +1602,7 @@ private void checkForGaps(List streams, long fromTxId, EditLogInputStream elis = iter.next(); if (elis.getFirstTxId() > txId) break; long next = elis.getLastTxId(); - if (next == HdfsConstants.INVALID_TXID) { + if (next == HdfsServerConstants.INVALID_TXID) { if (!inProgressOk) { throw new RuntimeException("inProgressOk = false, but " + "selectInputStreams returned an in-progress edit " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index d234cf22eb..eaa2e77c76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -35,16 +35,16 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.XAttrSetFlag; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; @@ -302,7 +302,7 @@ private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion, long lastInodeId) throws IOException { long inodeId = inodeIdFromOp; - if (inodeId == HdfsConstantsClient.GRANDFATHER_INODE_ID) { + if (inodeId == HdfsConstants.GRANDFATHER_INODE_ID) { if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.ADD_INODE_ID, logVersion)) { throw new IOException("The layout version " + logVersion @@ -322,7 +322,7 @@ private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion, @SuppressWarnings("deprecation") private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, StartupOption startOpt, int logVersion, long lastInodeId) throws IOException { - long inodeId = HdfsConstantsClient.GRANDFATHER_INODE_ID; + long inodeId = HdfsConstants.GRANDFATHER_INODE_ID; if (LOG.isTraceEnabled()) { LOG.trace("replaying edit log: " + op); } @@ -380,7 +380,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( fsNamesys.dir, path, 
HdfsFileStatus.EMPTY_NAME, newFile, - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, stat); @@ -400,7 +400,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile, - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat)); @@ -473,7 +473,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file, - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId, appendOp.rpcCallId, new LastBlockWithStatus(lb, stat)); @@ -1095,7 +1095,7 @@ private void check203UpgradeFailure(int logVersion, Throwable e) // The editlog must be emptied by restarting the namenode, before proceeding // with the upgrade. if (Storage.is203LayoutVersion(logVersion) - && logVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) { + && logVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) { String msg = "During upgrade failed to load the editlog version " + logVersion + " from release 0.20.203. Please go back to the old " + " release and restart the namenode. 
This empties the editlog " @@ -1112,7 +1112,7 @@ private void check203UpgradeFailure(int logVersion, Throwable e) */ static EditLogValidation validateEditLog(EditLogInputStream in) { long lastPos = 0; - long lastTxId = HdfsConstants.INVALID_TXID; + long lastTxId = HdfsServerConstants.INVALID_TXID; long numValid = 0; FSEditLogOp op = null; while (true) { @@ -1129,7 +1129,7 @@ static EditLogValidation validateEditLog(EditLogInputStream in) { FSImage.LOG.warn("After resync, position is " + in.getPosition()); continue; } - if (lastTxId == HdfsConstants.INVALID_TXID + if (lastTxId == HdfsServerConstants.INVALID_TXID || op.getTransactionId() > lastTxId) { lastTxId = op.getTransactionId(); } @@ -1140,7 +1140,7 @@ static EditLogValidation validateEditLog(EditLogInputStream in) { static EditLogValidation scanEditLog(EditLogInputStream in) { long lastPos = 0; - long lastTxId = HdfsConstants.INVALID_TXID; + long lastTxId = HdfsServerConstants.INVALID_TXID; long numValid = 0; FSEditLogOp op = null; while (true) { @@ -1157,7 +1157,7 @@ static EditLogValidation scanEditLog(EditLogInputStream in) { FSImage.LOG.warn("After resync, position is " + in.getPosition()); continue; } - if (lastTxId == HdfsConstants.INVALID_TXID + if (lastTxId == HdfsServerConstants.INVALID_TXID || op.getTransactionId() > lastTxId) { lastTxId = op.getTransactionId(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 0daf12fd82..c8e565e2c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -101,13 +101,13 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; @@ -148,7 +148,7 @@ public abstract class FSEditLogOp { int rpcCallId; final void reset() { - txid = HdfsConstants.INVALID_TXID; + txid = HdfsServerConstants.INVALID_TXID; rpcClientId = RpcConstants.DUMMY_CLIENT_ID; rpcCallId = RpcConstants.INVALID_CALL_ID; resetSubFields(); @@ -241,16 +241,16 @@ protected FSEditLogOp(FSEditLogOpCodes opCode) { } public long getTransactionId() { - Preconditions.checkState(txid != HdfsConstants.INVALID_TXID); + Preconditions.checkState(txid != HdfsServerConstants.INVALID_TXID); return txid; } public String getTransactionIdStr() { - return (txid == HdfsConstants.INVALID_TXID) ? "(none)" : "" + txid; + return (txid == HdfsServerConstants.INVALID_TXID) ? 
"(none)" : "" + txid; } public boolean hasTransactionId() { - return (txid != HdfsConstants.INVALID_TXID); + return (txid != HdfsServerConstants.INVALID_TXID); } public void setTransactionId(long txid) { @@ -433,7 +433,7 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin private AddCloseOp(FSEditLogOpCodes opCode) { super(opCode); - storagePolicyId = HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND); } @@ -577,7 +577,7 @@ void readFields(DataInputStream in, int logVersion) this.inodeId = in.readLong(); } else { // The inodeId should be updated when this editLogOp is applied - this.inodeId = HdfsConstantsClient.GRANDFATHER_INODE_ID; + this.inodeId = HdfsConstants.GRANDFATHER_INODE_ID; } if ((-17 < logVersion && length != 4) || (logVersion <= -17 && length != 5 && !NameNodeLayoutVersion.supports( @@ -635,7 +635,7 @@ void readFields(DataInputStream in, int logVersion) NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) { this.storagePolicyId = FSImageSerialization.readByte(in); } else { - this.storagePolicyId = HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + this.storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; } // read clientId and callId readRpcIds(in, logVersion); @@ -1652,7 +1652,7 @@ void readFields(DataInputStream in, int logVersion) throws IOException { this.inodeId = FSImageSerialization.readLong(in); } else { // This id should be updated when this editLogOp is applied - this.inodeId = HdfsConstantsClient.GRANDFATHER_INODE_ID; + this.inodeId = HdfsConstants.GRANDFATHER_INODE_ID; } this.path = FSImageSerialization.readString(in); if (NameNodeLayoutVersion.supports( @@ -2545,7 +2545,7 @@ void readFields(DataInputStream in, int logVersion) this.inodeId = FSImageSerialization.readLong(in); } else { // This id should be updated when the editLogOp is applied - this.inodeId = HdfsConstantsClient.GRANDFATHER_INODE_ID; + this.inodeId = HdfsConstants.GRANDFATHER_INODE_ID; } this.path = FSImageSerialization.readString(in); this.value = FSImageSerialization.readString(in); @@ -4689,7 +4689,7 @@ private FSEditLogOp decodeOp() throws IOException { // Read the txid op.setTransactionId(in.readLong()); } else { - op.setTransactionId(HdfsConstants.INVALID_TXID); + op.setTransactionId(HdfsServerConstants.INVALID_TXID); } op.readFields(in, logVersion); @@ -4712,13 +4712,13 @@ public long scanOp() throws IOException { try { opCodeByte = in.readByte(); // op code } catch (EOFException e) { - return HdfsConstants.INVALID_TXID; + return HdfsServerConstants.INVALID_TXID; } FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte); if (opCode == OP_INVALID) { verifyTerminator(); - return HdfsConstants.INVALID_TXID; + return HdfsServerConstants.INVALID_TXID; } int length = in.readInt(); // read the length of the op @@ -4730,7 +4730,7 @@ public long scanOp() throws IOException { return txid; } else { FSEditLogOp op = decodeOp(); - return op == null ? HdfsConstants.INVALID_TXID : op.getTransactionId(); + return op == null ? 
HdfsServerConstants.INVALID_TXID : op.getTransactionId(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 433b05a28b..6a54967d7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -44,9 +44,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -230,7 +230,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, if (startOpt == StartupOption.METADATAVERSION) { System.out.println("HDFS Image Version: " + layoutVersion); System.out.println("Software format version: " + - HdfsConstants.NAMENODE_LAYOUT_VERSION); + HdfsServerConstants.NAMENODE_LAYOUT_VERSION); return false; } @@ -241,11 +241,11 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, && startOpt != StartupOption.UPGRADEONLY && !RollingUpgradeStartupOption.STARTED.matches(startOpt) && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION - && layoutVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) { + && layoutVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) { throw new IOException( "\nFile system image contains an old layout version " + storage.getLayoutVersion() + ".\nAn upgrade to version " - + HdfsConstants.NAMENODE_LAYOUT_VERSION + " is required.\n" + + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " is required.\n" + "Please restart NameNode with the \"" + RollingUpgradeStartupOption.STARTED.getOptionString() + "\" option if a rolling upgrade is already started;" @@ -397,7 +397,7 @@ void doUpgrade(FSNamesystem target) throws IOException { long oldCTime = storage.getCTime(); storage.cTime = now(); // generate new cTime for the state int oldLV = storage.getLayoutVersion(); - storage.layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION; + storage.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION; List errorSDs = Collections.synchronizedList(new ArrayList()); @@ -458,11 +458,11 @@ void doRollback(FSNamesystem fsns) throws IOException { boolean canRollback = false; FSImage prevState = new FSImage(conf); try { - prevState.getStorage().layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION; + prevState.getStorage().layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION; for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); if (!NNUpgradeUtil.canRollBack(sd, storage, prevState.getStorage(), - HdfsConstants.NAMENODE_LAYOUT_VERSION)) { + HdfsServerConstants.NAMENODE_LAYOUT_VERSION)) { continue; } LOG.info("Can perform rollback for " + sd); @@ -473,7 +473,7 @@ void doRollback(FSNamesystem fsns) throws IOException { // If HA is enabled, check if the shared log can be rolled back as well. 
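// NAMENODE_LAYOUT_VERSION likewise moves to HdfsServerConstants. HDFS
// layout versions are negative and decrease as the format evolves, which
// is why the upgrade and rollback checks in FSImage compare with > and <.
// A rough sketch of the convention (isFromOlderRelease is a hypothetical
// name, not part of this patch):
//
//   static boolean isFromOlderRelease(int storedLayoutVersion) {
//     // An older on-disk layout is numerically greater than the
//     // software's current (more negative) layout version.
//     return storedLayoutVersion > HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
//   }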
editLog.initJournalsForWrite(); boolean canRollBackSharedEditLog = editLog.canRollBackSharedLog( - prevState.getStorage(), HdfsConstants.NAMENODE_LAYOUT_VERSION); + prevState.getStorage(), HdfsServerConstants.NAMENODE_LAYOUT_VERSION); if (canRollBackSharedEditLog) { LOG.info("Can perform rollback for shared edit log."); canRollback = true; @@ -839,7 +839,7 @@ private long loadEdits(Iterable editStreams, lastAppliedTxId = loader.getLastAppliedTxId(); } // If we are in recovery mode, we may have skipped over some txids. - if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) { + if (editIn.getLastTxId() != HdfsServerConstants.INVALID_TXID) { lastAppliedTxId = editIn.getLastTxId(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 906213e119..d1d007fd2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; @@ -1046,10 +1047,10 @@ public Snapshot getSnapshot(DataInput in) throws IOException { @VisibleForTesting public static void useDefaultRenameReservedPairs() { renameReservedMap.clear(); - for (String key: HdfsConstants.RESERVED_PATH_COMPONENTS) { + for (String key: HdfsServerConstants.RESERVED_PATH_COMPONENTS) { renameReservedMap.put( key, - key + "." + HdfsConstants.NAMENODE_LAYOUT_VERSION + "." + key + "." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "." 
+ "UPGRADE_RENAMED"); } } @@ -1147,7 +1148,7 @@ private static byte[] renameReservedComponentOnUpgrade(byte[] component, final int layoutVersion) { // If the LV doesn't support snapshots, we're doing an upgrade if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) { - if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) { + if (Arrays.equals(component, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES)) { Preconditions.checkArgument( renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR), RESERVED_ERROR_MSG); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 3ee848af24..24afcae4e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -42,11 +42,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection; @@ -143,7 +143,7 @@ public static final class Loader implements FSImageFormat.AbstractLoader { private long imgTxId; /** * Whether the image's layout version must be the same with - * {@link HdfsConstants#NAMENODE_LAYOUT_VERSION}. This is only set to true + * {@link HdfsServerConstants#NAMENODE_LAYOUT_VERSION}. This is only set to true * when we're doing (rollingUpgrade rollback). 
*/ private final boolean requireSameLayoutVersion; @@ -192,10 +192,10 @@ private void loadInternal(RandomAccessFile raFile, FileInputStream fin) } FileSummary summary = FSImageUtil.loadSummary(raFile); if (requireSameLayoutVersion && summary.getLayoutVersion() != - HdfsConstants.NAMENODE_LAYOUT_VERSION) { + HdfsServerConstants.NAMENODE_LAYOUT_VERSION) { throw new IOException("Image version " + summary.getLayoutVersion() + " is not equal to the software version " + - HdfsConstants.NAMENODE_LAYOUT_VERSION); + HdfsServerConstants.NAMENODE_LAYOUT_VERSION); } FileChannel channel = fin.getChannel(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java index d5243e0e30..3cd5ae1aee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java @@ -34,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; @@ -180,7 +180,7 @@ List getLatestImages() throws IOException { FSImageFile file = new FSImageFile(latestNameSD, NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE), - HdfsConstants.INVALID_TXID); + HdfsServerConstants.INVALID_TXID); LinkedList ret = new LinkedList(); ret.add(file); return ret; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java index cb1351bb11..dfda34ebef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java @@ -21,9 +21,9 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; /** @@ -69,7 +69,7 @@ static class FSImageFile { private final File file; FSImageFile(StorageDirectory sd, File file, long txId) { - assert txId >= 0 || txId == HdfsConstants.INVALID_TXID + assert txId >= 0 || txId == HdfsServerConstants.INVALID_TXID : "Invalid txid on " + file +": " + txId; this.sd = sd; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 809d5945e3..72e1d32eca 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -184,7 +184,6 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; @@ -212,6 +211,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; @@ -1021,7 +1021,7 @@ private void loadFSImage(StartupOption startOpt) throws IOException { private void updateStorageVersionForRollingUpgrade(final long layoutVersion, StartupOption startOpt) throws IOException { boolean rollingStarted = RollingUpgradeStartupOption.STARTED - .matches(startOpt) && layoutVersion > HdfsConstants + .matches(startOpt) && layoutVersion > HdfsServerConstants .NAMENODE_LAYOUT_VERSION; boolean rollingRollback = RollingUpgradeStartupOption.ROLLBACK .matches(startOpt); @@ -3234,7 +3234,7 @@ FileState analyzeFileState(String src, Block previousBlock = ExtendedBlock.getLocalBlock(previous); final INode inode; final INodesInPath iip; - if (fileId == HdfsConstantsClient.GRANDFATHER_INODE_ID) { + if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. @@ -3354,7 +3354,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId, //check lease final INode inode; - if (fileId == HdfsConstantsClient.GRANDFATHER_INODE_ID) { + if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. @@ -3411,7 +3411,7 @@ boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder) final INode inode; final INodesInPath iip; - if (fileId == HdfsConstantsClient.GRANDFATHER_INODE_ID) { + if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. @@ -3530,7 +3530,7 @@ private boolean completeFileInternal(String src, String holder, Block last, final INodesInPath iip; INode inode = null; try { - if (fileId == HdfsConstantsClient.GRANDFATHER_INODE_ID) { + if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. 
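// The fileId checks above guard every lookup in FSNamesystem the same way:
// clients predating inode IDs send GRANDFATHER_INODE_ID (kept on the
// client-facing HdfsConstants, since clients produce it), and the server
// falls back to resolving the path. A minimal sketch of that dispatch
// (resolveByPath and resolveById are hypothetical names, not part of this
// patch):
//
//   INode resolve(long fileId, String src) throws IOException {
//     if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
//       // Old client: resolve the path and hope it has not changed since
//       // the file was opened for write.
//       return resolveByPath(src);
//     }
//     return resolveById(fileId);
//   }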
@@ -3981,7 +3981,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength) checkNameNodeSafeMode("Cannot fsync file " + src); src = dir.resolvePath(pc, src, pathComponents); final INode inode; - if (fileId == HdfsConstantsClient.GRANDFATHER_INODE_ID) { + if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. @@ -4439,7 +4439,8 @@ String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBl //remove lease, close file finalizeINodeFileUnderConstruction(src, pendingFile, - Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID)); + Snapshot.findLatestSnapshot(pendingFile, + Snapshot.CURRENT_STATE_ID)); return src; } @@ -5731,7 +5732,7 @@ public void setBlockTotal() { SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return; - safeMode.setBlockTotal((int)getCompleteBlocksTotal()); + safeMode.setBlockTotal((int) getCompleteBlocksTotal()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index b760dd80ec..ebd747549e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -32,7 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -286,7 +286,7 @@ private static List matchEditLogs(File[] filesInStorage, try { long startTxId = Long.parseLong(inProgressEditsMatch.group(1)); ret.add( - new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, true)); + new EditLogFile(f, startTxId, HdfsServerConstants.INVALID_TXID, true)); continue; } catch (NumberFormatException nfe) { LOG.error("In-progress edits file " + f + " has improperly " + @@ -301,7 +301,7 @@ private static List matchEditLogs(File[] filesInStorage, if (staleInprogressEditsMatch.matches()) { try { long startTxId = Long.parseLong(staleInprogressEditsMatch.group(1)); - ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, + ret.add(new EditLogFile(f, startTxId, HdfsServerConstants.INVALID_TXID, true)); continue; } catch (NumberFormatException nfe) { @@ -348,7 +348,7 @@ static void addStreamsToCollectionFromFiles(Collection elfs, } } if (elf.lastTxId < fromTxId) { - assert elf.lastTxId != HdfsConstants.INVALID_TXID; + assert elf.lastTxId != HdfsServerConstants.INVALID_TXID; if (LOG.isDebugEnabled()) { LOG.debug("passing over " + elf + " because it ends at " + elf.lastTxId + ", but we only care about transactions " + @@ -391,7 +391,7 @@ synchronized public void recoverUnfinalizedSegments() throws IOException { throw new CorruptionException("In-progress edit log file is corrupt: " + elf); } - if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) { + if (elf.getLastTxId() == 
HdfsServerConstants.INVALID_TXID) { // If the file has a valid header (isn't corrupt) but contains no // transactions, we likely just crashed after opening the file and // writing the header, but before syncing any transactions. Safe to @@ -480,19 +480,19 @@ public int compare(EditLogFile a, EditLogFile b) { EditLogFile(File file, long firstTxId, long lastTxId) { this(file, firstTxId, lastTxId, false); - assert (lastTxId != HdfsConstants.INVALID_TXID) + assert (lastTxId != HdfsServerConstants.INVALID_TXID) && (lastTxId >= firstTxId); } EditLogFile(File file, long firstTxId, long lastTxId, boolean isInProgress) { - assert (lastTxId == HdfsConstants.INVALID_TXID && isInProgress) - || (lastTxId != HdfsConstants.INVALID_TXID && lastTxId >= firstTxId); - assert (firstTxId > 0) || (firstTxId == HdfsConstants.INVALID_TXID); + assert (lastTxId == HdfsServerConstants.INVALID_TXID && isInProgress) + || (lastTxId != HdfsServerConstants.INVALID_TXID && lastTxId >= firstTxId); + assert (firstTxId > 0) || (firstTxId == HdfsServerConstants.INVALID_TXID); assert file != null; Preconditions.checkArgument(!isInProgress || - lastTxId == HdfsConstants.INVALID_TXID); + lastTxId == HdfsServerConstants.INVALID_TXID); this.firstTxId = firstTxId; this.lastTxId = lastTxId; @@ -552,7 +552,7 @@ void moveAsideTrashFile(long markerTxid) throws IOException { } public void moveAsideEmptyFile() throws IOException { - assert lastTxId == HdfsConstants.INVALID_TXID; + assert lastTxId == HdfsServerConstants.INVALID_TXID; renameSelf(".empty"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index cf0325e49f..409967eecb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -30,7 +30,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; @@ -511,7 +510,7 @@ public final boolean isQuotaSet() { */ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) { final byte storagePolicyId = isSymlink() ? - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID(); + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID(); return computeQuotaUsage(bsps, storagePolicyId, new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID); } @@ -555,7 +554,7 @@ public abstract QuotaCounts computeQuotaUsage( public final QuotaCounts computeQuotaUsage( BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache) { final byte storagePolicyId = isSymlink() ? - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID(); + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID(); return computeQuotaUsage(bsps, storagePolicyId, counts, useCache, Snapshot.CURRENT_STATE_ID); } @@ -712,7 +711,7 @@ public final INode setAccessTime(long accessTime, int latestSnapshotId) { /** * @return the storage policy directly specified on the INode. 
Return - * {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has + * {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has * been specified. */ public abstract byte getLocalStoragePolicyID(); @@ -721,13 +720,13 @@ public final INode setAccessTime(long accessTime, int latestSnapshotId) { * Get the storage policy ID while computing quota usage * @param parentStoragePolicyId the storage policy ID of the parent directory * @return the storage policy ID of this INode. Note that for an - * {@link INodeSymlink} we return {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} + * {@link INodeSymlink} we return {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} * instead of throwing Exception */ public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) { byte localId = isSymlink() ? - HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID(); - return localId != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID(); + return localId != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? localId : parentStoragePolicyId; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 12fa7aaee0..098594da6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -45,7 +45,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; -import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; /** * Directory INode class. 
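The BLOCK_STORAGE_POLICY_ID_UNSPECIFIED constant keeps its sentinel semantics after moving from HdfsConstantsClient to HdfsConstants: an inode with no policy of its own inherits the policy resolved for its parent, as the INode and FSDirStatAndListingOp hunks above both implement. A minimal sketch of that fallback, assuming only the constant shown in the static import above (effectivePolicy is a hypothetical name, not part of this patch):

    import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;

    static byte effectivePolicy(byte inodePolicy, byte parentPolicy) {
      // UNSPECIFIED on the child means "inherit the parent's effective policy".
      return inodePolicy != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
          ? inodePolicy : parentPolicy;
    }

Symlinks are special-cased to UNSPECIFIED throughout these hunks, so they never carry a storage policy of their own.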
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index e9d3644c0b..110bd71145 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java index 5344ca7ac9..00b33cd06a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java @@ -20,7 +20,7 @@ import java.io.FileNotFoundException; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.SequentialNumber; /** @@ -44,7 +44,7 @@ public class INodeId extends SequentialNumber { */ public static void checkId(long requestId, INode inode) throws FileNotFoundException { - if (requestId != HdfsConstantsClient.GRANDFATHER_INODE_ID && requestId != inode.getId()) { + if (requestId != HdfsConstants.GRANDFATHER_INODE_ID && requestId != inode.getId()) { throw new FileNotFoundException( "ID mismatch. 
Request id and saved id: " + requestId + " , " + inode.getId() + " for file " + inode.getFullPathName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java index 9a1e1f4ec6..7b1332b910 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.LightWeightGSet; @@ -124,12 +124,12 @@ public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, @Override public byte getStoragePolicyID(){ - return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; } @Override public byte getLocalStoragePolicyID() { - return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; + return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index f1892c5e87..72ca6ff683 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; @@ -49,7 +50,7 @@ public class INodesInPath { */ private static boolean isDotSnapshotDir(byte[] pathComponent) { return pathComponent != null && - Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent); + Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent); } static INodesInPath fromINode(INode inode) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 9ce8ebc650..c6a92beb2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -36,7 +36,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.util.Daemon; @@ -72,8 +71,8 @@ public class LeaseManager { private final FSNamesystem 
fsnamesystem; - private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD; - private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD; + private long softLimit = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD; + private long hardLimit = HdfsServerConstants.LEASE_HARDLIMIT_PERIOD; // // Used for handling lock-leases diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index dbb2c50da9..26a13bd653 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -37,7 +37,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; @@ -129,7 +128,7 @@ public boolean isOfType(StorageDirType type) { * recent fsimage file. This does not include any transactions * that have since been written to the edit log. */ - protected volatile long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID; + protected volatile long mostRecentCheckpointTxId = HdfsServerConstants.INVALID_TXID; /** * Time of the last checkpoint, in milliseconds since the epoch. @@ -558,7 +557,7 @@ private void format(StorageDirectory sd) throws IOException { */ public void format(NamespaceInfo nsInfo) throws IOException { Preconditions.checkArgument(nsInfo.getLayoutVersion() == 0 || - nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION, + nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION, "Bad layout version: %s", nsInfo.getLayoutVersion()); this.setStorageInfo(nsInfo); @@ -577,7 +576,7 @@ public static NamespaceInfo newNamespaceInfo() } public void format() throws IOException { - this.layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION; + this.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION; for (Iterator it = dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); @@ -634,7 +633,7 @@ void readProperties(StorageDirectory sd, StartupOption startupOption) "storage directory " + sd.getRoot().getAbsolutePath()); } props.setProperty("layoutVersion", - Integer.toString(HdfsConstants.NAMENODE_LAYOUT_VERSION)); + Integer.toString(HdfsServerConstants.NAMENODE_LAYOUT_VERSION)); } setFieldsFromProperties(props, sd); } @@ -657,7 +656,7 @@ private void setDeprecatedPropertiesForUpgrade(Properties props) { * This should only be used during upgrades. 
*/ String getDeprecatedProperty(String prop) { - assert getLayoutVersion() > HdfsConstants.NAMENODE_LAYOUT_VERSION : + assert getLayoutVersion() > HdfsServerConstants.NAMENODE_LAYOUT_VERSION : "getDeprecatedProperty should only be done when loading " + "storage from past versions during upgrade."; return deprecatedProperties.get(prop); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 132b93e1c1..979378aa84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -486,7 +486,7 @@ public static InetSocketAddress getAddress(URI filesystemURI) { public static URI getUri(InetSocketAddress namenode) { int port = namenode.getPort(); String portString = port == DEFAULT_PORT ? "" : (":"+port); - return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + namenode.getHostName()+portString); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 83686e0263..331160920b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH; import static org.apache.hadoop.util.Time.now; import java.io.FileNotFoundException; @@ -87,7 +87,6 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSLimitException; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -117,6 +116,7 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; @@ -1500,9 +1500,9 @@ public synchronized HAServiceStatus getServiceStatus() * @throws IOException on layout version mismatch */ void verifyLayoutVersion(int version) throws IOException { - if 
(version != HdfsConstants.NAMENODE_LAYOUT_VERSION) + if (version != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) throw new IncorrectVersionException( - HdfsConstants.NAMENODE_LAYOUT_VERSION, version, "data node"); + HdfsServerConstants.NAMENODE_LAYOUT_VERSION, version, "data node"); } private void verifySoftwareVersion(DatanodeRegistration dnReg) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java index 674a957449..33be8b0140 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java @@ -23,7 +23,7 @@ import java.util.Comparator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import com.google.common.base.Preconditions; @@ -88,8 +88,8 @@ static private enum State { RedundantEditLogInputStream(Collection streams, long startTxId) { this.curIdx = 0; - this.prevTxId = (startTxId == HdfsConstants.INVALID_TXID) ? - HdfsConstants.INVALID_TXID : (startTxId - 1); + this.prevTxId = (startTxId == HdfsServerConstants.INVALID_TXID) ? + HdfsServerConstants.INVALID_TXID : (startTxId - 1); this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL; this.prevException = null; // EditLogInputStreams in a RedundantEditLogInputStream must be finalized, @@ -97,9 +97,9 @@ static private enum State { EditLogInputStream first = null; for (EditLogInputStream s : streams) { Preconditions.checkArgument(s.getFirstTxId() != - HdfsConstants.INVALID_TXID, "invalid first txid in stream: %s", s); + HdfsServerConstants.INVALID_TXID, "invalid first txid in stream: %s", s); Preconditions.checkArgument(s.getLastTxId() != - HdfsConstants.INVALID_TXID, "invalid last txid in stream: %s", s); + HdfsServerConstants.INVALID_TXID, "invalid last txid in stream: %s", s); if (first == null) { first = s; } else { @@ -172,7 +172,7 @@ protected FSEditLogOp nextOp() throws IOException { switch (state) { case SKIP_UNTIL: try { - if (prevTxId != HdfsConstants.INVALID_TXID) { + if (prevTxId != HdfsServerConstants.INVALID_TXID) { LOG.info("Fast-forwarding stream '" + streams[curIdx].getName() + "' to transaction ID " + (prevTxId + 1)); streams[curIdx].skipUntil(prevTxId + 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 0d327580c2..041c3cb9ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -44,7 +44,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import 
org.apache.hadoop.hdfs.server.common.StorageErrorReporter; @@ -336,7 +336,7 @@ public static void copyFileToStream(OutputStream out, File localfile, private static void copyFileToStream(OutputStream out, File localfile, FileInputStream infile, DataTransferThrottler throttler, Canceler canceler) throws IOException { - byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; + byte buf[] = new byte[HdfsServerConstants.IO_FILE_BUFFER_SIZE]; try { CheckpointFaultInjector.getInstance() .aboutToSendFile(localfile); @@ -345,7 +345,7 @@ private static void copyFileToStream(OutputStream out, File localfile, shouldSendShortFile(localfile)) { // Test sending image shorter than localfile long len = localfile.length(); - buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)]; + buf = new byte[(int)Math.min(len/2, HdfsServerConstants.IO_FILE_BUFFER_SIZE)]; // This will read at most half of the image // and the rest of the image will be sent over the wire infile.read(buf); @@ -510,7 +510,7 @@ private static MD5Hash receiveFile(String url, List localPaths, } int num = 1; - byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; + byte[] buf = new byte[HdfsServerConstants.IO_FILE_BUFFER_SIZE]; while (num > 0) { num = stream.read(buf); if (num > 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index 9d8f2f819f..0accf5307c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.NameNodeProxies; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -167,7 +167,7 @@ private int doRun() throws IOException { if (!checkLayoutVersion(nsInfo)) { LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion() + ") does not match " + "this node's layout version (" - + HdfsConstants.NAMENODE_LAYOUT_VERSION + ")"); + + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ")"); return ERR_CODE_INVALID_VERSION; } @@ -366,7 +366,7 @@ private boolean checkLogsAvailableForRead(FSImage image, long imageTxId, } private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException { - return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION); + return (nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION); } private void parseConfAndFindOtherNN() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index 1897d8db7c..38aa358726 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -31,9 +31,9 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HAUtil; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.EditLogInputException; import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; import org.apache.hadoop.hdfs.server.namenode.FSEditLog; @@ -75,12 +75,12 @@ public class EditLogTailer { /** * The last transaction ID at which an edit log roll was initiated. */ - private long lastRollTriggerTxId = HdfsConstants.INVALID_TXID; + private long lastRollTriggerTxId = HdfsServerConstants.INVALID_TXID; /** * The highest transaction ID loaded by the Standby. */ - private long lastLoadedTxnId = HdfsConstants.INVALID_TXID; + private long lastLoadedTxnId = HdfsServerConstants.INVALID_TXID; /** * The last time we successfully loaded a non-zero number of edits from the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java index 7baf7dcbb9..c4406a0f55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.AclFeature; @@ -151,7 +151,7 @@ public QuotaCounts updateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps, INo BlockStoragePolicy bsp = null; EnumCounters<StorageType> typeSpaces = new EnumCounters<StorageType>(StorageType.class); - if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { + if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { bsp = bsps.getPolicy(file.getStoragePolicyID()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java index faaf8f4e4f..d06d0dbe8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java @@ -20,14 +20,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @InterfaceAudience.Private @InterfaceStability.Evolving public class NNHAStatusHeartbeat { private final HAServiceState state; - private long txid = HdfsConstants.INVALID_TXID; + private long txid =
HdfsServerConstants.INVALID_TXID; public NNHAStatusHeartbeat(HAServiceState state, long txid) { this.state = state; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index a7439a0e17..dfdf449e7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; @@ -83,7 +83,7 @@ public NamespaceInfo(int nsID, String clusterID, String bpID, public NamespaceInfo(int nsID, String clusterID, String bpID, long cT, String buildVersion, String softwareVersion, long capabilities) { - super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT, + super(HdfsServerConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT, NodeType.NAME_NODE); blockPoolID = bpID; this.buildVersion = buildVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java index 4b191f29f1..1d26bc4983 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; - import com.google.common.base.Function; import com.google.common.collect.ComparisonChain; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; public class RemoteEditLog implements Comparable<RemoteEditLog> { - private long startTxId = HdfsConstants.INVALID_TXID; - private long endTxId = HdfsConstants.INVALID_TXID; + private long startTxId = HdfsServerConstants.INVALID_TXID; + private long endTxId = HdfsServerConstants.INVALID_TXID; private boolean isInProgress = false; public RemoteEditLog() { @@ -33,7 +32,7 @@ public RemoteEditLog() { public RemoteEditLog(long startTxId, long endTxId) { this.startTxId = startTxId; this.endTxId = endTxId; - this.isInProgress = (endTxId == HdfsConstants.INVALID_TXID); + this.isInProgress = (endTxId == HdfsServerConstants.INVALID_TXID); } public RemoteEditLog(long startTxId, long endTxId, boolean inProgress) { @@ -90,7 +89,7 @@ public int hashCode() { @Override public Long apply(RemoteEditLog log) { if (null == log) { - return HdfsConstants.INVALID_TXID; + return HdfsServerConstants.INVALID_TXID; } return log.getStartTxId(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java index ba1fd0f961..e3bdffaa4e 100644 ---
 a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java +++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.tools.TableListing; import org.apache.hadoop.util.StringUtils; @@ -151,7 +151,7 @@ public int run(Configuration conf, List<String> args) throws IOException { return 2; } byte storagePolicyId = status.getStoragePolicy(); - if (storagePolicyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { + if (storagePolicyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { System.out.println("The storage policy of " + path + " is unspecified"); return 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java index 0ce1e78614..73d179814b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java @@ -22,8 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; @@ -49,8 +48,8 @@ static OfflineEditsLoader createLoader(OfflineEditsVisitor visitor, OfflineEditsLoader loader = null; try { file = new File(inputFileName); - elis = new EditLogFileInputStream(file, HdfsConstants.INVALID_TXID, - HdfsConstants.INVALID_TXID, false); + elis = new EditLogFileInputStream(file, HdfsServerConstants.INVALID_TXID, + HdfsServerConstants.INVALID_TXID, false); loader = new OfflineEditsBinaryLoader(visitor, elis, flags); } finally { if ((loader == null) && (elis != null)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index 3693239c79..f2c7427b51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -687,7 +687,7 @@ private void processINode(DataInputStream in, ImageVisitor v, final String
pathName = readINodePath(in, parentName); v.visit(ImageElement.INODE_PATH, pathName); - long inodeId = HdfsConstantsClient.GRANDFATHER_INODE_ID; + long inodeId = HdfsConstants.GRANDFATHER_INODE_ID; if (supportInodeId) { inodeId = in.readLong(); v.visit(ImageElement.INODE_ID, inodeId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java index c98ba66f38..6d7ef5548b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; @@ -259,7 +260,7 @@ public void testSetReplication() throws IOException { public void testCreateLinkMaxPathLink() throws IOException { Path dir = new Path(testBaseDir1()); Path file = new Path(testBaseDir1(), "file"); - final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH; + final int maxPathLen = HdfsServerConstants.MAX_PATH_LENGTH; final int dirLen = dir.toString().length() + 1; int len = maxPathLen - dirLen; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 681e9bd53b..a8df9917d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.StorageInfo; @@ -950,7 +951,7 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b, final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length); final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(s, writeTimeout), - HdfsConstants.SMALL_BUFFER_SIZE)); + HdfsServerConstants.SMALL_BUFFER_SIZE)); final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s)); // send the request @@ -1222,7 +1223,7 @@ public static void runOperations(MiniDFSCluster cluster, s2.close(); // OP_SET_STORAGE_POLICY 45 filesystem.setStoragePolicy(pathFileCreate, - HdfsConstants.HOT_STORAGE_POLICY_NAME); + HdfsServerConstants.HOT_STORAGE_POLICY_NAME); // OP_RENAME_OLD 1 final Path pathFileMoved = new Path("/file_moved"); filesystem.rename(pathFileCreate, pathFileMoved); @@ -1689,8 +1690,7 @@ public static void addDataNodeLayoutVersion(final int lv, final String descripti modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); field.setInt(null, lv); - // Override {@link HdfsConstants#DATANODE_LAYOUT_VERSION} - field = 
HdfsConstants.class.getField("DATANODE_LAYOUT_VERSION"); + field = HdfsServerConstants.class.getField("DATANODE_LAYOUT_VERSION"); field.setAccessible(true); modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); field.setInt(null, lv); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 89c8e11945..9621dc8869 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; import java.io.File; import java.io.FileNotFoundException; @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.blockmanagement.*; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -67,12 +68,12 @@ public class TestBlockStoragePolicy { static final long FILE_LEN = 1024; static final short REPLICATION = 3; - static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID; - static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID; - static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID; - static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID; - static final byte ALLSSD = HdfsConstants.ALLSSD_STORAGE_POLICY_ID; - static final byte LAZY_PERSIST = HdfsConstants.MEMORY_STORAGE_POLICY_ID; + static final byte COLD = HdfsServerConstants.COLD_STORAGE_POLICY_ID; + static final byte WARM = HdfsServerConstants.WARM_STORAGE_POLICY_ID; + static final byte HOT = HdfsServerConstants.HOT_STORAGE_POLICY_ID; + static final byte ONESSD = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID; + static final byte ALLSSD = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID; + static final byte LAZY_PERSIST = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID; @Test (timeout=300000) public void testConfigKeyEnabled() throws IOException { @@ -83,7 +84,7 @@ public void testConfigKeyEnabled() throws IOException { try { cluster.waitActive(); cluster.getFileSystem().setStoragePolicy(new Path("/"), - HdfsConstants.COLD_STORAGE_POLICY_NAME); + HdfsServerConstants.COLD_STORAGE_POLICY_NAME); } finally { cluster.shutdown(); } @@ -103,7 +104,7 @@ public void testConfigKeyDisabled() throws IOException { try { cluster.waitActive(); cluster.getFileSystem().setStoragePolicy(new Path("/"), - HdfsConstants.COLD_STORAGE_POLICY_NAME); + HdfsServerConstants.COLD_STORAGE_POLICY_NAME); } finally { cluster.shutdown(); } @@ -860,15 +861,15 @@ public void testSetStoragePolicy() throws Exception { final Path invalidPath = new Path("/invalidPath"); try { - fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME); + fs.setStoragePolicy(invalidPath, HdfsServerConstants.WARM_STORAGE_POLICY_NAME); Assert.fail("Should throw a FileNotFoundException"); } catch (FileNotFoundException e) { GenericTestUtils.assertExceptionContains(invalidPath.toString(), e); } - fs.setStoragePolicy(fooFile, 
HdfsConstants.COLD_STORAGE_POLICY_NAME); - fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME); - fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME); + fs.setStoragePolicy(fooFile, HdfsServerConstants.COLD_STORAGE_POLICY_NAME); + fs.setStoragePolicy(barDir, HdfsServerConstants.WARM_STORAGE_POLICY_NAME); + fs.setStoragePolicy(barFile2, HdfsServerConstants.HOT_STORAGE_POLICY_NAME); dirList = fs.getClient().listPaths(dir.toString(), HdfsFileStatus.EMPTY_NAME).getPartialListing(); @@ -916,7 +917,7 @@ public void testSetStoragePolicyWithSnapshot() throws Exception { DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L); DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L); - fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME); + fs.setStoragePolicy(fooDir, HdfsServerConstants.WARM_STORAGE_POLICY_NAME); HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(), HdfsFileStatus.EMPTY_NAME, true).getPartialListing(); @@ -928,7 +929,7 @@ public void testSetStoragePolicyWithSnapshot() throws Exception { // take snapshot SnapshotTestHelper.createSnapshot(fs, dir, "s1"); // change the storage policy of fooFile1 - fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME); + fs.setStoragePolicy(fooFile1, HdfsServerConstants.COLD_STORAGE_POLICY_NAME); fooList = fs.getClient().listPaths(fooDir.toString(), HdfsFileStatus.EMPTY_NAME).getPartialListing(); @@ -951,7 +952,7 @@ public void testSetStoragePolicyWithSnapshot() throws Exception { HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD); // change the storage policy of foo dir - fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME); + fs.setStoragePolicy(fooDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME); // /dir/foo is now hot dirList = fs.getClient().listPaths(dir.toString(), HdfsFileStatus.EMPTY_NAME, true).getPartialListing(); @@ -1068,7 +1069,7 @@ private void testChangeFileRep(String policyName, byte policyId, */ @Test public void testChangeHotFileRep() throws Exception { - testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT, + testChangeFileRep(HdfsServerConstants.HOT_STORAGE_POLICY_NAME, HOT, new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK}, new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK, @@ -1082,7 +1083,7 @@ public void testChangeHotFileRep() throws Exception { */ @Test public void testChangeWarmRep() throws Exception { - testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM, + testChangeFileRep(HdfsServerConstants.WARM_STORAGE_POLICY_NAME, WARM, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE}, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE, @@ -1095,7 +1096,7 @@ public void testChangeWarmRep() throws Exception { */ @Test public void testChangeColdRep() throws Exception { - testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD, + testChangeFileRep(HdfsServerConstants.COLD_STORAGE_POLICY_NAME, COLD, new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE}, new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 68687edea1..3fe562608e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.StorageInfo; @@ -191,7 +191,7 @@ public void testRollback() throws Exception { UpgradeUtilities.getCurrentBlockPoolID(cluster)); // Put newer layout version in current. storageInfo = new StorageInfo( - HdfsConstants.DATANODE_LAYOUT_VERSION - 1, + HdfsServerConstants.DATANODE_LAYOUT_VERSION - 1, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), @@ -277,7 +277,7 @@ public void testRollback() throws Exception { UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous"); - storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION, + storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE, NodeType.DATA_NODE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java index 889e57973c..3a27be6104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; @@ -89,7 +89,7 @@ private static class StorageData { */ private StorageData[] initializeVersions() throws Exception { int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION; - int layoutVersionCur = HdfsConstants.DATANODE_LAYOUT_VERSION; + int layoutVersionCur = HdfsServerConstants.DATANODE_LAYOUT_VERSION; int layoutVersionNew = Integer.MIN_VALUE; int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null); int namespaceIdOld = Integer.MIN_VALUE; @@ -200,7 +200,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) { return false; } // check #3 - int softwareLV = HdfsConstants.DATANODE_LAYOUT_VERSION; + int softwareLV = HdfsServerConstants.DATANODE_LAYOUT_VERSION; int storedLV = datanodeVer.getLayoutVersion(); if (softwareLV == storedLV && datanodeVer.getCTime() == namenodeVer.getCTime()) @@ -252,7 +252,7 @@ public void testVersions() throws Exception { .startupOption(StartupOption.REGULAR) .build(); StorageData nameNodeVersion = new StorageData( - HdfsConstants.NAMENODE_LAYOUT_VERSION, + HdfsServerConstants.NAMENODE_LAYOUT_VERSION, UpgradeUtilities.getCurrentNamespaceID(cluster), 
UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index f0a094ed12..544537c0fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -37,9 +37,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; @@ -307,7 +307,7 @@ public void testUpgrade() throws Exception { UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); cluster = createCluster(); baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); - storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION, + storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE, NodeType.DATA_NODE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 15b3fb1fb3..211e6aac85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -22,9 +22,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -33,7 +33,6 @@ import org.apache.hadoop.util.VersionInfo; import org.junit.Test; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.security.Permission; @@ -180,7 +179,7 @@ public void testChangeStorageID() throws Exception { .getCTime(); StorageInfo mockStorageInfo = mock(StorageInfo.class); doReturn(nnCTime).when(mockStorageInfo).getCTime(); - doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo) + doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo) .getLayoutVersion(); DatanodeRegistration dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null, VersionInfo.getVersion()); @@ -225,7 +224,7 @@ public void testRegistrationWithDifferentSoftwareVersions() throws Exception { 
doReturn(nnCTime).when(mockStorageInfo).getCTime(); DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class); - doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion(); + doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion(); doReturn("127.0.0.1").when(mockDnReg).getIpAddr(); doReturn(123).when(mockDnReg).getXferPort(); doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid(); @@ -274,7 +273,7 @@ public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() doReturn(nnCTime).when(mockStorageInfo).getCTime(); DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class); - doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion(); + doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion(); doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid(); doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index ca25018fc9..969f2b584e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -33,15 +33,14 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; @@ -111,7 +110,7 @@ private void recoverFile(final FileSystem fs) throws Exception { // set the soft limit to be 1 second so that the // namenode triggers lease recovery upon append request - cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD); + cluster.setLeasePeriod(1000, HdfsServerConstants.LEASE_HARDLIMIT_PERIOD); // Trying recovery int tries = 60; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index 0a7b71240e..f56ff9eb7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -70,7 +70,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; @@ -537,7 +536,7 @@ public void testFileCreationError2() throws IOException { // add one block to the file LocatedBlock location = client.getNamenode().addBlock(file1.toString(), - 
client.clientName, null, null, HdfsConstantsClient.GRANDFATHER_INODE_ID, null); + client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null); System.out.println("testFileCreationError2: " + "Added block " + location.getBlock()); @@ -588,7 +587,7 @@ public void testFileCreationError3() throws IOException { createFile(dfs, f, 3); try { cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName, - null, null, HdfsConstantsClient.GRANDFATHER_INODE_ID, null); + null, null, HdfsConstants.GRANDFATHER_INODE_ID, null); fail(); } catch(IOException ioe) { FileSystem.LOG.info("GOOD!", ioe); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java index 8ff86551ec..103151eaa9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java @@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; @@ -278,7 +278,7 @@ public void testBlockKey() { for (int i = 0; i < blkids.length; i++) { Block b = new Block(blkids[i], 0, - HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP); + HdfsConstants.GRANDFATHER_GENERATION_STAMP); Long v = map.get(b); System.out.println(b + " => " + v); assertEquals(blkids[i], v.longValue()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index 9b5a7c03a3..88dbd5e786 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -18,13 +18,10 @@ package org.apache.hadoop.hdfs; import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyList; import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.anyShort; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyObject; -import static org.mockito.Matchers.anyShort; -import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; @@ -32,12 +29,10 @@ import java.io.DataOutputStream; import java.io.IOException; import java.security.PrivilegedExceptionAction; -import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; @@ -47,8 +42,8 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.impl.LeaseRenewer; import org.apache.hadoop.hdfs.protocol.ClientProtocol; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.EnumSetWritable; @@ -103,7 +98,7 @@ public void testLeaseAbort() throws Exception { // make it look like the soft limit has been exceeded. LeaseRenewer originalRenewer = dfs.getLeaseRenewer(); dfs.lastLeaseRenewal = Time.monotonicNow() - - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000; + - HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD - 1000; try { dfs.renewLease(); } catch (IOException e) {} @@ -119,7 +114,7 @@ public void testLeaseAbort() throws Exception { // make it look like the hard limit has been exceeded. dfs.lastLeaseRenewal = Time.monotonicNow() - - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000; + - HdfsServerConstants.LEASE_HARDLIMIT_PERIOD - 1000; dfs.renewLease(); // this should not work. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 6d981fbfd1..b8aac28e72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -38,7 +38,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -333,8 +332,8 @@ public void testSoftLeaseRecovery() throws Exception { DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map); // Reset default lease periods - cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, - HdfsConstants.LEASE_HARDLIMIT_PERIOD); + cluster.setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, + HdfsServerConstants.LEASE_HARDLIMIT_PERIOD); //create a file // create a random file name String filestr = "/foo" + AppendTestUtil.nextInt(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index e9891bfe59..9ac58ba65b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -37,9 +37,9 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.LayoutVersion; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; @@ -533,7 +533,7 @@ public static void corruptFile(File file, * of the Namenode, whether it is running or not. 
*/ public static int getCurrentNameNodeLayoutVersion() { - return HdfsConstants.NAMENODE_LAYOUT_VERSION; + return HdfsServerConstants.NAMENODE_LAYOUT_VERSION; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index 2e84499e4b..2115671780 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -34,11 +34,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -185,7 +185,7 @@ public void testHttpServer() throws Exception { byte[] retrievedViaHttp = DFSTestUtil.urlGetBytes(new URL(urlRoot + "/getJournal?segmentTxId=1&jid=" + journalId)); byte[] expected = Bytes.concat( - Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION), + Ints.toByteArray(HdfsServerConstants.NAMENODE_LAYOUT_VERSION), (new byte[] { 0, 0, 0, 0 }), // layout flags section EDITS_DATA); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 91abb2a5f5..9ce16f219a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -88,7 +88,8 @@ public class TestBlockManager { @Before public void setupMockCluster() throws IOException { Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "need to set a dummy value here so it assumes a multi-rack cluster"); + conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, + "need to set a dummy value here so it assumes a multi-rack cluster"); fsn = Mockito.mock(FSNamesystem.class); Mockito.doReturn(true).when(fsn).hasWriteLock(); bm = new BlockManager(fsn, conf); @@ -111,8 +112,8 @@ private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) { for (DatanodeDescriptor dn :
nodesToAdd) { cluster.add(dn); dn.getStorageInfos()[0].setUtilizationForTesting( - 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L); + 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L); dn.updateHeartbeat( BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0, null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 296003ffb4..1d6dad8638 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -50,8 +50,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -138,8 +138,8 @@ public static void setupCluster() throws Exception { } for (int i=0; i < NUM_OF_DATANODES; i++) { updateHeartbeatWithUsage(dataNodes[i], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } } @@ -162,8 +162,8 @@ private static boolean isOnSameRack(DatanodeStorageInfo left, DatanodeDescriptor @Test public void testChooseTarget1() throws Exception { updateHeartbeatWithUsage(dataNodes[0], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 4, 0); // overloaded DatanodeStorageInfo[] targets; @@ -193,8 +193,8 @@ public void testChooseTarget1() throws Exception { assertFalse(isOnSameRack(targets[0], targets[2])); updateHeartbeatWithUsage(dataNodes[0], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas) { @@ -316,8 +316,8 @@ public void testChooseTarget2() throws Exception { public void testChooseTarget3() throws Exception { // make data node 0 to be not qualified to choose updateHeartbeatWithUsage(dataNodes[0], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); // no space DatanodeStorageInfo[] targets; @@ -350,8 +350,8 @@ public void testChooseTarget3() throws Exception { 
assertFalse(isOnSameRack(targets[1], targets[3])); updateHeartbeatWithUsage(dataNodes[0], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } /** @@ -367,8 +367,8 @@ public void testChoooseTarget4() throws Exception { // make data node 0 & 1 to be not qualified to choose: not enough disk space for(int i=0; i<2; i++) { updateHeartbeatWithUsage(dataNodes[i], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } DatanodeStorageInfo[] targets; @@ -395,8 +395,8 @@ public void testChoooseTarget4() throws Exception { for(int i=0; i<2; i++) { updateHeartbeatWithUsage(dataNodes[i], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } } @@ -459,8 +459,8 @@ public void testChooseTarget6() throws Exception { bm.getDatanodeManager().getNetworkTopology().add(newDn); bm.getDatanodeManager().getHeartbeatManager().addDatanode(newDn); updateHeartbeatWithUsage(newDn, - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); // Try picking three nodes. Only two should return. 
excludedNodes.clear(); @@ -506,8 +506,8 @@ public void testChooseTargetWithMoreThanAvailableNodes() throws Exception { // make data node 0 & 1 to be not qualified to choose: not enough disk space for(int i=0; i<2; i++) { updateHeartbeatWithUsage(dataNodes[i], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } final LogVerificationAppender appender = new LogVerificationAppender(); @@ -531,8 +531,8 @@ public void testChooseTargetWithMoreThanAvailableNodes() throws Exception { for(int i=0; i<2; i++) { updateHeartbeatWithUsage(dataNodes[i], - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java index d514768a70..7ff2930fc4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java @@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -93,8 +93,8 @@ public static void setupCluster() throws IOException { dnrList.add(dnr); dnManager.registerDatanode(dnr); dataNodes[i].getStorageInfos()[0].setUtilizationForTesting( - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L, - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L); + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L, + 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L); dataNodes[i].updateHeartbeat( BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]), 0L, 0L, 0, 0, null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index 7708ddc8c4..ed54aeb571 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopologyWithNodeGroup; @@ -191,8 +191,8 @@ private static void updateHeartbeatWithUsage(DatanodeDescriptor dn, private static void setupDataNodeCapacity() { for(int i=0; i chosenNodes = new ArrayList(); chosenNodes.add(storagesInBoundaryCase[0]); @@ -735,8 +735,8 @@ public void testChooseMoreTargetsThanNodeGroups() throws Exception { for(int i=0; i chosenNodes = new ArrayList(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index 3c167431f5..216ff3d177 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -29,8 +29,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; @@ -67,7 +67,7 @@ public void setUp() throws IOException { // Return a a good software version. doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion(); // Return a good layout version for now. - doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo) + doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo) .getLayoutVersion(); DatanodeProtocolClientSideTranslatorPB fakeDnProt = @@ -104,12 +104,12 @@ public void testSoftwareVersionDifferences() throws Exception { @Test public void testDifferentLayoutVersions() throws Exception { // We expect no exceptions to be thrown when the layout versions match. - assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION, + assertEquals(HdfsServerConstants.NAMENODE_LAYOUT_VERSION, actor.retrieveNamespaceInfo().getLayoutVersion()); // We expect an exception to be thrown when the NN reports a layout version // different from that of the DN. 
-    doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
+    doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
         .getLayoutVersion();
     try {
       actor.retrieveNamespaceInfo();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index fa7a838316..150b5e4a1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
@@ -405,7 +405,7 @@ public void runTest(int parallelism) throws Exception {
       // Test2: block metafile is missing
       long blockId = deleteMetaFile();
       scan(totalBlocks, 1, 1, 0, 0, 1);
-      verifyGenStamp(blockId, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
+      verifyGenStamp(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
       scan(totalBlocks, 0, 0, 0, 0, 0);
 
       // Test3: block file is missing
@@ -420,7 +420,7 @@ public void runTest(int parallelism) throws Exception {
       blockId = createBlockFile();
       totalBlocks++;
       scan(totalBlocks, 1, 1, 0, 1, 0);
-      verifyAddition(blockId, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP, 0);
+      verifyAddition(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP, 0);
       scan(totalBlocks, 0, 0, 0, 0, 0);
 
       // Test5: A metafile exists for which there is no block file and
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 7eaf5c710e..a5d584825d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -98,9 +98,9 @@ public class TestStorageMover {
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
 
     DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
-    HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
-    WARM = DEFAULT_POLICIES.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
-    COLD = DEFAULT_POLICIES.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_NAME);
+    HOT = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
+    WARM = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
+    COLD = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
     TestBalancer.initTestSetup();
     Dispatcher.setDelayAfterErrors(1000L);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 360261d56e..db0185d1f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -596,7 +595,7 @@ long executeOp(int daemonId, int inputIdx, String clientName)
       long end = Time.now();
       for(boolean written = !closeUponCreate; !written;
         written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
-                    clientName, null, HdfsConstantsClient.GRANDFATHER_INODE_ID));
+                    clientName, null, HdfsConstants.GRANDFATHER_INODE_ID));
       return end-start;
     }
 
@@ -1142,7 +1141,7 @@ void generateInputs(int[] ignore) throws IOException {
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE,
             CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE, null);
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
-        nameNodeProto.complete(fileName, clientName, lastBlock, HdfsConstantsClient.GRANDFATHER_INODE_ID);
+        nameNodeProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
       }
       // prepare block reports
       for(int idx=0; idx < nrDatanodes; idx++) {
@@ -1155,7 +1154,7 @@ private ExtendedBlock addBlocks(String fileName, String clientName)
       ExtendedBlock prevBlock = null;
       for(int jdx = 0; jdx < blocksPerFile; jdx++) {
         LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
-            prevBlock, null, HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+            prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 2d29a68f35..5a4134c0ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
@@ -89,14 +89,14 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
     DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
-        src, HdfsConstantsClient.GRANDFATHER_INODE_ID, "clientName",
+        src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName",
         null, null, null, onRetryBlock);
     assertNotNull("Targets must be generated", targets);
 
     // run second addBlock()
     LOG.info("Starting second addBlock for " + src);
     nn.addBlock(src, "clientName", null, null,
-        HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null);
     assertTrue("Penultimate block must be complete",
         checkFileProgress(src, false));
     LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
@@ -106,7 +106,7 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
 
     // continue first addBlock()
     LocatedBlock newBlock = ns.storeAllocatedBlock(
-        src, HdfsConstantsClient.GRANDFATHER_INODE_ID, "clientName", null, targets);
+        src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 
     // check locations
@@ -144,14 +144,14 @@ public void testAddBlockRetryShouldReturnBlockWithLocations()
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
-        HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null);
     assertTrue("Block locations should be present",
         lb1.getLocations().length > 0);
     cluster.restartNameNode();
     nameNodeRpc = cluster.getNameNodeRpc();
     LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
-        HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null);
     assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
     assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 6d8d205cb8..1e42e34532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -68,9 +68,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -880,12 +880,12 @@ public EditLogByteInputStream(byte[] data) throws IOException {
 
     @Override
     public long getFirstTxId() {
-      return HdfsConstants.INVALID_TXID;
+      return HdfsServerConstants.INVALID_TXID;
     }
 
     @Override
     public long getLastTxId() {
-      return HdfsConstants.INVALID_TXID;
+      return HdfsServerConstants.INVALID_TXID;
     }
 
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
index c3d2997136..c0eb890536 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
@@ -29,7 +29,7 @@ import java.net.URL;
 import java.util.EnumMap;
 
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.junit.Test;
@@ -51,7 +51,7 @@ public void testReadURL() throws Exception {
     URL url = new URL("http://localhost/fakeLog");
     EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
-        HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
+        HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false);
     // Read the edit log and verify that we got all of the data.
     EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
         .countEditLogOpTypes(elis);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95e64..bc55d1271d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -365,7 +365,7 @@ public void testValidateEditLogWithCorruptBody() throws IOException {
       truncateFile(logFile, txOffset);
       validation = EditLogFileInputStream.validateEditLog(logFile);
       long expectedEndTxId = (txId == 0) ?
-          HdfsConstants.INVALID_TXID : (txId - 1);
+          HdfsServerConstants.INVALID_TXID : (txId - 1);
       assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
         "at " + txOffset, expectedEndTxId, validation.getEndTxId());
       assertTrue(!validation.hasCorruptHeader());
@@ -383,7 +383,7 @@ public void testValidateEmptyEditLog() throws IOException {
     EditLogValidation validation =
         EditLogFileInputStream.validateEditLog(logFile);
     assertTrue(!validation.hasCorruptHeader());
-    assertEquals(HdfsConstants.INVALID_TXID, validation.getEndTxId());
+    assertEquals(HdfsServerConstants.INVALID_TXID, validation.getEndTxId());
   }
 
   private static final Map<Byte, FSEditLogOpCodes> byteToEnum =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index d11b783f51..9903906337 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -430,7 +430,7 @@ private static INodeDirectory createINodeDirectory(INodeDirectory parent,
     PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
       FsPermission.createImmutable(perm));
     INodeDirectory inodeDirectory = new INodeDirectory(
-      HdfsConstantsClient.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
+      HdfsConstants.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
     parent.addChild(inodeDirectory);
     return inodeDirectory;
   }
@@ -439,7 +439,7 @@ private static INodeFile createINodeFile(INodeDirectory parent, String name,
       String owner, String group, short perm) throws IOException {
     PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
       FsPermission.createImmutable(perm));
-    INodeFile inodeFile = new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID,
+    INodeFile inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
       name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
       PREFERRED_BLOCK_SIZE);
     parent.addChild(inodeFile);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index fbcc73f5c3..bd19a5b413 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -650,8 +649,8 @@ public void testTruncateFailure() throws IOException {
     checkBlockRecovery(p);
 
     NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
-        .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
-            HdfsConstants.LEASE_HARDLIMIT_PERIOD);
+        .setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
+            HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
 
     checkFullFile(p, newLength, contents);
     fs.delete(p, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 4e6c59a12c..b45d2f6dcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -86,12 +86,12 @@ public class TestINodeFile {
   private long preferredBlockSize = 1024;
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
-    return new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+    return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
         null, replication, preferredBlockSize);
   }
 
   private static INodeFile createINodeFile(byte storagePolicyID) {
-    return new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+    return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
         null, (short)3, 1024L, storagePolicyID);
   }
 
@@ -194,9 +194,9 @@ public void testGetFullPathName() {
     INodeFile inf = createINodeFile(replication, preferredBlockSize);
     inf.setLocalName(DFSUtil.string2Bytes("f"));
 
-    INodeDirectory root = new INodeDirectory(HdfsConstantsClient.GRANDFATHER_INODE_ID,
+    INodeDirectory root = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID,
         INodeDirectory.ROOT_NAME, perm, 0L);
-    INodeDirectory dir = new INodeDirectory(HdfsConstantsClient.GRANDFATHER_INODE_ID,
+    INodeDirectory dir = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID,
         DFSUtil.string2Bytes("d"), perm, 0L);
 
     assertEquals("f", inf.getFullPathName());
@@ -345,7 +345,7 @@ public void testValueOf () throws IOException {
 
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFile(
-          HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
+          HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
           1024L);
       from.asFile().toUnderConstruction("client", "machine");
 
@@ -363,7 +363,7 @@ public void testValueOf () throws IOException {
     }
 
     {//cast from INodeDirectory
-      final INode from = new INodeDirectory(HdfsConstantsClient.GRANDFATHER_INODE_ID, null,
+      final INode from = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID, null,
          perm, 0L);
 
       //cast to INodeFile, should fail
@@ -1108,7 +1108,7 @@ public void testFilesInGetListingOps() throws Exception {
   @Test
   public void testFileUnderConstruction() {
     replication = 3;
-    final INodeFile file = new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null,
+    final INodeFile file = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null,
         perm, 0L, 0L, null, replication, 1024L);
     assertFalse(file.isUnderConstruction());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
index 03c75577ab..4e4ed0c396 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
@@ -22,8 +22,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.After;
 import org.junit.Test;
@@ -79,7 +79,7 @@ public void testMetadataVersionOutput() throws IOException {
       assertExceptionContains("ExitException", e);
     }
     /* Check if meta data version is printed correctly. */
-    final String verNumStr = HdfsConstants.NAMENODE_LAYOUT_VERSION + "";
+    final String verNumStr = HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "";
     assertTrue(baos.toString("UTF-8").
       contains("HDFS Image Version: " + verNumStr));
     assertTrue(baos.toString("UTF-8").
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
index a3582ce929..7ee49a9652 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.Assert;
@@ -70,11 +70,11 @@ public void testUpgrade() {
     opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved"});
     assertEquals(StartupOption.UPGRADE, opt);
     assertEquals(
-        ".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION
+        ".snapshot." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION
             + ".UPGRADE_RENAMED",
         FSImageFormat.renameReservedMap.get(".snapshot"));
     assertEquals(
-        ".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION
+        ".reserved." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION
            + ".UPGRADE_RENAMED",
        FSImageFormat.renameReservedMap.get(".reserved"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index a4cb97fc88..49d01c11b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.junit.After;
 import org.junit.Assert;
@@ -68,7 +68,7 @@ public void setUp() throws Exception {
     dfs.mkdirs(dir);
     dfs.setQuota(dir, Long.MAX_VALUE - 1, DISKQUOTA);
     dfs.setQuotaByStorageType(dir, StorageType.DISK, DISKQUOTA);
-    dfs.setStoragePolicy(dir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+    dfs.setStoragePolicy(dir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
index a77b435198..cb7cb35046 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
@@ -38,10 +38,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder;
 import org.apache.hadoop.hdfs.qjournal.server.Journal;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -304,7 +304,7 @@ private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
     BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
         .getInternalState(journal1, "committedTxnId");
     return committedTxnId != null ? committedTxnId.get() :
-        HdfsConstants.INVALID_TXID;
+        HdfsServerConstants.INVALID_TXID;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 29c6b10bac..f0c5482659 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -196,7 +196,7 @@ public void testOpenFilesWithRename() throws Exception {
     String clientName = fs.getClient().getClientName();
     // create one empty block
     nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
-        HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null);
     fs.createSnapshot(path, "s2");
 
     fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index b20e2ad4de..abceea4b9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index a5d5087b98..391f1903f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.Time;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -65,7 +65,7 @@ public void testHdfsFileStatus() throws IOException {
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        HdfsConstantsClient.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
+        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);