From 2749fc677c295e4ac5f4b1ebd5dbd6092370a857 Mon Sep 17 00:00:00 2001
From: cnauroth
Date: Tue, 9 Sep 2014 11:32:54 -0700
Subject: [PATCH 1/5] Newly moved block replica been invalidated and deleted
 in TestBalancer. Contributed by Binglin Chang.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               | 3 +++
 .../org/apache/hadoop/hdfs/server/balancer/Balancer.java  | 8 +++++---
 .../apache/hadoop/hdfs/server/balancer/TestBalancer.java  | 5 +++--
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2686306dbd..978f1b8722 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -634,6 +634,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6951. Correctly persist raw namespace xattrs to edit log and
     fsimage. (clamb via wang)
 
+    HDFS-6506. Newly moved block replica been invalidated and deleted in
+    TestBalancer. (Binglin Chang via cnauroth)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 7661d25ee7..79e2647d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -537,9 +537,11 @@ private ExitStatus run(int iteration, Formatter formatter,
    */
   static int run(Collection<URI> namenodes, final Parameters p,
       Configuration conf) throws IOException, InterruptedException {
-    final long sleeptime = 2000*conf.getLong(
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
+    final long sleeptime =
+        conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+            DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 +
+        conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
     LOG.info("namenodes  = " + namenodes);
     LOG.info("parameters = " + p);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index d509668bdc..72597d2b75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -72,7 +72,7 @@ public class TestBalancer {
     ((Log4JLogger)Balancer.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  final static long CAPACITY = 500L;
+  final static long CAPACITY = 5000L;
   final static String RACK0 = "/rack0";
   final static String RACK1 = "/rack1";
   final static String RACK2 = "/rack2";
@@ -85,7 +85,7 @@ public class TestBalancer {
   static final long TIMEOUT = 40000L; //msec
   static final double CAPACITY_ALLOWED_VARIANCE = 0.005;  // 0.5%
   static final double BALANCE_ALLOWED_VARIANCE = 0.11;    // 10%+delta
-  static final int DEFAULT_BLOCK_SIZE = 10;
+  static final int DEFAULT_BLOCK_SIZE = 100;
   private static final Random r = new Random();
 
   static {
@@ -96,6 +96,7 @@ static void initConf(Configuration conf) {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
    SimulatedFSDataset.setFactory(conf);
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
   }
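
A note on why the new wait works (a sketch, not part of the patch itself): the balancer finishes a move as soon as the target datanode holds the replica, but the source's now-excess replica is only invalidated after a heartbeat has reported the move to the namenode and the replication monitor has run. Sleeping for two heartbeat intervals plus one replication-check interval between iterations gives both steps time to complete, which is what the test was racing against. A minimal sketch of the computation, assuming a standalone helper (BalancerSleepSketch and sleepMillis are illustrative names; the DFSConfigKeys constants are the ones the patch uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class BalancerSleepSketch {
      /** Mirrors the patched Balancer.run() sleep computation. */
      static long sleepMillis(Configuration conf) {
        // Two heartbeat periods (config is in seconds, hence * 2000 for
        // milliseconds) so the movement result reaches the namenode.
        long heartbeatPart = conf.getLong(
            DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
            DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000;
        // One replication-monitor period so the excess source replica can
        // be scheduled for invalidation before the next iteration starts.
        long replicationPart = conf.getLong(
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
        return heartbeatPart + replicationPart;
      }
    }

The matching test change sets both intervals to one second, so each TestBalancer iteration now waits about three seconds instead of two.
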
From 9b8104575444ed2de9b44fe902f86f7395f249ed Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Tue, 9 Sep 2014 14:06:27 -0700
Subject: [PATCH 2/5] Add missing YARN-1471 to the CHANGES.txt

---
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d54fcd6fef..d799c28839 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -15,6 +15,9 @@ Trunk - Unreleased
     YARN-524 TestYarnVersionInfo failing if generated properties doesn't
     include an SVN URL. (stevel)
 
+    YARN-1471. The SLS simulator is not running the preemption policy
+    for CapacityScheduler (Carlo Curino via cdouglas)
+
     YARN-2216 TestRMApplicationHistoryWriter sometimes fails in trunk.
     (Zhijie Shen via xgong)
 

From 3e8f353c8e36b1467af4a8a421097afa512b324c Mon Sep 17 00:00:00 2001
From: cnauroth
Date: Tue, 9 Sep 2014 14:16:41 -0700
Subject: [PATCH 3/5] HADOOP-10925. Change attribution in CHANGES.txt from
 trunk to 2.6.0.

---
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0417b0a638..75ad7db5b0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -319,9 +319,6 @@ Trunk (Unreleased)
     HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure
     File System. (Shanyu Zhao via cnauroth)
 
-    HADOOP-10925. Compilation fails in native link0 function on Windows.
-    (cnauroth)
-
     HADOOP-11002. shell escapes are incompatible with previous releases (aw)
 
     HADOOP-10996. Stop violence in the *_HOME (aw)
@@ -774,6 +771,9 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11071. KMSClientProvider should drain the local generated EEK
     cache on key rollover. (tucu)
 
+    HADOOP-10925. Compilation fails in native link0 function on Windows.
+    (cnauroth)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

From 28d99db99236ff2a6e4a605802820e2b512225f9 Mon Sep 17 00:00:00 2001
From: Karthik Kambatla
Date: Tue, 9 Sep 2014 15:13:07 -0700
Subject: [PATCH 4/5] YARN-2526. SLS can deadlock when all the threads are
 taken by AMSimulators. (Wei Yan via kasha)

---
 .../yarn/sls/appmaster/MRAMSimulator.java | 43 ++++++++++---------
 hadoop-yarn-project/CHANGES.txt           |  3 ++
 2 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
index fb702059ad..da267a1c1b 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
@@ -179,26 +179,8 @@ public AllocateResponse run() throws Exception {
         return rm.getApplicationMasterService().allocate(request);
       }
     });
-
-    // waiting until the AM container is allocated
-    while (true) {
-      if (response != null && ! response.getAllocatedContainers().isEmpty()) {
-        // get AM container
-        Container container = response.getAllocatedContainers().get(0);
-        se.getNmMap().get(container.getNodeId())
-            .addNewContainer(container, -1L);
-        // start AM container
-        amContainer = container;
-        LOG.debug(MessageFormat.format("Application {0} starts its " +
-            "AM container ({1}).", appId, amContainer.getId()));
-        isAMContainerRunning = true;
-        break;
-      }
-      // this sleep time is different from HeartBeat
-      Thread.sleep(1000);
-      // send out empty request
-      sendContainerRequest();
-      response = responseQueue.take();
+    if (response != null) {
+      responseQueue.put(response);
     }
   }
 
@@ -206,6 +188,26 @@ public AllocateResponse run() throws Exception {
   @SuppressWarnings("unchecked")
   protected void processResponseQueue()
       throws InterruptedException, YarnException, IOException {
+    // Check whether receive the am container
+    if (!isAMContainerRunning) {
+      if (!responseQueue.isEmpty()) {
+        AllocateResponse response = responseQueue.take();
+        if (response != null
+            && !response.getAllocatedContainers().isEmpty()) {
+          // Get AM container
+          Container container = response.getAllocatedContainers().get(0);
+          se.getNmMap().get(container.getNodeId())
+              .addNewContainer(container, -1L);
+          // Start AM container
+          amContainer = container;
+          LOG.debug(MessageFormat.format("Application {0} starts its " +
+              "AM container ({1}).", appId, amContainer.getId()));
+          isAMContainerRunning = true;
+        }
+      }
+      return;
+    }
+
     while (! responseQueue.isEmpty()) {
       AllocateResponse response = responseQueue.take();
 
@@ -262,6 +264,7 @@ protected void processResponseQueue()
           LOG.debug(MessageFormat.format("Application {0} sends out event " +
               "to clean up its AM container.", appId));
           isFinished = true;
+          break;
         }
 
         // check allocated containers
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d799c28839..7eaf1c805d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -302,6 +302,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2519. Credential Provider related unit tests failed on Windows.
     (Xiaoyu Yao via cnauroth)
 
+    YARN-2526. SLS can deadlock when all the threads are taken by AMSimulators.
+    (Wei Yan via kasha)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
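
Context for the fix above: every AMSimulator runs on a shared thread pool, and the old while(true) loop parked a pool thread in Thread.sleep()/responseQueue.take() until the AM container arrived. With enough simulators, every pool thread could be parked this way, leaving none free to produce the very responses being awaited, hence the deadlock. The patch makes the RPC side enqueue-only and moves the AM-container check into the periodic, non-blocking processResponseQueue() pass. The following stripped-down sketch shows the same queue discipline in isolation (ResponseHandler and its methods are invented names for illustration; only the hand-off pattern mirrors the patch):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class ResponseHandler<T> {
      private final BlockingQueue<T> responseQueue =
          new LinkedBlockingQueue<T>();
      private volatile boolean amContainerRunning = false;

      /** Producer side: enqueue and return; never parks a pool thread. */
      void onResponse(T response) throws InterruptedException {
        if (response != null) {
          responseQueue.put(response);
        }
      }

      /** Consumer side: invoked on every simulated heartbeat. */
      void processResponseQueue() throws InterruptedException {
        if (!amContainerRunning) {
          // Check emptiness instead of blocking in take(): if the AM
          // container has not arrived, yield and retry next heartbeat.
          if (!responseQueue.isEmpty()) {
            startAMContainer(responseQueue.take());
            amContainerRunning = true;
          }
          return;
        }
        while (!responseQueue.isEmpty()) {
          handleResponse(responseQueue.take());
        }
      }

      void startAMContainer(T first) { /* register the AM container */ }
      void handleResponse(T next) { /* normal allocate-response handling */ }
    }
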
From 05af0ff4be871ddbb4c4cb4f0b5b506ecee36fb8 Mon Sep 17 00:00:00 2001
From: Konstantin V Shvachko
Date: Tue, 9 Sep 2014 17:30:10 -0700
Subject: [PATCH 5/5] Revert HDFS-6940.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 -
 .../server/blockmanagement/BlockManager.java  | 23 ++--------
 .../blockmanagement/DatanodeManager.java      |  6 +--
 .../blockmanagement/HostFileManager.java      |  4 --
 .../hdfs/server/namenode/FSNamesystem.java    | 46 +++++++++----------
 .../hdfs/server/namenode/NameNodeAdapter.java |  2 +-
 6 files changed, 26 insertions(+), 57 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 978f1b8722..4efd1dcb0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -449,8 +449,6 @@ Release 2.6.0 - UNRELEASED
     HDFS-6376. Distcp data between two HA clusters requires another
     configuration. (Dave Marion and Haohui Mai via jing9)
 
-    HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
-
     HDFS-6943. Improve NN allocateBlock log to include replicas' datanode IPs.
     (Ming Ma via wheat9)
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6176188353..8470680a98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public int getPendingDataNodeMessageCount() {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  Daemon replicationThread;
+  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
 
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,7 +263,6 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats,
     this.namesystem = namesystem;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
-    setReplicationMonitor(new ReplicationMonitor());
 
     final long pendingPeriod = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -395,23 +394,7 @@ private static BlockTokenSecretManager createBlockTokenSecretManager(
           lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
     }
   }
-
-  public long getReplicationRecheckInterval() {
-    return replicationRecheckInterval;
-  }
-
-  public AtomicLong excessBlocksCount() {
-    return excessBlocksCount;
-  }
-
-  public void clearInvalidateBlocks() {
-    invalidateBlocks.clear();
-  }
-
-  void setReplicationMonitor(Runnable replicationMonitor) {
-    replicationThread = new Daemon(replicationMonitor);
-  }
-
+  
   public void setBlockPoolId(String blockPoolId) {
     if (isBlockTokenEnabled()) {
       blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1633,7 +1616,7 @@ else if (excessBlocks != null && excessBlocks.contains(block)) {
    * If there were any replication requests that timed out, reap them
    * and put them back into the neededReplication queue
    */
-  void processPendingReplications() {
+  private void processPendingReplications() {
     Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
     if (timedOutItems != null) {
       namesystem.writeLock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 55d616f699..709f060d23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1053,7 +1053,7 @@ private void refreshHostsReader(Configuration conf) throws IOException {
    *   3. Added to exclude --> start decommission.
    *   4. Removed from exclude --> stop decommission.
    */
-  void refreshDatanodes() {
+  private void refreshDatanodes() {
     for(DatanodeDescriptor node : datanodeMap.values()) {
       // Check if not include.
       if (!hostFileManager.isIncluded(node)) {
@@ -1586,9 +1586,5 @@ public void clearPendingCachingCommands() {
   public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
     this.shouldSendCachingCommands = shouldSendCachingCommands;
   }
-
-  public HostFileManager getHostFileManager() {
-    return this.hostFileManager;
-  }
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index 7db23e4150..0b8d6c5bc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -129,10 +129,6 @@ synchronized boolean hasIncludes() {
   void refresh(String includeFile, String excludeFile) throws IOException {
     HostSet newIncludes = readFile("included", includeFile);
     HostSet newExcludes = readFile("excluded", excludeFile);
-    setHosts(newIncludes, newExcludes);
-  }
-
-  void setHosts(HostSet newIncludes, HostSet newExcludes) {
     synchronized (this) {
       includes = newIncludes;
       excludes = newExcludes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a6b98a559d..c1744f6421 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -978,7 +978,7 @@ private List<AuditLogger> initAuditLoggers(Configuration conf) {
     return Collections.unmodifiableList(auditLoggers);
   }
 
-  protected void loadFSImage(StartupOption startOpt) throws IOException {
+  private void loadFSImage(StartupOption startOpt) throws IOException {
     final FSImage fsImage = getFSImage();
 
     // format before starting up if requested
@@ -1026,7 +1026,7 @@ protected void loadFSImage(StartupOption startOpt) throws IOException {
     imageLoadComplete();
   }
 
-  protected void startSecretManager() {
+  private void startSecretManager() {
     if (dtSecretManager != null) {
       try {
         dtSecretManager.startThreads();
@@ -1038,7 +1038,7 @@ protected void startSecretManager() {
     }
   }
 
-  protected void startSecretManagerIfNecessary() {
+  private void startSecretManagerIfNecessary() {
     boolean shouldRun = shouldUseDelegationTokens() &&
       !isInSafeMode() && getEditLog().isOpenForWrite();
     boolean running = dtSecretManager.isRunning();
@@ -1188,7 +1188,7 @@ public boolean inTransitionToActive() {
     return haEnabled && inActiveState() && startingActiveService;
   }
 
-  protected boolean shouldUseDelegationTokens() {
+  private boolean shouldUseDelegationTokens() {
     return UserGroupInformation.isSecurityEnabled() ||
         alwaysUseDelegationTokensForTests;
   }
@@ -2729,7 +2729,6 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
    * @throws UnresolvedLinkException
    * @throws IOException
    */
-  protected
   LocatedBlock prepareFileForWrite(String src, INodeFile file,
       String leaseHolder, String clientMachine,
       boolean writeToEditLog,
@@ -3186,7 +3185,6 @@ FileState analyzeFileState(String src,
     return new FileState(pendingFile, src);
   }
 
-  protected
   LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
       long offset) throws IOException {
     LocatedBlock lBlk = new LocatedBlock(
@@ -3304,8 +3302,8 @@ boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
     return true;
   }
 
-  protected INodeFile checkLease(String src, String holder, INode inode,
-      long fileId)
+  private INodeFile checkLease(String src, String holder, INode inode,
+      long fileId)
       throws LeaseExpiredException, FileNotFoundException {
     assert hasReadLock();
     final String ident = src + " (inode " + fileId + ")";
@@ -4422,7 +4420,7 @@ Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
     return leaseManager.reassignLease(lease, src, newHolder);
   }
 
-  protected void commitOrCompleteLastBlock(final INodeFile fileINode,
+  private void commitOrCompleteLastBlock(final INodeFile fileINode,
       final Block commitBlock) throws IOException {
     assert hasWriteLock();
     Preconditions.checkArgument(fileINode.isUnderConstruction());
@@ -4818,7 +4816,6 @@ String getRegistrationID() {
    * @return an array of datanode commands
    * @throws IOException
    */
-  protected
   HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
       StorageReport[] reports, long cacheCapacity, long cacheUsed,
       int xceiverCount, int xmitsInProgress, int failedVolumes)
@@ -4868,8 +4865,8 @@ void checkAvailableResources() {
    * @param file
    * @param logRetryCache
    */
-  protected void persistBlocks(String path, INodeFile file,
-      boolean logRetryCache) {
+  private void persistBlocks(String path, INodeFile file,
+      boolean logRetryCache) {
     assert hasWriteLock();
     Preconditions.checkArgument(file.isUnderConstruction());
     getEditLog().logUpdateBlocks(path, file, logRetryCache);
@@ -5300,7 +5297,7 @@ void setBalancerBandwidth(long bandwidth) throws IOException {
    * @param path
    * @param file
    */
-  protected void persistNewBlock(String path, INodeFile file) {
+  private void persistNewBlock(String path, INodeFile file) {
     Preconditions.checkArgument(file.isUnderConstruction());
     getEditLog().logAddBlock(path, file);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -7178,7 +7175,7 @@ private void logReassignLease(String leaseHolder, String src,
    *
    * @return true if delegation token operation is allowed
    */
-  protected boolean isAllowedDelegationTokenOp() throws IOException {
+  private boolean isAllowedDelegationTokenOp() throws IOException {
     AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
     if (UserGroupInformation.isSecurityEnabled()
         && (authMethod != AuthenticationMethod.KERBEROS)
@@ -7345,13 +7342,7 @@ public String getLiveNodes() {
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
     for (DatanodeDescriptor node : live) {
-      info.put(node.getHostName(), getLiveNodeInfo(node));
-    }
-    return JSON.toString(info);
-  }
-
-  protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
-    return ImmutableMap.<String, Object>builder()
+      Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
           .put("infoAddr", node.getInfoAddr())
           .put("infoSecureAddr", node.getInfoSecureAddr())
           .put("xferaddr", node.getXferAddr())
@@ -7369,6 +7360,10 @@ protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
           .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
          .put("volfails", node.getVolumeFailures())
           .build();
+
+      info.put(node.getHostName(), innerinfo);
+    }
+    return JSON.toString(info);
   }
 
   /**
@@ -7653,16 +7648,17 @@ public ReentrantReadWriteLock getFsLockForTests() {
   public ReentrantLock getLongReadLockForTests() {
     return fsLock.longReadLock;
   }
+
+  @VisibleForTesting
+  public SafeModeInfo getSafeModeInfoForTests() {
+    return safeMode;
+  }
 
   @VisibleForTesting
   public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
     this.nnResourceChecker = nnResourceChecker;
   }
 
-  public SafeModeInfo getSafeModeInfo() {
-    return safeMode;
-  }
-
   @Override
   public boolean isAvoidingStaleDataNodesForWrite() {
     return this.blockManager.getDatanodeManager()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index d65d1ff5be..c32ed67d6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -223,7 +223,7 @@ public static FSEditLogOp createMkdirOp(String path) {
    * if safemode is not running.
    */
   public static int getSafeModeSafeBlocks(NameNode nn) {
-    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo();
+    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
     if (smi == null) {
       return -1;
     }
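
One detail of the revert worth calling out (an observation with an illustrative sketch, not part of the patch): restoring the final, inline-initialized replication thread in BlockManager is arguably more than cosmetic. A final field assigned during construction gets the Java memory model's safe-publication guarantee, whereas the setter-based setReplicationMonitor() variant being removed left a window in which another thread could observe the field unset. A reduced sketch of the restored idiom (BlockManagerSketch is a made-up name; Daemon is the real org.apache.hadoop.util.Daemon wrapper around Thread):

    import org.apache.hadoop.util.Daemon;

    class BlockManagerSketch {
      // final + inline construction: any thread that sees this object
      // after construction also sees a fully built replication thread.
      final Daemon replicationThread = new Daemon(new ReplicationMonitor());

      private class ReplicationMonitor implements Runnable {
        @Override
        public void run() {
          // periodic replication and invalidation work would go here
        }
      }

      void activate() {
        replicationThread.start();
      }
    }

The same revert also narrows the test hook: the public getSafeModeInfo() accessor is replaced by an @VisibleForTesting getSafeModeInfoForTests(), keeping the test-only surface explicitly labeled.
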