From 45c52ef9246d2c97f120c7010cc5196c7307cce7 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 16 Jun 2014 22:47:58 +0000
Subject: [PATCH] HDFS-6518. TestCacheDirectives#testExceedsCapacity should
 take FSN read lock when accessing pendingCached list. (wang)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1603016 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  4 ++++
 .../server/namenode/TestCacheDirectives.java | 17 +++++++++++------
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b300cad23..ac701cf263 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -443,6 +443,10 @@ Release 2.5.0 - UNRELEASED
     HDFS-6499. Use NativeIO#renameTo instead of File#renameTo in
     FileJournalManager. (Yongjun Zhang via atm)
 
+    HDFS-6518. TestCacheDirectives#testExceedsCapacity should
+    take FSN read lock when accessing pendingCached list.
+    (wang)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 8ef3887dd5..bb8ef96916 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -1408,12 +1408,17 @@ public class TestCacheDirectives {
    */
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
-    final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
-    for (DataNode dn : cluster.getDataNodes()) {
-      DatanodeDescriptor descriptor =
-          datanodeManager.getDatanode(dn.getDatanodeId());
-      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    cluster.getNamesystem().readLock();
+    try {
+      final DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      for (DataNode dn : cluster.getDataNodes()) {
+        DatanodeDescriptor descriptor =
+            datanodeManager.getDatanode(dn.getDatanodeId());
+        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+      }
+    } finally {
+      cluster.getNamesystem().readUnlock();
     }
   }
 
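Note (commentary, not part of the applied patch): the fix follows the standard read-lock discipline around FSNamesystem state: acquire readLock() before traversing structures such as a DatanodeDescriptor's pendingCached list, and release the lock in a finally block so that an exception, including a failed assertion, cannot leak it. Below is a minimal, self-contained sketch of that pattern. The NamesystemState class and its pendingCached field are hypothetical stand-ins for illustration, not actual HDFS classes.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Hypothetical stand-in illustrating the readLock()/readUnlock() pattern. */
public class ReadLockSketch {

  static class NamesystemState {
    // FSNamesystem guards its state with a reader/writer lock; this mirrors that.
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
    private final List<String> pendingCached = new ArrayList<>();

    void readLock()   { lock.readLock().lock(); }
    void readUnlock() { lock.readLock().unlock(); }

    boolean isPendingCachedEmpty() {
      readLock();
      try {
        // Safe to read: no writer can mutate pendingCached while the read lock is held.
        return pendingCached.isEmpty();
      } finally {
        // Release in finally so an exception (e.g. a failed assertion) cannot leak the lock.
        readUnlock();
      }
    }
  }

  public static void main(String[] args) {
    System.out.println(new NamesystemState().isPendingCachedEmpty()); // prints "true"
  }
}

The same shape appears in the patched checkPendingCachedEmpty(): every statement that touches the pendingCached lists sits between readLock() and the finally-guarded readUnlock(), so the test observes a consistent snapshot even while the CacheReplicationMonitor may be running.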