diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a5398891e3..91bbe7b01a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -141,6 +141,9 @@ Trunk (unreleased changes)
     HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using
     NetUtils. (Hari Mankude via suresh)
 
+    HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
+    (suresh)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 8626584c55..c993f6c9ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -103,12 +103,6 @@ private void createFile(Path file, long fileLen, short replicas) throws IOExcept
     DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
   }
 
-  private void updateMetrics() throws Exception {
-    // Wait for metrics update (corresponds to dfs.namenode.replication.interval
-    // for some block related metrics to get updated)
-    Thread.sleep(1000);
-  }
-
   private void readFile(FileSystem fileSys,Path name) throws IOException {
     //Just read file so that getNumBlockLocations are incremented
     DataInputStream stm = fileSys.open(name);
@@ -125,7 +119,6 @@ public void testFileAdd() throws Exception {
     createFile(file, 3200, (short)3);
     final long blockCount = 32;
     int blockCapacity = namesystem.getBlockCapacity();
-    updateMetrics();
     assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
 
     MetricsRecordBuilder rb = getMetrics(NN_METRICS);
@@ -140,7 +133,6 @@ public void testFileAdd() throws Exception {
     while (threshold < blockCount) {
       blockCapacity <<= 1;
     }
-    updateMetrics();
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
@@ -150,7 +142,6 @@ public void testFileAdd() throws Exception {
 
     filesTotal--; // reduce the filecount for deleted file
     waitForDeletion();
-    updateMetrics();
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", 0L, rb);
@@ -174,7 +165,7 @@ public void testCorruptBlock() throws Exception {
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
         "TEST");
-    updateMetrics();
+    Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("CorruptBlocks", 1L, rb);
     assertGauge("PendingReplicationBlocks", 1L, rb);
@@ -196,7 +187,6 @@ public void testExcessBlocks() throws Exception {
     createFile(file, 100, (short)2);
     long totalBlocks = 1;
     NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
-    updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("ExcessBlocks", totalBlocks, rb);
     fs.delete(file, true);
@@ -214,7 +204,7 @@ public void testMissingBlock() throws Exception {
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
         "TEST");
-    updateMetrics();
+    Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("UnderReplicatedBlocks", 1L, rb);
     assertGauge("MissingBlocks", 1L, rb);
@@ -236,7 +226,6 @@ public void testRenameMetrics() throws Exception {
     Path target = getTestPath("target");
     createFile(target, 100, (short)1);
     fs.rename(src, target, Rename.OVERWRITE);
-    updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NN_METRICS);
     assertCounter("FilesRenamed", 1L, rb);
     assertCounter("FilesDeleted", 1L, rb);
@@ -264,7 +253,6 @@ public void testGetBlockLocationMetric() throws Exception {
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
-    updateMetrics();
 
     //Create file does not change numGetBlockLocations metric
     //expect numGetBlockLocations = 0 for previous and current interval
@@ -273,14 +261,12 @@ public void testGetBlockLocationMetric() throws Exception {
     // Open and read file operation increments GetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
-    updateMetrics();
     // Verify read file operation has incremented numGetBlockLocations by 1
     assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
 
     // opening and reading file twice will increment numGetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
-    updateMetrics();
     assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
   }
 
@@ -298,7 +284,6 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
 
     fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
-    updateMetrics();
 
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
     assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
@@ -306,7 +291,6 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
     assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
 
     cluster.getNameNodeRpc().rollEditLog();
-    updateMetrics();
 
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
     assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
@@ -316,7 +300,6 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
     cluster.getNameNodeRpc().saveNamespace();
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-    updateMetrics();
     long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
         getMetrics(NS_METRICS));