HDFS-3002. TestNameNodeMetrics need not wait for metrics update. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293482 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2012-02-24 23:52:20 +00:00
parent 4e64d7b447
commit 5293e5d1c5
2 changed files with 5 additions and 19 deletions

View File

@@ -141,6 +141,9 @@ Trunk (unreleased changes)
HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using
NetUtils. (Hari Mankude via suresh)
HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
(suresh)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
namenode state. (Tomasz Nykiel via hairong)

View File

@@ -103,12 +103,6 @@ private void createFile(Path file, long fileLen, short replicas) throws IOExcept
DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
}
private void updateMetrics() throws Exception {
// Wait for metrics update (corresponds to dfs.namenode.replication.interval
// for some block related metrics to get updated)
Thread.sleep(1000);
}
private void readFile(FileSystem fileSys,Path name) throws IOException {
//Just read file so that getNumBlockLocations are incremented
DataInputStream stm = fileSys.open(name);
@@ -125,7 +119,6 @@ public void testFileAdd() throws Exception {
createFile(file, 3200, (short)3);
final long blockCount = 32;
int blockCapacity = namesystem.getBlockCapacity();
updateMetrics();
assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
@@ -140,7 +133,6 @@ public void testFileAdd() throws Exception {
while (threshold < blockCount) {
blockCapacity <<= 1;
}
updateMetrics();
long filesTotal = file.depth() + 1; // Add 1 for root
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
@@ -150,7 +142,6 @@ public void testFileAdd() throws Exception {
filesTotal--; // reduce the filecount for deleted file
waitForDeletion();
updateMetrics();
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
assertGauge("BlocksTotal", 0L, rb);
@@ -174,7 +165,7 @@ public void testCorruptBlock() throws Exception {
cluster.getNameNode(), file.toString(), 0, 1).get(0);
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"TEST");
updateMetrics();
Thread.sleep(1000); // Wait for block to be marked corrupt
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("CorruptBlocks", 1L, rb);
assertGauge("PendingReplicationBlocks", 1L, rb);
@@ -196,7 +187,6 @@ public void testExcessBlocks() throws Exception {
createFile(file, 100, (short)2);
long totalBlocks = 1;
NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", totalBlocks, rb);
fs.delete(file, true);
@@ -214,7 +204,7 @@ public void testMissingBlock() throws Exception {
cluster.getNameNode(), file.toString(), 0, 1).get(0);
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"TEST");
updateMetrics();
Thread.sleep(1000); // Wait for block to be marked corrupt
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("UnderReplicatedBlocks", 1L, rb);
assertGauge("MissingBlocks", 1L, rb);
@@ -236,7 +226,6 @@ public void testRenameMetrics() throws Exception {
Path target = getTestPath("target");
createFile(target, 100, (short)1);
fs.rename(src, target, Rename.OVERWRITE);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
assertCounter("FilesRenamed", 1L, rb);
assertCounter("FilesDeleted", 1L, rb);
@@ -264,7 +253,6 @@ public void testGetBlockLocationMetric() throws Exception {
//Perform create file operation
createFile(file1_Path,100,(short)2);
updateMetrics();
//Create file does not change numGetBlockLocations metric
//expect numGetBlockLocations = 0 for previous and current interval
@@ -273,14 +261,12 @@ public void testGetBlockLocationMetric() throws Exception {
// Open and read file operation increments GetBlockLocations
// Perform read file operation on earlier created file
readFile(fs, file1_Path);
updateMetrics();
// Verify read file operation has incremented numGetBlockLocations by 1
assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
// opening and reading file twice will increment numGetBlockLocations by 2
readFile(fs, file1_Path);
readFile(fs, file1_Path);
updateMetrics();
assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
}
@@ -298,7 +284,6 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
@@ -306,7 +291,6 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().rollEditLog();
updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
@@ -316,7 +300,6 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
updateMetrics();
long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));