From 99a68a14237b4cd1936ba5e9468d25d35dad594c Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 28 Mar 2012 20:37:34 +0000 Subject: [PATCH] HDFS-3155. Clean up FSDataset implementation related code. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1306582 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hdfs/server/datanode/BlockReceiver.java | 7 +- .../hdfs/server/datanode/BlockSender.java | 17 ++- .../hdfs/server/datanode/DataStorage.java | 4 +- .../hdfs/server/datanode/DatanodeUtil.java | 2 +- .../hdfs/server/datanode/FSDataset.java | 4 +- .../server/datanode/ReplicaUnderRecovery.java | 10 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 7 +- .../hadoop/hdfs/TestLeaseRecovery2.java | 8 +- .../org/apache/hadoop/hdfs/TestPipelines.java | 17 +-- .../hdfs/server/datanode/DataNodeAdapter.java | 111 ------------------ .../server/datanode/DataNodeTestUtils.java | 90 +++++++++++++- .../server/datanode/SimulatedFSDataset.java | 1 - .../hdfs/server/datanode/TestBlockReport.java | 25 ++-- .../namenode/ha/HAStressTestHarness.java | 6 +- .../hdfs/server/namenode/ha/HATestUtil.java | 4 +- .../server/namenode/ha/TestDNFencing.java | 10 +- .../namenode/ha/TestPipelinesFailover.java | 11 +- .../server/namenode/ha/TestStandbyIsHot.java | 6 +- 19 files changed, 150 insertions(+), 192 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 10100b875a..7a2dea9583 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -277,6 +277,8 @@ Release 2.0.0 - UNRELEASED HDFS-3129. NetworkTopology: add test that getLeaf should check for invalid topologies (Colin Patrick McCabe via eli) + HDFS-3155. Clean up FSDataset implementation related code. (szetszwo) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 1449b88f8f..72591e018e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -358,9 +358,8 @@ class BlockReceiver implements Closeable { * This does not verify the original checksums, under the assumption * that they have already been validated.
*/ - private void translateChunks( byte[] dataBuf, int dataOff, int len, - byte[] checksumBuf, int checksumOff ) - throws IOException { + private void translateChunks( byte[] dataBuf, int dataOff, int len, + byte[] checksumBuf, int checksumOff ) { if (len == 0) return; int numChunks = (len - 1)/bytesPerChecksum + 1; @@ -702,7 +701,7 @@ class BlockReceiver implements Closeable { return lastPacketInBlock?-1:len; } - private void dropOsCacheBehindWriter(long offsetInBlock) throws IOException { + private void dropOsCacheBehindWriter(long offsetInBlock) { try { if (outFd != null && offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 53ee5b7c06..6a830dbbf9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -111,10 +111,6 @@ class BlockSender implements java.io.Closeable { /** the block to read from */ private final ExtendedBlock block; - /** the replica to read from */ - private final Replica replica; - /** The visible length of a replica. */ - private final long replicaVisibleLength; /** Stream to read block data from */ private InputStream blockIn; /** updated while using transferTo() */ @@ -189,17 +185,18 @@ class BlockSender implements java.io.Closeable { this.readaheadLength = datanode.getDnConf().readaheadLength; this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads; + final Replica replica; + final long replicaVisibleLength; synchronized(datanode.data) { - this.replica = getReplica(block, datanode); - this.replicaVisibleLength = replica.getVisibleLength(); + replica = getReplica(block, datanode); + replicaVisibleLength = replica.getVisibleLength(); } // if there is a write in progress ChunkChecksum chunkChecksum = null; if (replica instanceof ReplicaBeingWritten) { - long minEndOffset = startOffset + length; - waitForMinLength((ReplicaBeingWritten)replica, minEndOffset); - ReplicaInPipeline rip = (ReplicaInPipeline) replica; - chunkChecksum = rip.getLastChecksumAndDataLen(); + final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica; + waitForMinLength(rbw, startOffset + length); + chunkChecksum = rbw.getLastChecksumAndDataLen(); } if (replica.getGenerationStamp() < block.getGenerationStamp()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 64349d86c4..16244c725b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -760,8 +760,8 @@ public class DataStorage extends Storage { /** * Add bpStorage into bpStorageMap */ - private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage) - throws IOException { + private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage + ) { if (!this.bpStorageMap.containsKey(bpID)) { this.bpStorageMap.put(bpID, bpStorage); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java index c59929edd6..e3eaa6126e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java @@ -32,7 +32,7 @@ class DatanodeUtil { static final String UNLINK_BLOCK_SUFFIX = ".unlinked"; - private final static String DISK_ERROR = "Possible disk error on file creation: "; + private static final String DISK_ERROR = "Possible disk error: "; /** Get the cause of an I/O exception if caused by a possible disk error * @param ioe an I/O exception diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index 3a4a4b612e..f8699630f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -1800,7 +1800,7 @@ public class FSDataset implements FsDatasetSpi { ReplicaInfo replicaInfo) throws IOException { FinalizedReplica newReplicaInfo = null; if (replicaInfo.getState() == ReplicaState.RUR && - ((ReplicaUnderRecovery)replicaInfo).getOrignalReplicaState() == + ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == ReplicaState.FINALIZED) { newReplicaInfo = (FinalizedReplica) ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica(); @@ -2036,7 +2036,7 @@ public class FSDataset implements FsDatasetSpi { ReplicaState replicaState = dinfo.getState(); if (replicaState == ReplicaState.FINALIZED || (replicaState == ReplicaState.RUR && - ((ReplicaUnderRecovery)dinfo).getOrignalReplicaState() == + ((ReplicaUnderRecovery)dinfo).getOriginalReplica().getState() == ReplicaState.FINALIZED)) { v.clearPath(bpid, parent); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java index 2e15e6fce5..d5bbf04227 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java @@ -86,14 +86,6 @@ class ReplicaUnderRecovery extends ReplicaInfo { ReplicaInfo getOriginalReplica() { return original; } - - /** - * Get the original replica's state - * @return the original replica's state - */ - ReplicaState getOrignalReplicaState() { - return original.getState(); - } @Override //ReplicaInfo boolean isUnlinked() { @@ -170,6 +162,6 @@ class ReplicaUnderRecovery extends ReplicaInfo { ReplicaRecoveryInfo createInfo() { return new ReplicaRecoveryInfo(original.getBlockId(), original.getBytesOnDisk(), original.getGenerationStamp(), - getOrignalReplicaState()); + original.getState()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 6ab3f0ce66..6717a01dab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; @@ -1664,7 +1663,7 @@ public class MiniDFSCluster { public void triggerBlockReports() throws IOException { for (DataNode dn : getDataNodes()) { - DataNodeAdapter.triggerBlockReport(dn); + DataNodeTestUtils.triggerBlockReport(dn); } } @@ -1672,14 +1671,14 @@ public class MiniDFSCluster { public void triggerDeletionReports() throws IOException { for (DataNode dn : getDataNodes()) { - DataNodeAdapter.triggerDeletionReport(dn); + DataNodeTestUtils.triggerDeletionReport(dn); } } public void triggerHeartbeats() throws IOException { for (DataNode dn : getDataNodes()) { - DataNodeAdapter.triggerHeartbeat(dn); + DataNodeTestUtils.triggerHeartbeat(dn); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 0222e18579..a374e50d49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -454,7 +454,7 @@ public class TestLeaseRecovery2 { // Make sure the DNs don't send a heartbeat for a while, so the blocks // won't actually get completed during lease recovery. for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.setHeartbeatsDisabledForTests(dn, true); + DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); } // set the hard limit to be 1 second @@ -474,7 +474,7 @@ public class TestLeaseRecovery2 { // Let the DNs send heartbeats again. 
for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.setHeartbeatsDisabledForTests(dn, false); + DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false); } cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java index 1dc0b1ebd4..0d2ebc96af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.Random; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -26,22 +33,16 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; -import java.io.IOException; -import java.util.List; -import java.util.Random; - public class TestPipelines { public static final Log LOG = LogFactory.getLog(TestPipelines.class); @@ -105,7 +106,7 @@ public class TestPipelines { String bpid = cluster.getNamesystem().getBlockPoolId(); for (DataNode dn : cluster.getDataNodes()) { - Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0) + Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0) .getBlock().getBlockId()); assertTrue("Replica on DN " + dn + " shouldn't be null", r != null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java deleted file mode 100644 index 6ab878c561..0000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.datanode; - -import java.io.IOException; - -import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.mockito.Mockito; - -import com.google.common.base.Preconditions; - -/** - * WARNING!! This is TEST ONLY class: it never has to be used - * for ANY development purposes. - * - * This is a utility class to expose DataNode functionality for - * unit and functional tests. - */ -public class DataNodeAdapter { - /** - * Fetch a copy of ReplicaInfo from a datanode by block id - * @param dn datanode to retrieve a replicainfo object from - * @param bpid Block pool Id - * @param blkId id of the replica's block - * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo} - */ - public static ReplicaInfo fetchReplicaInfo (final DataNode dn, - final String bpid, - final long blkId) { - return ((FSDataset)dn.data).fetchReplicaInfo(bpid, blkId); - } - - public static void setHeartbeatsDisabledForTests(DataNode dn, - boolean heartbeatsDisabledForTests) { - dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests); - } - - public static void triggerDeletionReport(DataNode dn) throws IOException { - for (BPOfferService bpos : dn.getAllBpOs()) { - bpos.triggerDeletionReportForTests(); - } - } - - public static void triggerHeartbeat(DataNode dn) throws IOException { - for (BPOfferService bpos : dn.getAllBpOs()) { - bpos.triggerHeartbeatForTests(); - } - } - - public static void triggerBlockReport(DataNode dn) throws IOException { - for (BPOfferService bpos : dn.getAllBpOs()) { - bpos.triggerBlockReportForTests(); - } - } - - public static long getPendingAsyncDeletions(DataNode dn) { - FSDataset fsd = (FSDataset)dn.getFSDataset(); - return fsd.asyncDiskService.countPendingDeletions(); - } - - /** - * Insert a Mockito spy object between the given DataNode and - * the given NameNode. This can be used to delay or wait for - * RPC calls on the datanode->NN path. 
- */ - public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN( - DataNode dn, NameNode nn) { - String bpid = nn.getNamesystem().getBlockPoolId(); - - BPOfferService bpos = null; - for (BPOfferService thisBpos : dn.getAllBpOs()) { - if (thisBpos.getBlockPoolId().equals(bpid)) { - bpos = thisBpos; - break; - } - } - Preconditions.checkArgument(bpos != null, - "No such bpid: %s", bpid); - - BPServiceActor bpsa = null; - for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) { - if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) { - bpsa = thisBpsa; - break; - } - } - Preconditions.checkArgument(bpsa != null, - "No service actor to NN at %s", nn.getServiceRpcAddress()); - - DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy(); - DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN); - bpsa.setNameNode(spy); - return spy; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 1a871dd35e..726c5d3ce3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -24,8 +24,13 @@ import java.io.IOException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.mockito.Mockito; + +import com.google.common.base.Preconditions; /** * Utility class for accessing package-private DataNode information during tests. @@ -42,6 +47,64 @@ public class DataNodeTestUtils { return dn.getDNRegistrationForBP(bpid); } + public static void setHeartbeatsDisabledForTests(DataNode dn, + boolean heartbeatsDisabledForTests) { + dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests); + } + + public static void triggerDeletionReport(DataNode dn) throws IOException { + for (BPOfferService bpos : dn.getAllBpOs()) { + bpos.triggerDeletionReportForTests(); + } + } + + public static void triggerHeartbeat(DataNode dn) throws IOException { + for (BPOfferService bpos : dn.getAllBpOs()) { + bpos.triggerHeartbeatForTests(); + } + } + + public static void triggerBlockReport(DataNode dn) throws IOException { + for (BPOfferService bpos : dn.getAllBpOs()) { + bpos.triggerBlockReportForTests(); + } + } + + /** + * Insert a Mockito spy object between the given DataNode and + * the given NameNode. This can be used to delay or wait for + * RPC calls on the datanode->NN path. 
+ */ + public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN( + DataNode dn, NameNode nn) { + String bpid = nn.getNamesystem().getBlockPoolId(); + + BPOfferService bpos = null; + for (BPOfferService thisBpos : dn.getAllBpOs()) { + if (thisBpos.getBlockPoolId().equals(bpid)) { + bpos = thisBpos; + break; + } + } + Preconditions.checkArgument(bpos != null, + "No such bpid: %s", bpid); + + BPServiceActor bpsa = null; + for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) { + if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) { + bpsa = thisBpsa; + break; + } + } + Preconditions.checkArgument(bpsa != null, + "No service actor to NN at %s", nn.getServiceRpcAddress()); + + DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy(); + DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN); + bpsa.setNameNode(spy); + return spy; + } + /** * This method is used for testing. * Examples are adding and deleting blocks directly. @@ -53,18 +116,37 @@ public class DataNodeTestUtils { return dn.getFSDataset(); } + public static FSDataset getFsDatasetImpl(DataNode dn) { + return (FSDataset)dn.getFSDataset(); + } + public static File getFile(DataNode dn, String bpid, long bid) { - return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid); + return getFsDatasetImpl(dn).getFile(bpid, bid); } public static File getBlockFile(DataNode dn, String bpid, Block b ) throws IOException { - return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b); + return getFsDatasetImpl(dn).getBlockFile(bpid, b); } public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks ) throws IOException { - ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block); - return info.unlinkBlock(numLinks); + return getFsDatasetImpl(dn).getReplicaInfo(block).unlinkBlock(numLinks); + } + + public static long getPendingAsyncDeletions(DataNode dn) { + return getFsDatasetImpl(dn).asyncDiskService.countPendingDeletions(); + } + + /** + * Fetch a copy of ReplicaInfo from a datanode by block id + * @param dn datanode to retrieve a replicainfo object from + * @param bpid Block pool Id + * @param blkId id of the replica's block + * @return copy of ReplicaInfo object @link{FSDataset#fetchReplicaInfo} + */ + public static ReplicaInfo fetchReplicaInfo(final DataNode dn, + final String bpid, final long blkId) { + return getFsDatasetImpl(dn).fetchReplicaInfo(bpid, blkId); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index a12ac722a7..8d9ee07ea0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java index 38c631381e..985900030e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java @@ -17,6 +17,17 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -40,27 +51,17 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.log4j.Level; import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; -import java.io.File; -import java.io.FilenameFilter; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.CountDownLatch; - /** * This test simulates a variety of situations when blocks are being * intentionally corrupted, unexpectedly modified, and so on before a block @@ -561,7 +562,7 @@ public class TestBlockReport { // from this node.
DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeAdapter.spyOnBposToNN(dn, nn); + DataNodeTestUtils.spyOnBposToNN(dn, nn); Mockito.doAnswer(delayer) .when(spy).blockReport( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java index 39667eddf1..81c45f3789 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; @@ -83,8 +83,8 @@ public class HAStressTestHarness { @Override public void doAnAction() throws Exception { for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.triggerDeletionReport(dn); - DataNodeAdapter.triggerHeartbeat(dn); + DataNodeTestUtils.triggerDeletionReport(dn); + DataNodeTestUtils.triggerHeartbeat(dn); } for (int i = 0; i < 2; i++) { NameNode nn = cluster.getNameNode(i); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java index bf919cea7f..7224b430d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; @@ -96,7 +96,7 @@ public abstract class HATestUtil { @Override public Boolean get() { for (DataNode dn : cluster.getDataNodes()) { - if (DataNodeAdapter.getPendingAsyncDeletions(dn) > 0) { + if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) { return false; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java index ea769c057e..5e657ded48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static 
org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.io.PrintWriter; @@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -61,7 +60,6 @@ import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.log4j.Level; import org.junit.After; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -72,9 +70,7 @@ import com.google.common.collect.Lists; public class TestDNFencing { - protected static final Log LOG = LogFactory.getLog( - TestDNFencing.class); - private static final String TEST_FILE_DATA = "hello highly available world"; + protected static final Log LOG = LogFactory.getLog(TestDNFencing.class); private static final String TEST_FILE = "/testStandbyIsHot"; private static final Path TEST_FILE_PATH = new Path(TEST_FILE); private static final int SMALL_BLOCK = 1024; @@ -497,7 +493,7 @@ public class TestDNFencing { DataNode dn = cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy = - DataNodeAdapter.spyOnBposToNN(dn, nn2); + DataNodeTestUtils.spyOnBposToNN(dn, nn2); Mockito.doAnswer(delayer) .when(spy).blockReport( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java index c9bae53a28..815be59359 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -45,7 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; @@ -54,9 +57,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; - import org.apache.log4j.Level; - import org.junit.Test; import org.mockito.Mockito; @@ -297,7 +298,7 @@ public 
class TestPipelinesFailover { // active. DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort()); DatanodeProtocolClientSideTranslatorPB nnSpy = - DataNodeAdapter.spyOnBposToNN(primaryDN, nn0); + DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0); // Delay the commitBlockSynchronization call DelayAnswer delayer = new DelayAnswer(LOG); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java index ce5814b0dd..ddfd573b4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java @@ -35,14 +35,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -225,7 +225,7 @@ public class TestStandbyIsHot { LOG.info("Got " + numReplicas + " locs: " + locs); if (numReplicas > expectedReplicas) { for (DataNode dn : cluster.getDataNodes()) { - DataNodeAdapter.triggerDeletionReport(dn); + DataNodeTestUtils.triggerDeletionReport(dn); } } return numReplicas == expectedReplicas;
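
For reference, a minimal sketch of the consolidated test API in use after this patch. It is not part of the change itself: the driver class and cluster setup below are hypothetical, while the DataNodeTestUtils calls are exactly the helpers this patch moves from the deleted DataNodeAdapter into DataNodeTestUtils (compare the TestBlockReport, TestLeaseRecovery2, and HATestUtil hunks above).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

/** Hypothetical driver showing the single entry point for DataNode test hooks. */
public class DataNodeTestUtilsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
      NameNode nn = cluster.getNameNode();

      // Report/heartbeat triggers, formerly static methods on DataNodeAdapter.
      DataNodeTestUtils.triggerBlockReport(dn);
      DataNodeTestUtils.triggerHeartbeat(dn);
      DataNodeTestUtils.triggerDeletionReport(dn);

      // Heartbeat suppression and pending-deletion count, also moved here.
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
      long pending = DataNodeTestUtils.getPendingAsyncDeletions(dn);

      // Mockito spy on the DN->NN RPC path; tests such as TestBlockReport
      // combine this with Mockito.doAnswer(...) to delay block reports.
      DatanodeProtocolClientSideTranslatorPB spy =
          DataNodeTestUtils.spyOnBposToNN(dn, nn);

      System.out.println("pending async deletions: " + pending + ", spy: " + spy);
    } finally {
      cluster.shutdown();
    }
  }
}

A note on the design visible in the DataNodeTestUtils hunk: the FSDataset-backed helpers (getFile, getBlockFile, unlinkBlock, getPendingAsyncDeletions, fetchReplicaInfo) now share the new getFsDatasetImpl(DataNode) accessor, so the (FSDataset)dn.getFSDataset() cast appears once instead of being repeated per method.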