HDFS-9251. Refactor TestWriteToReplica and TestFsDatasetImpl to avoid explicitly creating Files in the tests code. (lei)
commit 71e533a153 (parent 9cb5d35353)
CHANGES.txt
@@ -1540,6 +1540,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9250. Add Precondition check to LocatedBlock#addCachedLoc.
     (Xiao Chen via wang)
 
+    HDFS-9251. Refactor TestWriteToReplica and TestFsDatasetImpl to avoid
+    explicitly creating Files in the tests code. (lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
FsDatasetTestUtils.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.FileNotFoundException;
@@ -137,4 +138,58 @@ interface MaterializedReplica {
    */
   MaterializedReplica getMaterializedReplica(ExtendedBlock block)
       throws ReplicaNotFoundException;
+
+  /**
+   * Create a finalized replica and add it into the FsDataset.
+   */
+  Replica createFinalizedReplica(ExtendedBlock block) throws IOException;
+
+  /**
+   * Create a finalized replica on a particular volume, and add it into
+   * the FsDataset.
+   */
+  Replica createFinalizedReplica(FsVolumeSpi volume, ExtendedBlock block)
+      throws IOException;
+
+  /**
+   * Create a {@link ReplicaInPipeline} and add it into the FsDataset.
+   */
+  Replica createReplicaInPipeline(ExtendedBlock block) throws IOException;
+
+  /**
+   * Create a {@link ReplicaInPipeline} on a particular volume, and add it into the FsDataset.
+   */
+  Replica createReplicaInPipeline(FsVolumeSpi volume, ExtendedBlock block)
+      throws IOException;
+
+  /**
+   * Create a {@link ReplicaBeingWritten} and add it into the FsDataset.
+   */
+  Replica createRBW(ExtendedBlock block) throws IOException;
+
+  /**
+   * Create a {@link ReplicaBeingWritten} on the particular volume, and add it
+   * into the FsDataset.
+   */
+  Replica createRBW(FsVolumeSpi volume, ExtendedBlock block) throws IOException;
+
+  /**
+   * Create a {@link ReplicaWaitingToBeRecovered} object and add it into the
+   * FsDataset.
+   */
+  Replica createReplicaWaitingToBeRecovered(ExtendedBlock block)
+      throws IOException;
+
+  /**
+   * Create a {@link ReplicaWaitingToBeRecovered} on the particular volume,
+   * and add it into the FsDataset.
+   */
+  Replica createReplicaWaitingToBeRecovered(
+      FsVolumeSpi volume, ExtendedBlock block) throws IOException;
+
+  /**
+   * Create a {@link ReplicaUnderRecovery} object and add it into the FsDataset.
+   */
+  Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
+      throws IOException;
 }
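Note: the new factory methods give tests a single entry point for materializing a replica in any state without touching block or meta files directly. A minimal usage sketch, not part of this commit, assuming a running MiniDFSCluster named cluster and its block pool ID bpid as in the tests further below:

    // Obtain the per-datanode test utility (datanode index 0).
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
    // Registers the replica in the dataset and creates its block and meta
    // files on a volume, so the test never handles Files itself.
    ExtendedBlock eb = new ExtendedBlock(bpid, 1, 1, 2001);
    Replica finalized = utils.createFinalizedReplica(eb);
    assertEquals(HdfsServerConstants.ReplicaState.FINALIZED, finalized.getState());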
FsDatasetImplTestUtils.java
@@ -23,10 +23,20 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.Replica;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -176,4 +186,103 @@ public MaterializedReplica getMaterializedReplica(ExtendedBlock block)
         blockFile, block.getGenerationStamp());
     return new FsDatasetImplMaterializedReplica(blockFile, metaFile);
   }
+
+  @Override
+  public Replica createFinalizedReplica(ExtendedBlock block)
+      throws IOException {
+    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
+      return createFinalizedReplica(volumes.get(0), block);
+    }
+  }
+
+  @Override
+  public Replica createFinalizedReplica(FsVolumeSpi volume, ExtendedBlock block)
+      throws IOException {
+    FsVolumeImpl vol = (FsVolumeImpl) volume;
+    ReplicaInfo info = new FinalizedReplica(block.getLocalBlock(), vol,
+        vol.getCurrentDir().getParentFile());
+    dataset.volumeMap.add(block.getBlockPoolId(), info);
+    info.getBlockFile().createNewFile();
+    info.getMetaFile().createNewFile();
+    return info;
+  }
+
+  @Override
+  public Replica createReplicaInPipeline(ExtendedBlock block)
+      throws IOException {
+    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
+      return createReplicaInPipeline(volumes.get(0), block);
+    }
+  }
+
+  @Override
+  public Replica createReplicaInPipeline(
+      FsVolumeSpi volume, ExtendedBlock block) throws IOException {
+    FsVolumeImpl vol = (FsVolumeImpl) volume;
+    ReplicaInPipeline rip = new ReplicaInPipeline(
+        block.getBlockId(), block.getGenerationStamp(), volume,
+        vol.createTmpFile(
+            block.getBlockPoolId(), block.getLocalBlock()).getParentFile(),
+        0);
+    dataset.volumeMap.add(block.getBlockPoolId(), rip);
+    return rip;
+  }
+
+
+  @Override
+  public Replica createRBW(ExtendedBlock eb) throws IOException {
+    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
+      return createRBW(volumes.get(0), eb);
+    }
+  }
+
+  @Override
+  public Replica createRBW(FsVolumeSpi volume, ExtendedBlock eb)
+      throws IOException {
+    FsVolumeImpl vol = (FsVolumeImpl) volume;
+    final String bpid = eb.getBlockPoolId();
+    final Block block = eb.getLocalBlock();
+    ReplicaBeingWritten rbw = new ReplicaBeingWritten(
+        eb.getLocalBlock(), volume,
+        vol.createRbwFile(bpid, block).getParentFile(), null);
+    rbw.getBlockFile().createNewFile();
+    rbw.getMetaFile().createNewFile();
+    dataset.volumeMap.add(bpid, rbw);
+    return rbw;
+  }
+
+  @Override
+  public Replica createReplicaWaitingToBeRecovered(ExtendedBlock eb)
+      throws IOException {
+    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
+      return createReplicaWaitingToBeRecovered(volumes.get(0), eb);
+    }
+  }
+
+  @Override
+  public Replica createReplicaWaitingToBeRecovered(
+      FsVolumeSpi volume, ExtendedBlock eb) throws IOException {
+    FsVolumeImpl vol = (FsVolumeImpl) volume;
+    final String bpid = eb.getBlockPoolId();
+    final Block block = eb.getLocalBlock();
+    ReplicaWaitingToBeRecovered rwbr =
+        new ReplicaWaitingToBeRecovered(eb.getLocalBlock(), volume,
+            vol.createRbwFile(bpid, block).getParentFile());
+    dataset.volumeMap.add(bpid, rwbr);
+    return rwbr;
+  }
+
+  @Override
+  public Replica createReplicaUnderRecovery(
+      ExtendedBlock block, long recoveryId) throws IOException {
+    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
+      FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
+      ReplicaUnderRecovery rur = new ReplicaUnderRecovery(new FinalizedReplica(
+          block.getLocalBlock(), volume, volume.getCurrentDir().getParentFile()),
+          recoveryId
+      );
+      dataset.volumeMap.add(block.getBlockPoolId(), rur);
+      return rur;
+    }
+  }
 }
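Note: every single-argument overload above follows the same shape: borrow the reference-counted volume list, delegate to the volume-specific overload with the first volume, and let try-with-resources release the references. (The original hunk had the ReplicaWaitingToBeRecovered delegate calling createReplicaInPipeline, an apparent copy/paste slip corrected above.) A sketch of how a hypothetical additional state would be wired up; createSomeReplica is illustrative, not a real method:

    @Override
    public Replica createSomeReplica(ExtendedBlock block) throws IOException {
      // FsVolumeReferences is AutoCloseable; closing releases the references.
      try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        // Default to the first volume; the volume-specific overload does the
        // actual work of creating files and updating the volume map.
        return createSomeReplica(volumes.get(0), block);
      }
    }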
TestFsDatasetImpl.java
@@ -365,12 +365,14 @@ public void testAddVolumeFailureReleasesInUseLock() throws IOException {
 
   @Test
   public void testDeletingBlocks() throws IOException {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    HdfsConfiguration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
 
-      FsDatasetImpl ds = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
+      FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
+      ds.addBlockPool(BLOCKPOOL, conf);
       FsVolumeImpl vol;
       try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
         vol = (FsVolumeImpl)volumes.get(0);
@@ -378,15 +380,11 @@ public void testDeletingBlocks() throws IOException {
 
       ExtendedBlock eb;
       ReplicaInfo info;
-      List<Block> blockList = new ArrayList<Block>();
+      List<Block> blockList = new ArrayList<>();
       for (int i = 1; i <= 63; i++) {
         eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
-        info = new FinalizedReplica(
-            eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
-        ds.volumeMap.add(BLOCKPOOL, info);
-        info.getBlockFile().createNewFile();
-        info.getMetaFile().createNewFile();
-        blockList.add(info);
+        cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
+        blockList.add(eb.getLocalBlock());
       }
       ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
       try {
@@ -398,12 +396,8 @@ public void testDeletingBlocks() throws IOException {
 
       blockList.clear();
       eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
-      info = new FinalizedReplica(
-          eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
-      ds.volumeMap.add(BLOCKPOOL, info);
-      info.getBlockFile().createNewFile();
-      info.getMetaFile().createNewFile();
-      blockList.add(info);
+      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
+      blockList.add(eb.getLocalBlock());
       ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
       try {
         Thread.sleep(1000);
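Note: the rewritten test only keeps lightweight Block handles for the later invalidate call; the block and meta files are created and owned by the dataset through the test utility. The pattern reduces to this sketch (values illustrative):

    ExtendedBlock eb = new ExtendedBlock(BLOCKPOOL, 1, 1, 1001);
    // Creates and registers the finalized replica on datanode 0.
    cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    // Only the local Block is retained; no File or ReplicaInfo escapes.
    List<Block> blockList = new ArrayList<>();
    blockList.add(eb.getLocalBlock());
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));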
TestWriteToReplica.java
@@ -20,7 +20,6 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -35,16 +34,11 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
-import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -70,12 +64,12 @@ public void testClose() throws Exception {
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
-      FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
+      FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
 
-      ExtendedBlock[] blocks = setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
 
       // test close
       testClose(dataSet, blocks);
@@ -91,11 +85,11 @@ public void testAppend() throws Exception {
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
-      FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
+      FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      ExtendedBlock[] blocks = setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
 
       // test append
       testAppend(bpid, dataSet, blocks);
@@ -115,7 +109,7 @@ public void testWriteToRbw() throws Exception {
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      ExtendedBlock[] blocks = setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
 
       // test writeToRbw
       testWriteToRbw(dataSet, blocks);
@@ -135,7 +129,7 @@ public void testWriteToTemporary() throws Exception {
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      ExtendedBlock[] blocks = setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
 
       // test writeToTemporary
       testWriteToTemporary(dataSet, blocks);
@@ -149,11 +143,12 @@ public void testWriteToTemporary() throws Exception {
    * on which to run the tests.
    *
    * @param bpid Block pool ID to generate blocks for
-   * @param dataSet Namespace in which to insert blocks
+   * @param testUtils FsDatasetTestUtils provides white box access to FsDataset.
    * @return Contrived blocks for further testing.
    * @throws IOException
    */
-  private ExtendedBlock[] setup(String bpid, FsDatasetImpl dataSet) throws IOException {
+  private ExtendedBlock[] setup(String bpid, FsDatasetTestUtils testUtils)
+      throws IOException {
     // setup replicas map
 
     ExtendedBlock[] blocks = new ExtendedBlock[] {
@@ -161,59 +156,36 @@ private ExtendedBlock[] setup(String bpid, FsDatasetImpl dataSet) throws IOExcep
       new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
       new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
     };
 
-    ReplicaMap replicasMap = dataSet.volumeMap;
-    try (FsDatasetSpi.FsVolumeReferences references =
-        dataSet.getFsVolumeReferences()) {
-      FsVolumeImpl vol = (FsVolumeImpl) references.get(0);
-      ReplicaInfo replicaInfo = new FinalizedReplica(
-          blocks[FINALIZED].getLocalBlock(), vol,
-          vol.getCurrentDir().getParentFile());
-      replicasMap.add(bpid, replicaInfo);
-      replicaInfo.getBlockFile().createNewFile();
-      replicaInfo.getMetaFile().createNewFile();
-
-      replicasMap.add(bpid, new ReplicaInPipeline(
-          blocks[TEMPORARY].getBlockId(),
-          blocks[TEMPORARY].getGenerationStamp(), vol,
-          vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock())
-              .getParentFile(), 0));
+    testUtils.createFinalizedReplica(blocks[FINALIZED]);
+    testUtils.createReplicaInPipeline(blocks[TEMPORARY]);
+    testUtils.createRBW(blocks[RBW]);
+    testUtils.createReplicaWaitingToBeRecovered(blocks[RWR]);
+    testUtils.createReplicaUnderRecovery(blocks[RUR], 2007);
 
-      replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
-          vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(),
-          null);
-      replicasMap.add(bpid, replicaInfo);
-      replicaInfo.getBlockFile().createNewFile();
-      replicaInfo.getMetaFile().createNewFile();
-
-      replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
-          blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
-          blocks[RWR].getLocalBlock()).getParentFile()));
-      replicasMap
-          .add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
-              .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()),
-              2007));
-    }
     return blocks;
   }
 
-  private void testAppend(String bpid, FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
+  private void testAppend(String bpid, FsDatasetSpi<?> dataSet,
+      ExtendedBlock[] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
-    final FsVolumeImpl v = (FsVolumeImpl)dataSet.volumeMap.get(
-        bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
-    long available = v.getCapacity()-v.getDfsUsed();
-    long expectedLen = blocks[FINALIZED].getNumBytes();
-    try {
-      v.onBlockFileDeletion(bpid, -available);
-      blocks[FINALIZED].setNumBytes(expectedLen+100);
-      dataSet.append(blocks[FINALIZED], newGS, expectedLen);
-      Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
-    } catch (DiskOutOfSpaceException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
-          "Insufficient space for appending to "));
+    final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
+    if (v instanceof FsVolumeImpl) {
+      FsVolumeImpl fvi = (FsVolumeImpl) v;
+      long available = fvi.getCapacity() - fvi.getDfsUsed();
+      long expectedLen = blocks[FINALIZED].getNumBytes();
+      try {
+        fvi.onBlockFileDeletion(bpid, -available);
+        blocks[FINALIZED].setNumBytes(expectedLen + 100);
+        dataSet.append(blocks[FINALIZED], newGS, expectedLen);
+        Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
+      } catch (DiskOutOfSpaceException e) {
+        Assert.assertTrue(e.getMessage().startsWith(
+            "Insufficient space for appending to "));
+      }
+      fvi.onBlockFileDeletion(bpid, available);
+      blocks[FINALIZED].setNumBytes(expectedLen);
     }
-    v.onBlockFileDeletion(bpid, available);
-    blocks[FINALIZED].setNumBytes(expectedLen);
 
     newGS = blocks[RBW].getGenerationStamp()+1;
     dataSet.append(blocks[FINALIZED], newGS,
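Note: dataSet is now typed as FsDatasetSpi<?>, so the full-disk simulation is only attempted when the volume really is an FsVolumeImpl; other FsVolumeSpi implementations skip the out-of-space assertion. onBlockFileDeletion adjusts the volume's used-space accounting by the given delta, so passing -available makes the volume look full. A reduced sketch of the guard, with variable names as in the diff above:

    final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
    if (v instanceof FsVolumeImpl) {
      FsVolumeImpl fvi = (FsVolumeImpl) v;
      long available = fvi.getCapacity() - fvi.getDfsUsed();
      fvi.onBlockFileDeletion(bpid, -available); // volume now looks full
      // ... exercise the DiskOutOfSpaceException path here, then restore:
      fvi.onBlockFileDeletion(bpid, available);
    }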
@@ -317,7 +289,7 @@ private void testAppend(String bpid, FsDatasetImpl dataSet, ExtendedBlock[] bloc
     }
   }
 
-  private void testClose(FsDatasetImpl dataSet, ExtendedBlock [] blocks) throws IOException {
+  private void testClose(FsDatasetSpi<?> dataSet, ExtendedBlock [] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
     dataSet.recoverClose(blocks[FINALIZED], newGS,
         blocks[FINALIZED].getNumBytes()); // successful
@@ -544,28 +516,27 @@ public void testReplicaMapAfterDatanodeRestart() throws Exception {
       DataNode dn = cluster.getDataNodes().get(0);
       FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.
           getFSDataset(dn);
-      ReplicaMap replicaMap = dataSet.volumeMap;
 
-      List<FsVolumeImpl> volumes = null;
+      List<FsVolumeSpi> volumes = null;
       try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) {
         // number of volumes should be 2 - [data1, data2]
         assertEquals("number of volumes is wrong", 2, referredVols.size());
         volumes = new ArrayList<>(referredVols.size());
         for (FsVolumeSpi vol : referredVols) {
-          volumes.add((FsVolumeImpl) vol);
+          volumes.add(vol);
         }
       }
-      ArrayList<String> bpList = new ArrayList<String>(Arrays.asList(
-          cluster.getNamesystem(0).getBlockPoolId(),
+      ArrayList<String> bpList = new ArrayList<>(Arrays.asList(
+          cluster.getNamesystem(0).getBlockPoolId(),
           cluster.getNamesystem(1).getBlockPoolId()));
 
       Assert.assertTrue("Cluster should have 2 block pools",
           bpList.size() == 2);
 
-      createReplicas(bpList, volumes, replicaMap);
+      createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
       ReplicaMap oldReplicaMap = new ReplicaMap(this);
-      oldReplicaMap.addAll(replicaMap);
+
+      oldReplicaMap.addAll(dataSet.volumeMap);
 
       cluster.restartDataNode(0);
       cluster.waitActive();
       dn = cluster.getDataNodes().get(0);
@@ -622,48 +593,29 @@ private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap, ReplicaMap
     }
   }
 
-  private void createReplicas(List<String> bpList, List<FsVolumeImpl> volumes,
-      ReplicaMap volumeMap) throws IOException {
-    Assert.assertTrue("Volume map can't be null" , volumeMap != null);
-
+  private void createReplicas(List<String> bpList, List<FsVolumeSpi> volumes,
+      FsDatasetTestUtils testUtils) throws IOException {
     // Here we create all different type of replicas and add it
     // to volume map.
+    // Created all type of ReplicaInfo, each under Blkpool corresponding volume
     long id = 1; // This variable is used as both blockId and genStamp
     for (String bpId: bpList) {
-      for (FsVolumeImpl volume: volumes) {
-        ReplicaInfo finalizedReplica = new FinalizedReplica(id, 1, id, volume,
-            DatanodeUtil.idToBlockDir(volume.getFinalizedDir(bpId), id));
-        volumeMap.add(bpId, finalizedReplica);
-        id++;
-
-        ReplicaInfo rbwReplica = new ReplicaBeingWritten(id, 1, id, volume,
-            volume.getRbwDir(bpId), null, 100);
-        volumeMap.add(bpId, rbwReplica);
+      for (FsVolumeSpi volume: volumes) {
+        ExtendedBlock eb = new ExtendedBlock(bpId, id, 1, id);
+        testUtils.createFinalizedReplica(volume, eb);
        id++;
 
-        ReplicaInfo rwrReplica = new ReplicaWaitingToBeRecovered(id, 1, id,
-            volume, volume.getRbwDir(bpId));
-        volumeMap.add(bpId, rwrReplica);
+        eb = new ExtendedBlock(bpId, id, 1, id);
+        testUtils.createRBW(volume, eb);
         id++;
 
-        ReplicaInfo ripReplica = new ReplicaInPipeline(id, id, volume,
-            volume.getTmpDir(bpId), 0);
-        volumeMap.add(bpId, ripReplica);
-
+        eb = new ExtendedBlock(bpId, id, 1, id);
+        testUtils.createReplicaWaitingToBeRecovered(volume, eb);
         id++;
+
+        eb = new ExtendedBlock(bpId, id, 1, id);
+        testUtils.createReplicaInPipeline(volume, eb);
+        id++;
       }
     }
-
-    for (String bpId: bpList) {
-      for (ReplicaInfo replicaInfo: volumeMap.replicas(bpId)) {
-        File parentFile = replicaInfo.getBlockFile().getParentFile();
-        if (!parentFile.exists()) {
-          if (!parentFile.mkdirs()) {
-            throw new IOException("Failed to mkdirs " + parentFile);
-          }
-        }
-        replicaInfo.getBlockFile().createNewFile();
-        replicaInfo.getMetaFile().createNewFile();
-      }
-    }
   }
 }
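Note: id is incremented after every replica and reused as both blockId and genstamp, so each replica created by this helper is unique across block pools and volumes, and the second pass that used to mkdir and create the block/meta files is no longer needed. For one (bpId, volume) pair starting at id = 1, the helper issues the following calls (sketch, values illustrative):

    testUtils.createFinalizedReplica(volume, new ExtendedBlock(bpId, 1, 1, 1));
    testUtils.createRBW(volume, new ExtendedBlock(bpId, 2, 1, 2));
    testUtils.createReplicaWaitingToBeRecovered(
        volume, new ExtendedBlock(bpId, 3, 1, 3));
    testUtils.createReplicaInPipeline(volume, new ExtendedBlock(bpId, 4, 1, 4));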