HDFS-10682. Replace FsDatasetImpl object lock with a separate lock object. (Chen Liang)

Arpit Agarwal committed 2016-08-08 12:02:53 -07:00
parent 625585950a
commit 8c0638471f
10 changed files with 535 additions and 446 deletions
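The pattern applied throughout this change is mechanical: every synchronized method or synchronized (this) / synchronized (dataset) block guarding the dataset becomes a try-with-resources block over a dedicated lock object. A minimal, self-contained sketch of the idea follows; the DatasetLockSketch class and its getVisibleLength() method are invented for illustration, and the inner lock class is only a stand-in for org.apache.hadoop.util.AutoCloseableLock, which this commit uses.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class DatasetLockSketch {

  // Stand-in for org.apache.hadoop.util.AutoCloseableLock: a mutex whose
  // acquire() returns an AutoCloseable, so callers release it via
  // try-with-resources instead of a synchronized block.
  static class AutoCloseableLock implements AutoCloseable {
    private final Lock lock = new ReentrantLock();

    public AutoCloseableLock acquire() {
      lock.lock();
      return this;
    }

    @Override
    public void close() {
      lock.unlock();
    }
  }

  private final AutoCloseableLock datasetLock = new AutoCloseableLock();
  private long visibleLength;

  // Before: public synchronized long getVisibleLength() { ... }
  // After: the object monitor is replaced by the separate lock object.
  public long getVisibleLength() {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      return visibleLength; // critical section
    } // lock released here, even if the body throws
  }

  public static void main(String[] args) {
    System.out.println(new DatasetLockSketch().getVisibleLength());
  }
}

Behaviorally this is equivalent to the monitor it replaces, but moving the lock into its own object means a different lock implementation (for example, an instrumented or read/write lock) can later be substituted without touching every call site.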

View File

@@ -115,6 +115,7 @@
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.net.DomainPeerServer;
@@ -2877,7 +2878,7 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
final BlockConstructionStage stage;
//get replica information
-synchronized(data) {
+try(AutoCloseableLock lock = data.acquireDatasetLock()) {
Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
b.getBlockId());
if (null == storedBlock) {

View File

@@ -44,6 +44,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -583,7 +584,7 @@ private void scan() {
Map<String, ScanInfo[]> diskReport = getDiskReport();
// Hold FSDataset lock to prevent further changes to the block map
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
String bpid = entry.getKey();
ScanInfo[] blockpoolReport = entry.getValue();

View File

@@ -22,6 +22,7 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus
@@ -454,7 +455,7 @@ private Map<String, FsVolumeSpi> getStorageIDToVolumeMap()
Map<String, FsVolumeSpi> pathMap = new HashMap<>();
FsDatasetSpi.FsVolumeReferences references;
try {
-synchronized (this.dataset) {
+try(AutoCloseableLock lock = this.dataset.acquireDatasetLock()) {
references = this.dataset.getFsVolumeReferences();
for (int ndx = 0; ndx < references.size(); ndx++) {
FsVolumeSpi vol = references.get(ndx);

View File

@@ -35,6 +35,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -639,4 +640,9 @@ ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
*/
ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
FsVolumeSpi destination) throws IOException;
+/**
+* Acquire the lock of the data set.
+*/
+AutoCloseableLock acquireDatasetLock();
}
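With acquireDatasetLock() on the FsDatasetSpi interface, callers that previously synchronized on the dataset object (DataNode, DirectoryScanner and DiskBalancer above) can serialize access without knowing the concrete lock type. A hedged sketch of the call-site pattern; inspectBlockMap is a hypothetical caller, not part of this commit:

// Hypothetical caller mirroring the call sites changed in this commit.
void inspectBlockMap(FsDatasetSpi<?> data) throws IOException {
  try (AutoCloseableLock lock = data.acquireDatasetLock()) {
    // The dataset's block map cannot change while the lock is held.
  }
}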

View File

@@ -60,6 +60,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
@@ -175,14 +176,18 @@ public StorageReport[] getStorageReports(String bpid)
}
@Override
-public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
-final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
+public FsVolumeImpl getVolume(final ExtendedBlock b) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
+final ReplicaInfo r =
+volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
return r != null ? (FsVolumeImpl) r.getVolume() : null;
}
}
@Override // FsDatasetSpi
-public synchronized Block getStoredBlock(String bpid, long blkid)
+public Block getStoredBlock(String bpid, long blkid)
throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
File blockfile = getFile(bpid, blkid, false);
if (blockfile == null) {
return null;
@@ -191,6 +196,7 @@ public synchronized Block getStoredBlock(String bpid, long blkid)
final long gs = FsDatasetUtil.parseGenerationStamp(blockfile, metafile);
return new Block(blkid, blockfile.length(), gs);
}
}
/**
@@ -259,6 +265,8 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
private boolean blockPinningEnabled;
private final int maxDataLength;
+private final AutoCloseableLock datasetLock;
/**
* An FSDataset has a directory where it loads its data files.
*/
@@ -269,6 +277,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
this.dataStorage = storage;
this.conf = conf;
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
+this.datasetLock = new AutoCloseableLock();
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();
@@ -341,6 +350,11 @@
CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
}
+@Override
+public AutoCloseableLock acquireDatasetLock() {
+return datasetLock.acquire();
+}
/**
* Gets initial volume failure information for all volumes that failed
* immediately at startup. The method works by determining the set difference
@@ -375,10 +389,11 @@ private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
* Activate a volume to serve requests.
* @throws IOException if the storage UUID already exists.
*/
-private synchronized void activateVolume(
+private void activateVolume(
ReplicaMap replicaMap,
Storage.StorageDirectory sd, StorageType storageType,
FsVolumeReference ref) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
if (dnStorage != null) {
final String errorMsg = String.format(
@@ -395,6 +410,7 @@ private synchronized void activateVolume(
asyncDiskService.addVolume(sd.getCurrentDir());
volumes.addVolume(ref);
}
}
private void addVolume(Collection<StorageLocation> dataLocations,
Storage.StorageDirectory sd) throws IOException {
@@ -488,7 +504,7 @@ public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
List<String> storageToRemove = new ArrayList<>();
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
final File absRoot = sd.getRoot().getAbsoluteFile();
@@ -534,7 +550,7 @@ public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
}
}
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
for(String storageUuid : storageToRemove) {
storageMap.remove(storageUuid);
}
@@ -743,7 +759,7 @@ private File getBlockFileNoExistsCheck(ExtendedBlock b,
boolean touch)
throws IOException {
final File f;
-synchronized(this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
f = getFile(b.getBlockPoolId(), b.getLocalBlock().getBlockId(), touch);
}
if (f == null) {
@@ -809,14 +825,16 @@ private ReplicaInfo getReplicaInfo(String bpid, long blkid)
* Returns handles to the block file and its metadata file
*/
@Override // FsDatasetSpi
-public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
+public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
long blkOffset, long metaOffset) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo info = getReplicaInfo(b);
FsVolumeReference ref = info.getVolume().obtainReference();
try {
InputStream blockInStream = openAndSeek(info.getBlockFile(), blkOffset);
try {
-InputStream metaInStream = openAndSeek(info.getMetaFile(), metaOffset);
+InputStream metaInStream =
+openAndSeek(info.getMetaFile(), metaOffset);
return new ReplicaInputStreams(blockInStream, metaInStream, ref);
} catch (IOException e) {
IOUtils.cleanup(null, blockInStream);
@@ -827,6 +845,7 @@ public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
throw e;
}
}
}
private static FileInputStream openAndSeek(File file, long offset)
throws IOException {
@@ -943,7 +962,7 @@ public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
}
FsVolumeReference volumeRef = null;
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
volumeRef = volumes.getNextVolume(targetStorageType, block.getNumBytes());
}
try {
@@ -985,7 +1004,7 @@ private ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo,
newReplicaInfo.setNumBytes(blockFiles[1].length());
// Finalize the copied files
newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
// Increment numBlocks here as this block moved without knowing to BPS
FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
@@ -1015,7 +1034,7 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi
FsVolumeReference volumeRef = null;
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
volumeRef = destination.obtainReference();
}
@@ -1143,8 +1162,9 @@ static private void truncateBlock(File blockFile, File metaFile,
@Override // FsDatasetSpi
-public synchronized ReplicaHandler append(ExtendedBlock b,
+public ReplicaHandler append(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
// If the block was successfully finalized because all packets
// were successfully processed at the Datanode but the ack for
// some of the packets were not received by the client. The client
@@ -1171,7 +1191,8 @@ public synchronized ReplicaHandler append(ExtendedBlock b,
FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
ReplicaBeingWritten replica = null;
try {
-replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
+replica = append(b.getBlockPoolId(),
+(FinalizedReplica) replicaInfo, newGS,
b.getNumBytes());
} catch (IOException e) {
IOUtils.cleanup(null, ref);
@@ -1179,6 +1200,7 @@ public synchronized ReplicaHandler append(ExtendedBlock b,
}
return new ReplicaHandler(replica, ref);
}
}
/** Append to a finalized replica
* Change a finalized replica to be a RBW replica and
@@ -1192,14 +1214,15 @@ public synchronized ReplicaHandler append(ExtendedBlock b,
* @throws IOException if moving the replica from finalized directory
* to rbw directory fails
*/
-private synchronized ReplicaBeingWritten append(String bpid,
+private ReplicaBeingWritten append(String bpid,
FinalizedReplica replicaInfo, long newGS, long estimateBlockLen)
throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
// If the block is cached, start uncaching it.
cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId());
-// If there are any hardlinks to the block, break them. This ensures we are
-// not appending to a file that is part of a previous/ directory.
+// If there are any hardlinks to the block, break them. This ensures we
+// are not appending to a file that is part of a previous/ directory.
replicaInfo.breakHardLinksIfNeeded();
// construct a RBW replica with the new GS
@@ -1253,6 +1276,7 @@ private synchronized ReplicaBeingWritten append(String bpid,
v.reserveSpaceForReplica(bytesReserved);
return newReplicaInfo;
}
}
private static class MustStopExistingWriter extends Exception {
private final ReplicaInPipeline rip;
@@ -1321,7 +1345,7 @@ public ReplicaHandler recoverAppend(
while (true) {
try {
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
@@ -1353,7 +1377,7 @@ public Replica recoverClose(ExtendedBlock b, long newGS,
LOG.info("Recover failed close " + b);
while (true) {
try {
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
// check replica's state
ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
// bump the replica's GS
@@ -1400,9 +1424,10 @@ private void bumpReplicaGS(ReplicaInfo replicaInfo,
}
@Override // FsDatasetSpi
-public synchronized ReplicaHandler createRbw(
+public ReplicaHandler createRbw(
StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getBlockId());
if (replicaInfo != null) {
@@ -1452,11 +1477,13 @@ public synchronized ReplicaHandler createRbw(
throw e;
}
-ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
+ReplicaBeingWritten newReplicaInfo =
+new ReplicaBeingWritten(b.getBlockId(),
b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
return new ReplicaHandler(newReplicaInfo, ref);
}
}
@Override // FsDatasetSpi
public ReplicaHandler recoverRbw(
@@ -1466,7 +1493,7 @@ public ReplicaHandler recoverRbw(
while (true) {
try {
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
// check the replica's state
@@ -1487,9 +1514,10 @@ public ReplicaHandler recoverRbw(
}
}
-private synchronized ReplicaHandler recoverRbwImpl(ReplicaBeingWritten rbw,
+private ReplicaHandler recoverRbwImpl(ReplicaBeingWritten rbw,
ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
// check generation stamp
long replicaGenerationStamp = rbw.getGenerationStamp();
if (replicaGenerationStamp < b.getGenerationStamp() ||
@@ -1530,10 +1558,12 @@ private synchronized ReplicaHandler recoverRbwImpl(ReplicaBeingWritten rbw,
}
return new ReplicaHandler(rbw, ref);
}
}
@Override // FsDatasetSpi
-public synchronized ReplicaInPipeline convertTemporaryToRbw(
+public ReplicaInPipeline convertTemporaryToRbw(
final ExtendedBlock b) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
@@ -1541,7 +1571,7 @@ public synchronized ReplicaInPipeline convertTemporaryToRbw(
+ visible);
final ReplicaInPipeline temp;
-{
// get replica
final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
if (r == null) {
@@ -1554,7 +1584,7 @@ public synchronized ReplicaInPipeline convertTemporaryToRbw(
"r.getState() != ReplicaState.TEMPORARY, r=" + r);
}
temp = (ReplicaInPipeline) r;
-}
// check generation stamp
if (temp.getGenerationStamp() != expectedGs) {
throw new ReplicaAlreadyExistsException(
@@ -1591,6 +1621,7 @@ public synchronized ReplicaInPipeline convertTemporaryToRbw(
volumeMap.add(b.getBlockPoolId(), rbw);
return rbw;
}
}
@Override // FsDatasetSpi
public ReplicaHandler createTemporary(
@@ -1599,7 +1630,7 @@ public ReplicaHandler createTemporary(
long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
ReplicaInfo lastFoundReplicaInfo = null;
do {
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo currentReplicaInfo =
volumeMap.get(b.getBlockPoolId(), b.getBlockId());
if (currentReplicaInfo == lastFoundReplicaInfo) {
@@ -1678,7 +1709,8 @@ public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams strea
* Complete the block write!
*/
@Override // FsDatasetSpi
-public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
+public void finalizeBlock(ExtendedBlock b) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
if (Thread.interrupted()) {
// Don't allow data modifications from interrupted threads
throw new IOException("Cannot finalize block from Interrupted Thread");
@@ -1691,13 +1723,15 @@ public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
}
finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
}
-private synchronized FinalizedReplica finalizeReplica(String bpid,
+private FinalizedReplica finalizeReplica(String bpid,
ReplicaInfo replicaInfo) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
FinalizedReplica newReplicaInfo = null;
if (replicaInfo.getState() == ReplicaState.RUR &&
-((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() ==
-ReplicaState.FINALIZED) {
+((ReplicaUnderRecovery) replicaInfo).getOriginalReplica().getState()
+== ReplicaState.FINALIZED) {
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery) replicaInfo).getOriginalReplica();
} else {
@@ -1710,11 +1744,13 @@ private synchronized FinalizedReplica finalizeReplica(String bpid,
File dest = v.addFinalizedBlock(
bpid, replicaInfo, f, replicaInfo.getBytesReserved());
-newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
+newReplicaInfo =
+new FinalizedReplica(replicaInfo, v, dest.getParentFile());
if (v.isTransientStorage()) {
releaseLockedMemory(
-replicaInfo.getOriginalBytesReserved() - replicaInfo.getNumBytes(),
+replicaInfo.getOriginalBytesReserved()
+- replicaInfo.getNumBytes(),
false);
ramDiskReplicaTracker.addReplica(
bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
@@ -1725,15 +1761,18 @@ private synchronized FinalizedReplica finalizeReplica(String bpid,
return newReplicaInfo;
}
}
/**
* Remove the temporary block file (if any)
*/
@Override // FsDatasetSpi
-public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
+public void unfinalizeBlock(ExtendedBlock b) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getLocalBlock());
-if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
+if (replicaInfo != null
+&& replicaInfo.getState() == ReplicaState.TEMPORARY) {
// remove from volumeMap
volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
@@ -1743,7 +1782,9 @@ public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
LOG.warn("Block " + b + " unfinalized and removed. ");
}
if (replicaInfo.getVolume().isTransientStorage()) {
-ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
+ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(),
+b.getBlockId(), true);
}
}
}
}
@@ -1791,7 +1832,7 @@ public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
}
-synchronized(this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
switch(b.getState()) {
case FINALIZED:
@@ -1824,7 +1865,8 @@ public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
@Override
-public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
+public List<FinalizedReplica> getFinalizedBlocks(String bpid) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ArrayList<FinalizedReplica> finalized =
new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
@@ -1834,12 +1876,15 @@ public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
}
return finalized;
}
}
/**
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
@Override
-public synchronized List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
+public List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(
+String bpid) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ArrayList<FinalizedReplica> finalized =
new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
@@ -1850,6 +1895,7 @@ public synchronized List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage
}
return finalized;
}
}
/**
* Check if a block is valid.
@@ -1924,7 +1970,7 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
File validateBlockFile(String bpid, long blockId) {
//Should we check for metadata file too?
final File f;
-synchronized(this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
f = getFile(bpid, blockId, false);
}
@@ -1973,7 +2019,7 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
for (int i = 0; i < invalidBlks.length; i++) {
final File f;
final FsVolumeImpl v;
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
if (info == null) {
// It is okay if the block is not found -- it may be deleted earlier.
@@ -2084,7 +2130,7 @@ private void cacheBlock(String bpid, long blockId) {
long length, genstamp;
Executor volumeExecutor;
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo info = volumeMap.get(bpid, blockId);
boolean success = false;
try {
@@ -2151,10 +2197,12 @@ public boolean isCached(String bpid, long blockId) {
}
@Override // FsDatasetSpi
-public synchronized boolean contains(final ExtendedBlock block) {
+public boolean contains(final ExtendedBlock block) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
final long blockId = block.getLocalBlock().getBlockId();
return getFile(block.getBlockPoolId(), blockId, false) != null;
}
}
/**
* Turn the block identifier into a filename
@@ -2279,7 +2327,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FsVolumeSpi vol) throws IOException {
Block corruptBlock = null;
ReplicaInfo memBlockInfo;
-synchronized (this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
memBlockInfo = volumeMap.get(bpid, blockId);
if (memBlockInfo != null && memBlockInfo.getState() != ReplicaState.FINALIZED) {
// Block is not finalized - ignore the difference
@@ -2435,10 +2483,12 @@ public ReplicaInfo getReplica(String bpid, long blockId) {
}
@Override
-public synchronized String getReplicaString(String bpid, long blockId) {
+public String getReplicaString(String bpid, long blockId) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
final Replica r = volumeMap.get(bpid, blockId);
return r == null ? "null" : r.toString();
}
}
@Override // FsDatasetSpi
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
@@ -2530,11 +2580,12 @@ static ReplicaRecoveryInfo initReplicaRecoveryImpl(String bpid, ReplicaMap map,
}
@Override // FsDatasetSpi
-public synchronized Replica updateReplicaUnderRecovery(
+public Replica updateReplicaUnderRecovery(
final ExtendedBlock oldBlock,
final long recoveryId,
final long newBlockId,
final long newlength) throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
//get replica
final String bpid = oldBlock.getBlockPoolId();
final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
@@ -2592,6 +2643,7 @@ public synchronized Replica updateReplicaUnderRecovery(
return finalized;
}
}
private FinalizedReplica updateReplicaUnderRecovery(
String bpid,
@@ -2668,8 +2720,9 @@ private File[] copyReplicaWithNewBlockIdAndGS(
}
@Override // FsDatasetSpi
-public synchronized long getReplicaVisibleLength(final ExtendedBlock block)
+public long getReplicaVisibleLength(final ExtendedBlock block)
throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
final Replica replica = getReplicaInfo(block.getBlockPoolId(),
block.getBlockId());
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
@@ -2679,12 +2732,13 @@ public synchronized long getReplicaVisibleLength(final ExtendedBlock block)
}
return replica.getVisibleLength();
}
}
@Override
public void addBlockPool(String bpid, Configuration conf)
throws IOException {
LOG.info("Adding block pool " + bpid);
-synchronized(this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
volumes.addBlockPool(bpid, conf);
volumeMap.initBlockPool(bpid);
}
@@ -2692,12 +2746,15 @@ public void addBlockPool(String bpid, Configuration conf)
}
@Override
-public synchronized void shutdownBlockPool(String bpid) {
+public void shutdownBlockPool(String bpid) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
LOG.info("Removing block pool " + bpid);
-Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume = getBlockReports(bpid);
+Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume
+= getBlockReports(bpid);
volumeMap.cleanUpBlockPool(bpid);
volumes.removeBlockPool(bpid, blocksPerVolume);
}
}
/**
* Class for representing the Datanode volume information
@@ -2759,14 +2816,16 @@ public Map<String, Object> getVolumeInfoMap() {
}
@Override //FsDatasetSpi
-public synchronized void deleteBlockPool(String bpid, boolean force)
+public void deleteBlockPool(String bpid, boolean force)
throws IOException {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
List<FsVolumeImpl> curVolumes = volumes.getVolumes();
if (!force) {
for (FsVolumeImpl volume : curVolumes) {
try (FsVolumeReference ref = volume.obtainReference()) {
if (!volume.isBPDirEmpty(bpid)) {
LOG.warn(bpid + " has some block files, cannot delete unless forced");
LOG.warn(bpid
+ " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, "
+ "it contains some block files");
}
@@ -2783,11 +2842,12 @@ public synchronized void deleteBlockPool(String bpid, boolean force)
}
}
}
}
@Override // FsDatasetSpi
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
-synchronized(this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
final Replica replica = volumeMap.get(block.getBlockPoolId(),
block.getBlockId());
if (replica == null) {
@@ -2838,7 +2898,7 @@ public void clearRollingUpgradeMarker(String bpid) throws IOException {
@Override
public void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
-synchronized (FsDatasetImpl.this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
@@ -2972,7 +3032,7 @@ private boolean saveNextReplica() {
try {
block = ramDiskReplicaTracker.dequeueNextReplicaToPersist();
if (block != null) {
-synchronized (FsDatasetImpl.this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
replicaInfo = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
// If replicaInfo is null, the block was either deleted before
@@ -3042,7 +3102,7 @@ public void evictBlocks(long bytesNeeded) throws IOException {
long blockFileUsed, metaFileUsed;
final String bpid = replicaState.getBlockPoolId();
-synchronized (FsDatasetImpl.this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
replicaInfo = getReplicaInfo(replicaState.getBlockPoolId(),
replicaState.getBlockId());
Preconditions.checkState(replicaInfo.getVolume().isTransientStorage());
@@ -3219,17 +3279,20 @@ public void setTimer(Timer newTimer) {
this.timer = newTimer;
}
-synchronized void stopAllDataxceiverThreads(FsVolumeImpl volume) {
+void stopAllDataxceiverThreads(FsVolumeImpl volume) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
for (String blockPoolId : volumeMap.getBlockPoolList()) {
Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
for (ReplicaInfo replicaInfo : replicas) {
if (replicaInfo instanceof ReplicaInPipeline
&& replicaInfo.getVolume().equals(volume)) {
-ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) replicaInfo;
+ReplicaInPipeline replicaInPipeline
+= (ReplicaInPipeline) replicaInfo;
replicaInPipeline.interruptThread();
}
}
}
}
}
}

View File

@@ -45,6 +45,7 @@
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -304,7 +305,7 @@ void onMetaFileDeletion(String bpid, long value) {
private void decDfsUsedAndNumBlocks(String bpid, long value,
boolean blockFileDeleted) {
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
@@ -316,7 +317,7 @@ private void decDfsUsedAndNumBlocks(String bpid, long value,
}
void incDfsUsedAndNumBlocks(String bpid, long value) {
-synchronized (dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.incDfsUsed(value);
@@ -326,7 +327,7 @@ void incDfsUsedAndNumBlocks(String bpid, long value) {
}
void incDfsUsed(String bpid, long value) {
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.incDfsUsed(value);
@@ -337,7 +338,7 @@ void incDfsUsed(String bpid, long value) {
@VisibleForTesting
public long getDfsUsed() throws IOException {
long dfsUsed = 0;
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
for(BlockPoolSlice s : bpSlices.values()) {
dfsUsed += s.getDfsUsed();
}

View File

@@ -39,6 +39,7 @@
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -115,6 +116,9 @@ public static byte simulatedByte(Block b, long offsetInBlk) {
DatanodeStorage.State.NORMAL;
static final byte[] nullCrcFileData;
+private final AutoCloseableLock datasetLock;
static {
DataChecksum checksum = DataChecksum.newDataChecksum(
DataChecksum.Type.NULL, 16*1024 );
@@ -550,6 +554,7 @@ public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration
conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE));
this.volume = new SimulatedVolume(this.storage);
+this.datasetLock = new AutoCloseableLock();
}
public synchronized void injectBlocks(String bpid,
@@ -1366,5 +1371,9 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
return null;
}
+@Override
+public AutoCloseableLock acquireDatasetLock() {
+return datasetLock.acquire();
+}
}

View File

@@ -66,6 +66,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -725,7 +726,7 @@ public void run() {
final RecoveringBlock recoveringBlock = new RecoveringBlock(
block.getBlock(), locations, block.getBlock()
.getGenerationStamp() + 1);
-synchronized (dataNode.data) {
+try(AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
Thread.sleep(2000);
dataNode.initReplicaRecovery(recoveringBlock);
}

View File

@@ -52,6 +52,7 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -113,7 +114,7 @@ private List<LocatedBlock> createFile(String fileNamePrefix,
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
-synchronized (fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
@@ -138,7 +139,7 @@ private long truncateBlockFile() throws IOException {
/** Delete a block file */
private long deleteBlockFile() {
-synchronized(fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
@@ -154,7 +155,7 @@ private long deleteBlockFile() {
/** Delete block meta file */
private long deleteMetaFile() {
-synchronized(fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File file = b.getMetaFile();
// Delete a metadata file
@@ -173,7 +174,7 @@ private long deleteMetaFile() {
* @throws IOException
*/
private void duplicateBlock(long blockId) throws IOException {
-synchronized (fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
try (FsDatasetSpi.FsVolumeReferences volumes =
fds.getFsVolumeReferences()) {

View File

@@ -23,6 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
@@ -450,4 +451,8 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
return null;
}
+@Override
+public AutoCloseableLock acquireDatasetLock() {
+return null;
+}
}
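Note that this stub returns null from acquireDatasetLock(). That is tolerable at the call sites above because try-with-resources skips close() when the resource is null, so no NullPointerException is thrown on exit. A small self-contained illustration:

public class NullResourceDemo {
  public static void main(String[] args) {
    // try-with-resources permits a null resource; close() is simply not
    // called, which is why a stub lock accessor returning null still
    // works under the try (AutoCloseableLock lock = ...) pattern.
    try (AutoCloseable lock = null) {
      System.out.println("body runs; no NPE when the block exits");
    } catch (Exception e) { // AutoCloseable.close() declares Exception
      throw new AssertionError(e);
    }
  }
}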