HDFS-13958. Miscellaneous Improvements for FsVolumeSpi. Contributed by BELUGA BEHR.
parent f13e231025
commit 73c660b43f
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -26,7 +24,6 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -46,7 +43,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
@@ -657,7 +653,7 @@ public ScanInfoVolumeReport call() throws IOException {
       perfTimer.start();
       throttleTimer.start();
       for (String bpid : bpList) {
-        LinkedList<ScanInfo> report = new LinkedList<>();
+        List<ScanInfo> report = new ArrayList<>(DEFAULT_MAP_SIZE);
 
         perfTimer.reset().start();
         throttleTimer.reset().start();
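A side note on the swap just above: both lists give amortized O(1) appends, but an ArrayList keeps its elements in one backing array, avoiding a node allocation per ScanInfo and sorting in place. A minimal, self-contained sketch of the pattern; DEFAULT_MAP_SIZE stands in for the DirectoryScanner constant of the same name, and its value here is assumed for illustration:

import java.util.ArrayList;
import java.util.List;

public class ReportBufferSketch {
  // Assumed stand-in for DirectoryScanner's DEFAULT_MAP_SIZE.
  private static final int DEFAULT_MAP_SIZE = 32768;

  public static void main(String[] args) {
    // One upfront allocation instead of a node object per element.
    List<Long> report = new ArrayList<>(DEFAULT_MAP_SIZE);
    for (long blockId = 0; blockId < 100_000; blockId++) {
      report.add(blockId); // amortized O(1); grows geometrically if exceeded
    }
    // Array-backed storage sorts in place, with no copy-out step.
    report.sort(Long::compare);
    System.out.println("compiled " + report.size() + " scan entries");
  }
}

Declaring the variable as List rather than LinkedList also keeps the concrete type a purely local decision.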
@@ -720,16 +716,4 @@ private void accumulateTimeWaiting() {
       perfTimer.reset().start();
     }
   }
-
-  public enum BlockDirFilter implements FilenameFilter {
-    INSTANCE;
-
-    @Override
-    public boolean accept(File dir, String name) {
-      return name.startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)
-          || name.startsWith(DataStorage.STORAGE_DIR_FINALIZED)
-          || name.startsWith(Block.BLOCK_FILE_PREFIX);
-    }
-  }
 }

@@ -22,7 +22,7 @@
 import java.io.IOException;
 import java.net.URI;
 import java.nio.channels.ClosedChannelException;
-import java.util.LinkedList;
+import java.util.Collection;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -32,9 +32,9 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@@ -362,13 +362,13 @@ public long getBlockLength() {
     public File getMetaFile() {
       if (metaSuffix == null) {
         return null;
-      } else if (blockSuffix == null) {
-        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
-            metaSuffix);
-      } else {
-        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
-            blockSuffix + metaSuffix);
       }
+      String fileSuffix = metaSuffix;
+      if (blockSuffix != null) {
+        fileSuffix = blockSuffix + metaSuffix;
+      }
+      return new File(new File(volume.getBaseURI()).getAbsolutePath(),
+          fileSuffix);
     }
 
     /**
@@ -389,18 +389,12 @@ public FsVolumeSpi getVolume() {
       return volume;
     }
 
-    @Override // Comparable
+    @Override
     public int compareTo(ScanInfo b) {
-      if (blockId < b.blockId) {
-        return -1;
-      } else if (blockId == b.blockId) {
-        return 0;
-      } else {
-        return 1;
-      }
+      return Long.compare(this.blockId, b.blockId);
     }
 
-    @Override // Object
+    @Override
     public boolean equals(Object o) {
       if (this == o) {
         return true;
@@ -411,9 +405,9 @@ public boolean equals(Object o) {
       return blockId == ((ScanInfo) o).blockId;
     }
 
-    @Override // Object
+    @Override
     public int hashCode() {
-      return (int)(blockId^(blockId>>>32));
+      return Long.hashCode(this.blockId);
     }
 
     public long getGenStamp() {
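The two rewrites above delegate to JDK helpers: Long.compare (Java 7) replaces the hand-rolled three-way ladder, and Long.hashCode(long) (Java 8) performs the same high-word/low-word fold the old expression spelled out. A quick standalone check:

public class LongHelpersDemo {
  public static void main(String[] args) {
    // Three-way comparison without the overflow trap of "(int) (a - b)".
    System.out.println(Long.compare(Long.MAX_VALUE, Long.MIN_VALUE)); // 1
    System.out.println(Long.compare(Long.MIN_VALUE, Long.MAX_VALUE)); // -1
    System.out.println(Long.compare(42L, 42L));                       // 0

    // Long.hashCode(v) is exactly "(int) (v ^ (v >>> 32))", so equal
    // blockIds keep producing equal hash codes across the change.
    long v = 0xCAFEBABE_DEADBEEFL;
    System.out.println(Long.hashCode(v) == (int) (v ^ (v >>> 32)));   // true
  }
}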
@@ -447,8 +441,8 @@ byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile)
    * @param reportCompiler
    * @throws IOException
    */
-  LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+  void compileReport(String bpid,
+      Collection<ScanInfo> report, ReportCompiler reportCompiler)
       throws InterruptedException, IOException;
 
   /**
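The interface method now fills a caller-supplied Collection instead of returning a LinkedList, so implementations stop dictating the concrete list type. A small sketch of the idea under assumed names (ReportSource and compileInto are illustrative, not the real FsVolumeSpi API):

import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedList;

interface ReportSource {
  // Accept the widest type that supports add(); callers choose the backing.
  void compileInto(Collection<String> report);
}

public class CollectionParamDemo {
  public static void main(String[] args) {
    ReportSource source = report -> {
      report.add("blk_1001");
      report.add("blk_1002");
    };

    Collection<String> arrayBacked = new ArrayList<>(1024); // pre-sized buffer
    Collection<String> nodeBacked = new LinkedList<>();     // still accepted
    source.compileInto(arrayBacked);
    source.compileInto(nodeBacked);
    System.out.println(arrayBacked.size() + " / " + nodeBacked.size()); // 2 / 2
  }
}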
@@ -28,8 +28,8 @@
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
+import java.util.Collection;
 import java.util.Collections;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -46,38 +46,37 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
-import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
-import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
-import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
+import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -311,11 +310,8 @@ void setClosed() throws IOException {
    */
   boolean checkClosed() {
     if (this.reference.getReferenceCount() > 0) {
-      if (FsDatasetImpl.LOG.isDebugEnabled()) {
-        FsDatasetImpl.LOG.debug(String.format(
-            "The reference count for %s is %d, wait to be 0.",
-            this, reference.getReferenceCount()));
-      }
+      FsDatasetImpl.LOG.debug("The reference count for {} is {}, wait to be 0.",
+          this, reference.getReferenceCount());
       return false;
     }
     return true;
@@ -400,11 +396,10 @@ long getBlockPoolUsed(String bpid) throws IOException {
    */
   @VisibleForTesting
   public long getCapacity() {
-    if (configuredCapacity < 0) {
+    if (configuredCapacity < 0L) {
       long remaining = usage.getCapacity() - getReserved();
-      return remaining > 0 ? remaining : 0;
+      return Math.max(remaining, 0L);
     }
 
     return configuredCapacity;
   }
@@ -418,9 +413,9 @@ public void setCapacityForTesting(long capacity) {
     this.configuredCapacity = capacity;
   }
 
-  /*
+  /**
    * Calculate the available space of the filesystem, excluding space reserved
-   * for non-HDFS and space reserved for RBW
+   * for non-HDFS and space reserved for RBW.
    *
    * @return the available number of bytes left in this filesystem. May be zero.
    */
@@ -432,7 +427,7 @@ public long getAvailable() throws IOException {
     if (remaining > available) {
       remaining = available;
     }
-    return (remaining > 0) ? remaining : 0;
+    return Math.max(remaining, 0L);
   }
 
   long getActualNonDfsUsed() throws IOException {
@@ -458,10 +453,8 @@ private long getRemainingReserved() throws IOException {
   public long getNonDfsUsed() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
     long actualReserved = getReserved();
-    if (actualNonDfsUsed < actualReserved) {
-      return 0L;
-    }
-    return actualNonDfsUsed - actualReserved;
+    long nonDfsUsed = actualNonDfsUsed - actualReserved;
+    return Math.max(nonDfsUsed, 0L);
  }
 
   @VisibleForTesting
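getCapacity(), getAvailable(), and getNonDfsUsed() above all clamp a possibly negative quantity to zero, and the commit converges them on Math.max(x, 0L). A tiny equivalence check of the old and new forms:

public class ClampDemo {
  static long oldStyle(long remaining) {
    return remaining > 0 ? remaining : 0;
  }

  static long newStyle(long remaining) {
    return Math.max(remaining, 0L); // one idiom for "never negative"
  }

  public static void main(String[] args) {
    long[] samples = {Long.MIN_VALUE, -1L, 0L, 1L, Long.MAX_VALUE};
    for (long v : samples) {
      if (oldStyle(v) != newStyle(v)) {
        throw new AssertionError("mismatch at " + v);
      }
      System.out.println(v + " -> " + newStyle(v));
    }
  }
}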
@@ -503,7 +496,7 @@ public DF getUsageStats(Configuration conf) {
     try {
       return new DF(new File(currentDir.getParent()), conf);
     } catch (IOException e) {
-      LOG.error("Unable to get disk statistics for volume " + this);
+      LOG.error("Unable to get disk statistics for volume {}", this, e);
     }
   }
   return null;
@@ -525,11 +518,11 @@ public File getFinalizedDir(String bpid) throws IOException {
   }
 
   /**
-   * Make a deep copy of the list of currently active BPIDs
+   * Make a deep copy of the list of currently active BPIDs.
    */
   @Override
   public String[] getBlockPoolList() {
-    return bpSlices.keySet().toArray(new String[bpSlices.keySet().size()]);
+    return bpSlices.keySet().toArray(new String[0]);
   }
 
   /**
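On toArray(new String[0]): the zero-length form lets the collection size the result itself instead of trusting a separate keySet().size() call, which matters if the map can shrink in between (bpSlices appears to be a concurrent map here, an assumption carried into the sketch below), and it avoids the trailing null slots a stale, oversized array would carry:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ToArrayDemo {
  public static void main(String[] args) {
    Map<String, Object> bpSlices = new ConcurrentHashMap<>();
    bpSlices.put("BP-1", new Object());
    bpSlices.put("BP-2", new Object());

    // The collection allocates the exactly-sized array in one pass.
    String[] bpids = bpSlices.keySet().toArray(new String[0]);
    for (String bpid : bpids) {
      System.out.println(bpid);
    }
  }
}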
@@ -549,7 +542,7 @@ File createTmpFile(String bpid, Block b) throws IOException {
 
   @Override
   public void reserveSpaceForReplica(long bytesToReserve) {
-    if (bytesToReserve != 0) {
+    if (bytesToReserve != 0L) {
       reservedForReplicas.addAndGet(bytesToReserve);
       recentReserved = bytesToReserve;
     }
@@ -557,17 +550,15 @@ public void reserveSpaceForReplica(long bytesToReserve) {
 
   @Override
   public void releaseReservedSpace(long bytesToRelease) {
-    if (bytesToRelease != 0) {
-
+    if (bytesToRelease != 0L) {
       long oldReservation, newReservation;
       do {
         oldReservation = reservedForReplicas.get();
         newReservation = oldReservation - bytesToRelease;
-        if (newReservation < 0) {
-          // Failsafe, this should never occur in practice, but if it does we
-          // don't want to start advertising more space than we have available.
-          newReservation = 0;
-        }
+
+        // Fail-safe: this should never be less than zero in practice, but if
+        // it is, do not advertise more space than is available.
+        newReservation = Math.max(newReservation, 0L);
       } while (!reservedForReplicas.compareAndSet(oldReservation,
           newReservation));
     }
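The release path keeps its optimistic compare-and-set loop; only the clamp inside it changes. A condensed, runnable version of the same pattern, with illustrative names:

import java.util.concurrent.atomic.AtomicLong;

public class CasReleaseDemo {
  private final AtomicLong reserved = new AtomicLong(100L);

  void release(long bytes) {
    long oldVal;
    long newVal;
    do {
      oldVal = reserved.get();
      newVal = Math.max(oldVal - bytes, 0L); // never advertise negative space
    } while (!reserved.compareAndSet(oldVal, newVal)); // retry if raced
  }

  public static void main(String[] args) {
    CasReleaseDemo demo = new CasReleaseDemo();
    demo.release(40L);
    demo.release(80L); // over-release is clamped rather than going negative
    System.out.println(demo.reserved.get()); // 0
  }
}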
@@ -679,20 +670,15 @@ private String getNextSubDir(String prev, File dir)
         FsVolumeImpl.this, dir, SubdirFilter.INSTANCE);
     cache = null;
     cacheMs = 0;
-    if (children.size() == 0) {
+    if (children.isEmpty()) {
       LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
           storageID, bpid, dir.getAbsolutePath());
       return null;
     }
     Collections.sort(children);
     String nextSubDir = nextSorted(children, prev);
-    if (nextSubDir == null) {
-      LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
-          storageID, bpid, dir.getAbsolutePath());
-    } else {
-      LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
-          "within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
-    }
+    LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} within {}",
+        storageID, bpid, nextSubDir, dir.getAbsolutePath());
     return nextSubDir;
   }
 
@@ -731,15 +717,12 @@ private List<String> getSubdirEntries() throws IOException {
         state.curFinalizedDir, state.curFinalizedSubDir).toFile();
     List<String> entries = fileIoProvider.listDirectory(
         FsVolumeImpl.this, dir, BlockFileFilter.INSTANCE);
-    if (entries.size() == 0) {
+    if (entries.isEmpty()) {
       entries = null;
+      LOG.trace("getSubdirEntries({}, {}): no entries found in {}", storageID,
+          bpid, dir.getAbsolutePath());
     } else {
       Collections.sort(entries);
-    }
-    if (entries == null) {
-      LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
-          storageID, bpid, dir.getAbsolutePath());
-    } else {
       LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
           storageID, bpid, entries.size(), dir.getAbsolutePath());
     }
@@ -872,10 +855,12 @@ FsVolumeImpl.this, getTempSaveFile()), "UTF-8"))) {
     public void load() throws IOException {
       File file = getSaveFile();
      this.state = READER.readValue(file);
+      if (LOG.isTraceEnabled()) {
         LOG.trace("load({}, {}): loaded iterator {} from {}: {}", storageID,
             bpid, name, file.getAbsoluteFile(),
             WRITER.writeValueAsString(state));
+      }
     }
 
     File getSaveFile() {
       return new File(bpidDir, name + ".cursor");
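The logging changes in this file cut both ways. Parameterized debug()/trace() calls format the message only after the level check, so cheap arguments no longer need an isDebugEnabled() guard (as in checkClosed() earlier); conversely, load() gains a guard because serializing the iterator state is expensive before the logger is even consulted. A sketch of both rules:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoggingDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLoggingDemo.class);

  public static void main(String[] args) {
    Object volume = "/data/1";
    int refCount = 3;

    // Cheap arguments: no guard needed. The {} placeholders are substituted
    // only if DEBUG is enabled, so there is no String.format on the hot path.
    LOG.debug("The reference count for {} is {}, wait to be 0.",
        volume, refCount);

    // Expensive argument: it is computed before trace() runs, so the guard
    // still pays for itself whenever TRACE is off.
    if (LOG.isTraceEnabled()) {
      LOG.trace("iterator state: {}", expensiveSerialization());
    }
  }

  private static String expensiveSerialization() {
    return "{\"bpid\":\"BP-1\",\"cursor\":12345}"; // imagine a JSON writer
  }
}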
@@ -956,15 +941,21 @@ ReplicaInfo addFinalizedBlock(String bpid, Block b, ReplicaInfo replicaInfo,
       long bytesReserved) throws IOException {
     releaseReservedSpace(bytesReserved);
     File dest = getBlockPoolSlice(bpid).addFinalizedBlock(b, replicaInfo);
-    byte[] checksum = null;
+    final byte[] checksum;
     // copy the last partial checksum if the replica is originally
     // in finalized or rbw state.
-    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
+    switch (replicaInfo.getState()) {
+    case FINALIZED:
       FinalizedReplica finalized = (FinalizedReplica) replicaInfo;
       checksum = finalized.getLastPartialChunkChecksum();
-    } else if (replicaInfo.getState() == ReplicaState.RBW) {
+      break;
+    case RBW:
       ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
       checksum = rbw.getLastChecksumAndDataLen().getChecksum();
+      break;
+    default:
+      checksum = null;
+      break;
     }
 
     return new ReplicaBuilder(ReplicaState.FINALIZED)
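Two things change in addFinalizedBlock(): the if/else chain over the replica state becomes a switch, and checksum becomes a blank final, so the compiler verifies that every branch assigns it exactly once. A self-contained sketch, with an illustrative enum standing in for HdfsServerConstants.ReplicaState:

public class SwitchAssignDemo {
  enum State { FINALIZED, RBW, TEMPORARY }

  static String checksumSource(State state) {
    final String checksum; // blank final: definite assignment is enforced
    switch (state) {
    case FINALIZED:
      checksum = "from finalized replica";
      break;
    case RBW:
      checksum = "from last chunk of replica being written";
      break;
    default:
      checksum = null; // other states carry no partial-chunk checksum
      break;
    }
    return checksum;
  }

  public static void main(String[] args) {
    for (State s : State.values()) {
      System.out.println(s + " -> " + checksumSource(s));
    }
  }
}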
@@ -990,21 +981,19 @@ public VolumeCheckResult check(VolumeCheckContext ignored)
   }
 
   void getVolumeMap(ReplicaMap volumeMap,
-      final RamDiskReplicaTracker ramDiskReplicaMap)
-      throws IOException {
+      final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
     for (BlockPoolSlice s : bpSlices.values()) {
       s.getVolumeMap(volumeMap, ramDiskReplicaMap);
     }
   }
 
   void getVolumeMap(String bpid, ReplicaMap volumeMap,
-      final RamDiskReplicaTracker ramDiskReplicaMap)
-      throws IOException {
+      final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
     getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
   }
 
   long getNumBlocks() {
-    long numBlocks = 0;
+    long numBlocks = 0L;
     for (BlockPoolSlice s : bpSlices.values()) {
       numBlocks += s.getNumOfBlocks();
     }
@@ -1038,10 +1027,9 @@ void addBlockPool(String bpid, Configuration c, Timer timer)
     File bpdir = new File(currentDir, bpid);
     BlockPoolSlice bp;
     if (timer == null) {
-      bp = new BlockPoolSlice(bpid, this, bpdir, c, new Timer());
-    } else {
-      bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
+      timer = new Timer();
     }
+    bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
     bpSlices.put(bpid, bp);
   }
 
@@ -1137,7 +1125,6 @@ DatanodeStorage toDatanodeStorage() {
     return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
   }
 
-
   @Override
   public byte[] loadLastPartialChunkChecksum(
       File blockFile, File metaFile) throws IOException {
@@ -1313,11 +1300,10 @@ private File[] copyReplicaWithNewBlockIdAndGS(
   }
 
   @Override
-  public LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-      throws InterruptedException, IOException {
-    return compileReport(getFinalizedDir(bpid),
-        getFinalizedDir(bpid), report, reportCompiler);
+  public void compileReport(String bpid, Collection<ScanInfo> report,
+      ReportCompiler reportCompiler) throws InterruptedException, IOException {
+    compileReport(getFinalizedDir(bpid), getFinalizedDir(bpid), report,
+        reportCompiler);
   }
 
   @Override
@@ -1330,21 +1316,35 @@ public DataNodeVolumeMetrics getMetrics() {
     return metrics;
   }
 
-  private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
-      File dir, LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+  /**
+   * Filter for block file names stored on the file system volumes.
+   */
+  public enum BlockDirFilter implements FilenameFilter {
+    INSTANCE;
+
+    @Override
+    public boolean accept(File dir, String name) {
+      return name.startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)
+          || name.startsWith(DataStorage.STORAGE_DIR_FINALIZED)
+          || name.startsWith(Block.BLOCK_FILE_PREFIX);
+    }
+  }
+
+  private void compileReport(File bpFinalizedDir, File dir,
+      Collection<ScanInfo> report, ReportCompiler reportCompiler)
       throws InterruptedException {
 
     reportCompiler.throttle();
 
     List <String> fileNames;
     try {
-      fileNames = fileIoProvider.listDirectory(
-          this, dir, BlockDirFilter.INSTANCE);
+      fileNames =
+          fileIoProvider.listDirectory(this, dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occurred while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report", ioe);
       // Volume error check moved to FileIoProvider.
       // Ignore this directory and proceed.
-      return report;
+      return;
     }
     Collections.sort(fileNames);
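BlockDirFilter arrives here from DirectoryScanner unchanged: a stateless enum singleton FilenameFilter, so one shared INSTANCE serves every directory listing with no per-call allocation. A runnable sketch; the prefix values below are assumed stand-ins for the DataStorage and Block constants:

import java.io.File;
import java.io.FilenameFilter;

public enum DirFilterDemo implements FilenameFilter {
  INSTANCE;

  // Assumed values of the real DataStorage/Block constants.
  private static final String BLOCK_SUBDIR_PREFIX = "subdir";
  private static final String STORAGE_DIR_FINALIZED = "finalized";
  private static final String BLOCK_FILE_PREFIX = "blk_";

  @Override
  public boolean accept(File dir, String name) {
    return name.startsWith(BLOCK_SUBDIR_PREFIX)
        || name.startsWith(STORAGE_DIR_FINALIZED)
        || name.startsWith(BLOCK_FILE_PREFIX);
  }

  public static void main(String[] args) {
    File dir = new File(".");
    System.out.println(INSTANCE.accept(dir, "blk_1073741825")); // true
    System.out.println(INSTANCE.accept(dir, "subdir12"));       // true
    System.out.println(INSTANCE.accept(dir, "scanner.cursor")); // false
  }
}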
@@ -1396,7 +1396,6 @@ private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
       verifyFileLocation(blockFile, bpFinalizedDir, blockId);
       report.add(new ScanInfo(blockId, blockFile, metaFile, this));
     }
-    return report;
   }
 
   /**

@@ -17,15 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Map;
-import java.util.Set;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -41,21 +43,23 @@
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
-import org.apache.hadoop.util.Timer;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectReader;
@@ -63,11 +67,6 @@
 
 import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Time;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES;
-
 /**
  * This class is used to create provided volumes.
 */
@@ -227,7 +226,7 @@ public void shutdown(BlockListAsLongs blocksListsAsLongs) {
       // nothing to do!
     }
 
-    public void compileReport(LinkedList<ScanInfo> report,
+    public void compileReport(Collection<ScanInfo> report,
         ReportCompiler reportCompiler)
         throws IOException, InterruptedException {
       /* refresh the aliasMap and return the list of blocks found.
@@ -240,9 +239,8 @@ public void compileReport(LinkedList<ScanInfo> report,
       BlockAliasMap.Reader<FileRegion> reader = aliasMap.getReader(null, bpid);
       for (FileRegion region : reader) {
         reportCompiler.throttle();
-        report.add(new ScanInfo(region.getBlock().getBlockId(),
-            providedVolume, region,
-            region.getProvidedStorageLocation().getLength()));
+        report.add(new ScanInfo(region.getBlock().getBlockId(), providedVolume,
+            region, region.getProvidedStorageLocation().getLength()));
       }
     }
 
@@ -336,7 +334,7 @@ public long getNonDfsUsed() throws IOException {
 
   @Override
   long getNumBlocks() {
-    long numBlocks = 0;
+    long numBlocks = 0L;
     for (ProvidedBlockPoolSlice s : bpSlices.values()) {
       numBlocks += s.getNumOfBlocks();
     }
@@ -381,7 +379,7 @@ private static class ProvidedBlockIteratorState {
       iterStartMs = Time.now();
       lastSavedMs = iterStartMs;
       atEnd = false;
-      lastBlockId = -1;
+      lastBlockId = -1L;
     }
 
     // The wall-clock ms since the epoch at which this iterator was last saved.
@@ -611,14 +609,12 @@ void deleteBPDirectories(String bpid, boolean force) throws IOException {
   }
 
   @Override
-  public LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-      throws InterruptedException, IOException {
-    LOG.info("Compiling report for volume: " + this + " bpid " + bpid);
+  public void compileReport(String bpid, Collection<ScanInfo> report,
+      ReportCompiler reportCompiler) throws InterruptedException, IOException {
+    LOG.info("Compiling report for volume: {}; bpid: {}", this, bpid);
     if (bpSlices.containsKey(bpid)) {
       bpSlices.get(bpid).compileReport(report, reportCompiler);
     }
-    return report;
   }
 
   @Override

@@ -25,10 +25,11 @@
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
@@ -183,8 +184,8 @@ private class BInfo implements ReplicaInPipeline {
     private boolean pinned = false;
     BInfo(String bpid, Block b, boolean forWriting) throws IOException {
       theBlock = new Block(b);
-      if (theBlock.getNumBytes() < 0) {
-        theBlock.setNumBytes(0);
+      if (theBlock.getNumBytes() < 0L) {
+        theBlock.setNumBytes(0L);
       }
       if (!getStorage(theBlock).alloc(bpid, theBlock.getNumBytes())) {
         // expected length - actual length may
@@ -260,7 +261,7 @@ synchronized void finalizeBlock(String bpid, long finalSize)
       // We had allocated the expected length when block was created;
       // adjust if necessary
       long extraLen = finalSize - theBlock.getNumBytes();
-      if (extraLen > 0) {
+      if (extraLen > 0L) {
        if (!getStorage(theBlock).alloc(bpid, extraLen)) {
          DataNode.LOG.warn("Lack of free storage on a block alloc");
          throw new IOException("Creating block, no free space available");
@@ -402,7 +403,8 @@ public void stopWriter(long xceiverStopTimeout) throws IOException {
    * to {@link BlockPoolSlice}
    */
  private static class SimulatedBPStorage {
-    private long used; // in bytes
+    // in bytes
+    private long used;
     private final Map<Block, BInfo> blockMap = new TreeMap<>();
 
     long getUsed() {
@@ -422,7 +424,7 @@ Map<Block, BInfo> getBlockMap() {
     }
 
     SimulatedBPStorage() {
-      used = 0;
+      used = 0L;
     }
   }
 
@@ -447,7 +449,7 @@ long getCapacity() {
     }
 
     synchronized long getUsed() {
-      long used = 0;
+      long used = 0L;
      for (SimulatedBPStorage bpStorage : map.values()) {
        used += bpStorage.getUsed();
      }
@@ -635,10 +637,9 @@ public byte[] loadLastPartialChunkChecksum(
     }
 
     @Override
-    public LinkedList<ScanInfo> compileReport(String bpid,
-        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+    public void compileReport(String bpid,
+        Collection<ScanInfo> report, ReportCompiler reportCompiler)
         throws InterruptedException, IOException {
-      return null;
     }
 
     @Override
@@ -662,7 +663,6 @@ public VolumeCheckResult check(VolumeCheckContext context)
   private final String datanodeUuid;
   private final DataNode datanode;
 
-
   public SimulatedFSDataset(DataStorage storage, Configuration conf) {
     this(null, storage, conf);
   }
@@ -792,12 +792,12 @@ public synchronized Map<DatanodeStorage, BlockListAsLongs> getBlockReports(
 
   @Override // FsDatasetSpi
   public List<Long> getCacheReport(String bpid) {
-    return new LinkedList<Long>();
+    return Collections.emptyList();
   }
 
   @Override // FSDatasetMBean
   public long getCapacity() {
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getCapacity();
     }
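Collections.emptyList() returns one shared, immutable instance, so the simulated dataset stops allocating a fresh LinkedList per call; the trade-off is that callers must treat the result as read-only. Demonstration:

import java.util.Collections;
import java.util.List;

public class EmptyListDemo {
  public static void main(String[] args) {
    List<Long> cacheReport = Collections.emptyList();
    System.out.println(cacheReport.size());                           // 0
    System.out.println(cacheReport == Collections.<Long>emptyList()); // true
    try {
      cacheReport.add(1L); // would have succeeded on a fresh LinkedList
    } catch (UnsupportedOperationException e) {
      System.out.println("read-only: " + e.getClass().getSimpleName());
    }
  }
}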
@@ -806,7 +806,7 @@ public long getCapacity() {
 
   @Override // FSDatasetMBean
   public long getDfsUsed() {
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getUsed();
     }
@@ -815,7 +815,7 @@ public long getDfsUsed() {
 
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getBlockPoolUsed(bpid);
     }
@@ -824,8 +824,7 @@ public long getBlockPoolUsed(String bpid) throws IOException {
 
   @Override // FSDatasetMBean
   public long getRemaining() {
-
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getFree();
     }
@@ -834,7 +833,6 @@ public long getRemaining() {
 
   @Override // FSDatasetMBean
   public int getNumFailedVolumes() {
-
     int total = 0;
     for (SimulatedStorage storage : storages) {
       total += storage.getNumFailedVolumes();
@@ -849,12 +847,12 @@ public String[] getFailedStorageLocations() {
 
   @Override // FSDatasetMBean
   public long getLastVolumeFailureDate() {
-    return 0;
+    return 0L;
   }
 
   @Override // FSDatasetMBean
   public long getEstimatedCapacityLostTotal() {
-    return 0;
+    return 0L;
   }
 
   @Override // FsDatasetSpi
@@ -864,27 +862,27 @@ public VolumeFailureSummary getVolumeFailureSummary() {
 
   @Override // FSDatasetMBean
   public long getCacheUsed() {
-    return 0l;
+    return 0L;
   }
 
   @Override // FSDatasetMBean
   public long getCacheCapacity() {
-    return 0l;
+    return 0L;
   }
 
   @Override // FSDatasetMBean
   public long getNumBlocksCached() {
-    return 0l;
+    return 0L;
   }
 
   @Override
   public long getNumBlocksFailedToCache() {
-    return 0l;
+    return 0L;
   }
 
   @Override
   public long getNumBlocksFailedToUncache() {
-    return 0l;
+    return 0L;
   }
 
   /**
@@ -931,7 +929,7 @@ public synchronized String getReplicaString(String bpid, long blockId) {
     } catch (IOException ioe) {
       // Ignore
     }
-    return r == null? "null": r.toString();
+    return Objects.toString(r);
   }
 
   @Override // FsDatasetSpi
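Objects.toString(obj) is a drop-in for the ternary it replaces above: it returns the string "null" for a null argument and obj.toString() otherwise.

import java.util.Objects;

public class ObjectsToStringDemo {
  public static void main(String[] args) {
    Object replica = null;
    System.out.println(Objects.toString(replica));      // null
    System.out.println(Objects.toString("replica#7"));  // replica#7
  }
}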
@@ -1013,8 +1011,8 @@ public boolean contains(ExtendedBlock block) {
    *
    * @param b The block to check.
    * @param minLength The minimum length that the block must have. May be 0.
-   * @param state If this is null, it is ignored. If it is non-null, we
-   *          will check that the replica has this state.
+   * @param state If this is null, it is ignored. If it is non-null, we will
+   *          check that the replica has this state.
    *
    * @throws ReplicaNotFoundException If the replica is not found
    *
@@ -1159,7 +1157,6 @@ protected synchronized InputStream getBlockInputStream(ExtendedBlock b)
     if (binfo == null) {
       throw new IOException("No such Block " + b);
     }
-
     return binfo.getIStream();
   }
 
@@ -1199,14 +1196,11 @@ public void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
 
   @Override // FsDatasetSpi
   public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
-      ReplicaOutputStreams stream,
-      int checksumSize)
-      throws IOException {
+      ReplicaOutputStreams stream, int checksumSize) throws IOException {
   }
 
   /**
-   * Simulated input and output streams
-   *
+   * Simulated input and output streams.
   */
  static private class SimulatedInputStream extends java.io.InputStream {
    final long length; // bytes
@@ -1215,7 +1209,7 @@ static private class SimulatedInputStream extends java.io.InputStream {
     Block theBlock = null;
 
     /**
-     * An input stream of size l with repeated bytes
+     * An input stream of size l with repeated bytes.
     * @param l size of the stream
     * @param iRepeatedData byte that is repeated in the stream
     */
@@ -1254,7 +1248,6 @@ public int read() throws IOException {
 
   @Override
   public int read(byte[] b) throws IOException {
-
     if (b == null) {
       throw new NullPointerException();
     }
@@ -1280,7 +1273,6 @@ public int read(byte[] b) throws IOException {
   /**
    * This class implements an output stream that merely throws its data away, but records its
    * length.
-   *
   */
  static private class SimulatedOutputStream extends OutputStream {
    long length = 0;
@@ -1316,17 +1308,13 @@ public void write(byte[] b) throws IOException {
   }
 
   @Override
-  public void write(byte[] b,
-      int off,
-      int len) throws IOException {
+  public void write(byte[] b, int off, int len) throws IOException {
     length += len;
   }
 }
 
 private ObjectName mbeanName;
-
-
 
 /**
  * Register the FSDataset MBean using the name
 * "hadoop:service=DataNode,name=FSDatasetState-<storageid>"

@@ -35,7 +35,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.Executors;
@@ -924,10 +923,9 @@ public byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile)
     }
 
     @Override
-    public LinkedList<ScanInfo> compileReport(String bpid,
-        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+    public void compileReport(String bpid,
+        Collection<ScanInfo> report, ReportCompiler reportCompiler)
        throws InterruptedException, IOException {
-      return null;
     }
 
     @Override

@@ -22,14 +22,14 @@
 import java.io.IOException;
 import java.net.URI;
 import java.nio.channels.ClosedChannelException;
-import java.util.LinkedList;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -117,10 +117,8 @@ public byte[] loadLastPartialChunkChecksum(
   }
 
   @Override
-  public LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-      throws InterruptedException, IOException {
-    return null;
+  public void compileReport(String bpid, Collection<ScanInfo> report,
+      ReportCompiler reportCompiler) throws InterruptedException, IOException {
   }
 
   @Override