HDFS-7060. Avoid taking locks when sending heartbeats from the DataNode. Contributed by Jiandan Yang.
parent 51e882d5c9
commit bb8a6eea52
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -153,7 +153,8 @@ public DatanodeStorage getStorage(final String storageUuid) {
   public StorageReport[] getStorageReports(String bpid)
       throws IOException {
     List<StorageReport> reports;
-    synchronized (statsLock) {
+    // Volumes are the references from a copy-on-write snapshot, so the
+    // access on the volume metrics doesn't require an additional lock.
     List<FsVolumeImpl> curVolumes = volumes.getVolumes();
     reports = new ArrayList<>(curVolumes.size());
     for (FsVolumeImpl volume : curVolumes) {
@@ -170,7 +171,6 @@ public StorageReport[] getStorageReports(String bpid)
         continue;
       }
     }
-    }
 
     return reports.toArray(new StorageReport[reports.size()]);
   }
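The new comment's safety argument rests on FsVolumeList publishing its volumes as a copy-on-write snapshot: readers iterate an immutable array while writers build and atomically swap in a replacement. Below is a minimal sketch of that pattern; CowVolumeList and its methods are illustrative names under that assumption, not the actual FsVolumeList internals.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical stand-in for FsVolumeList's copy-on-write volume array.
class CowVolumeList<V> {
  private final AtomicReference<V[]> volumes;

  CowVolumeList(V[] initial) {
    this.volumes = new AtomicReference<>(initial.clone());
  }

  // Readers get an immutable snapshot; no lock is needed because a
  // published array is never mutated afterwards.
  List<V> getVolumes() {
    return Collections.unmodifiableList(Arrays.asList(volumes.get()));
  }

  // Writers copy, modify, and atomically publish a new array.
  void addVolume(V volume) {
    volumes.updateAndGet(old -> {
      V[] next = Arrays.copyOf(old, old.length + 1);
      next[old.length] = volume;
      return next;
    });
  }
}

A reader that starts iterating just before a volume is added or removed simply finishes on its old snapshot. That weak consistency is sufficient for getStorageReports, whose output feeds periodic heartbeats rather than correctness-critical decisions.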
@@ -247,9 +247,6 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
 
   private final int smallBufferSize;
 
-  // Used for synchronizing access to usage stats
-  private final Object statsLock = new Object();
-
   final LocalFileSystem localFS;
 
   private boolean blockPinningEnabled;
@@ -583,20 +580,16 @@ public void removeVolumes(
    */
   @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
-    synchronized(statsLock) {
     return volumes.getDfsUsed();
   }
-  }
 
   /**
    * Return the total space used by dfs datanode
    */
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    synchronized(statsLock) {
     return volumes.getBlockPoolUsed(bpid);
   }
-  }
 
   /**
    * Return true - if there are still valid volumes on the DataNode.
@@ -611,20 +604,16 @@ public boolean hasEnoughResource() {
    */
   @Override // FSDatasetMBean
   public long getCapacity() throws IOException {
-    synchronized(statsLock) {
     return volumes.getCapacity();
   }
-  }
 
   /**
    * Return how many bytes can still be stored in the FSDataset
    */
   @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
-    synchronized(statsLock) {
     return volumes.getRemaining();
   }
-  }
 
   /**
    * Return the number of failed volumes in the FSDataset.
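Removing synchronized(statsLock) from the four FSDatasetMBean getters is safe because the lock only serialized readers against each other: the underlying usage counters were updated under the dataset lock, not statsLock, so the getters never had read-write consistency to lose. A hedged sketch of the resulting read path, assuming per-volume atomic counters; VolumeUsage and DatasetUsage are illustrative stand-ins, not the real FsVolumeList aggregation:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.LongAdder;

// Illustrative model of lock-free aggregation over per-volume counters.
class VolumeUsage {
  final LongAdder dfsUsed = new LongAdder(); // bumped by block add/delete paths
}

class DatasetUsage {
  private final List<VolumeUsage> volumes = new CopyOnWriteArrayList<>();

  void addVolume(VolumeUsage v) {
    volumes.add(v);
  }

  // A plain sum over atomic counters: each read is individually
  // consistent, and the total is a weakly consistent snapshot.
  long getDfsUsed() {
    long total = 0;
    for (VolumeUsage v : volumes) {
      total += v.dfsUsed.sum();
    }
    return total;
  }
}

A weakly consistent total is all heartbeat statistics need, and it is no weaker than what the old statsLock actually guaranteed.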
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -51,7 +51,6 @@
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -342,7 +341,9 @@ void onMetaFileDeletion(String bpid, long value) {
 
   private void decDfsUsedAndNumBlocks(String bpid, long value,
                                       boolean blockFileDeleted) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
+    // BlockPoolSlice map is thread safe, and update the space used or
+    // number of blocks are atomic operations, so it doesn't require to
+    // hold the dataset lock.
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp != null) {
       bp.decDfsUsed(value);
@@ -351,35 +352,28 @@ private void decDfsUsedAndNumBlocks(String bpid, long value,
       }
     }
   }
-  }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp != null) {
       bp.incDfsUsed(value);
       bp.incrNumBlocks();
     }
   }
-  }
 
   void incDfsUsed(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp != null) {
       bp.incDfsUsed(value);
     }
   }
-  }
 
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
     for(BlockPoolSlice s : bpSlices.values()) {
       dfsUsed += s.getDfsUsed();
     }
-    }
     return dfsUsed;
   }
 
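The removed try(AutoCloseableLock lock = dataset.acquireDatasetLock()) blocks depended on exactly what the new comment states: the bpid-to-BlockPoolSlice map is thread safe, and the per-slice space and block counters are updated atomically, so holding the dataset-wide lock on these hot paths bought nothing. A simplified sketch of the update path, assuming ConcurrentHashMap plus AtomicLong counters; PoolSlice and Volume here are stand-ins, not the real BlockPoolSlice and FsVolumeImpl:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-in for a BlockPoolSlice's usage accounting.
class PoolSlice {
  private final AtomicLong dfsUsed = new AtomicLong();
  private final AtomicLong numBlocks = new AtomicLong();

  void incDfsUsed(long value) { dfsUsed.addAndGet(value); }
  void decDfsUsed(long value) { dfsUsed.addAndGet(-value); }
  void incrNumBlocks() { numBlocks.incrementAndGet(); }
  long getDfsUsed() { return dfsUsed.get(); }
}

class Volume {
  // Concurrent map: get() is safe against concurrent put()/remove(),
  // so no dataset-wide lock is needed to look up a slice.
  private final Map<String, PoolSlice> bpSlices = new ConcurrentHashMap<>();

  void incDfsUsedAndNumBlocks(String bpid, long value) {
    PoolSlice bp = bpSlices.get(bpid);
    if (bp != null) {          // pool may have been removed concurrently
      bp.incDfsUsed(value);    // atomic; no torn reads or writes
      bp.incrNumBlocks();
    }
  }
}

Note that the two increments in incDfsUsedAndNumBlocks are individually atomic but not atomic as a pair, so a concurrent reader can briefly observe dfsUsed updated before numBlocks. The patch accepts that for usage statistics; in exchange, heartbeat generation no longer contends with block operations for the dataset lock.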