HDFS-6965. NN continues to issue block locations for DNs with full disks. Contributed by Rushabh Shah.
Kihwal Lee 2014-09-16 09:04:54 -05:00
parent 7e08c0f23f
commit 0c26412be4
3 changed files with 59 additions and 1 deletion

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -653,6 +653,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
     (clamb via wang)
 
+    HDFS-6965. NN continues to issue block locations for DNs with full disks.
+    (Rushabh Shah via kihwal)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -635,7 +635,7 @@ private boolean isGoodTarget(DatanodeStorageInfo storage,
     final long requiredSize = blockSize * HdfsConstants.MIN_BLOCKS_FOR_WRITE;
     final long scheduledSize = blockSize * node.getBlocksScheduled();
-    if (requiredSize > node.getRemaining() - scheduledSize) {
+    if (requiredSize > storage.getRemaining() - scheduledSize) {
       logNodeIsNotChosen(storage, "the node does not have enough space ");
       return false;
     }
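
The fix is the one-line change above: isGoodTarget now compares the required size against the remaining space of the individual candidate storage instead of the datanode's aggregate remaining space, so a node whose free space is scattered across nearly full disks is no longer chosen. Below is a minimal, self-contained sketch of the difference; the Storage class and the numbers are illustrative stand-ins for DatanodeStorageInfo and the values used by the test further down, not the real HDFS API.

// Illustrative sketch only: Storage stands in for DatanodeStorageInfo,
// reduced to the one method the space check uses.
class Storage {
  private final long remaining;
  Storage(long remaining) { this.remaining = remaining; }
  long getRemaining() { return remaining; }
}

public class PerStorageCheckSketch {
  // Old behavior: a node-level check against remaining space summed
  // across all of the node's storages.
  static boolean nodeHasSpace(Storage[] storages, long requiredSize,
      long scheduledSize) {
    long nodeRemaining = 0;
    for (Storage s : storages) {
      nodeRemaining += s.getRemaining();
    }
    return requiredSize <= nodeRemaining - scheduledSize;
  }

  // New behavior: the candidate storage itself must have enough room.
  static boolean storageHasSpace(Storage storage, long requiredSize,
      long scheduledSize) {
    return requiredSize <= storage.getRemaining() - scheduledSize;
  }

  public static void main(String[] args) {
    // Same numbers as the test below: two storages with 64k free each,
    // and a 100k block to place.
    Storage[] storages = { new Storage(65536), new Storage(65536) };
    long required = 102400;

    // Node-level check passes (128k total remaining) ...
    System.out.println(nodeHasSpace(storages, required, 0));       // true
    // ... but neither storage can actually hold the block.
    System.out.println(storageHasSpace(storages[0], required, 0)); // false
  }
}

Running the sketch prints true for the old node-level check and false for the new per-storage check, which is exactly the situation the test below constructs.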

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -35,17 +35,24 @@
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
@@ -599,5 +606,53 @@ public void testSafeModeIBRAfterIncremental() throws Exception {
        new BlockListAsLongs(null, null));
    assertEquals(1, ds.getBlockReportCount());
  }

  /**
   * Tests that a namenode doesn't choose a datanode with full disks to
   * store blocks.
   * @throws Exception
   */
  @Test
  public void testStorageWithRemainingCapacity() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = FileSystem.get(conf);
    Path file1 = null;
    try {
      cluster.waitActive();
      final FSNamesystem namesystem = cluster.getNamesystem();
      final String poolId = namesystem.getBlockPoolId();
      final DatanodeRegistration nodeReg =
          DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().
          get(0), poolId);
      final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem,
          nodeReg);
      // By default, MiniDFSCluster creates 1 datanode with 2 storages.
      // Assign 64k of remaining capacity to each storage, then try to
      // create a 100k file.
      for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
        storage.setUtilizationForTesting(65536, 0, 65536, 0);
      }
      // Sum of the remaining capacity of both storages.
      dd.setRemaining(131072);
      file1 = new Path("testRemainingStorage.dat");
      try {
        DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1,
            0x1BAD5EED);
      } catch (RemoteException re) {
        GenericTestUtils.assertExceptionContains("nodes instead of "
            + "minReplication", re);
      }
    } finally {
      // Clean up
      assertTrue(fs.exists(file1));
      fs.delete(file1, true);
      assertTrue(!fs.exists(file1));
      cluster.shutdown();
    }
  }
}
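
In the test, requiredSize works out to 102400 bytes (blockSize * MIN_BLOCKS_FOR_WRITE, where MIN_BLOCKS_FOR_WRITE defaults to 1), so with the fix neither 64k storage passes the isGoodTarget check even though the node as a whole still reports 128k remaining. The namenode therefore rejects the create up front with the "nodes instead of minReplication" error the test asserts, instead of issuing a block location on a full disk.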