HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to BlockPlacementPolicyRackFaultTolerant. (wang)

This commit is contained in:
Andrew Wang 2015-06-02 15:48:26 -07:00
parent efc510a570
commit c1d50a91f7
3 changed files with 7 additions and 4 deletions

View File

@ -594,6 +594,9 @@ Release 2.8.0 - UNRELEASED
      HDFS-8386. Improve synchronization of 'streamer' reference in
      DFSOutputStream. (Rakesh R via wang)

+     HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to
+     BlockPlacementPolicyRackFaultTolerant. (wang)

    OPTIMIZATIONS

      HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@ -30,7 +30,7 @@ import java.util.*;
 * The strategy is that it tries its best to place the replicas to most racks.
 */
@InterfaceAudience.Private
-public class BlockPlacementPolicyRackFaultTolarent extends BlockPlacementPolicyDefault {
+public class BlockPlacementPolicyRackFaultTolerant extends BlockPlacementPolicyDefault {
  @Override
  protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) {

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolarent;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.net.StaticMapping;
import org.junit.After;
@ -42,7 +42,7 @@ import java.util.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

-public class TestBlockPlacementPolicyRackFaultTolarent {
+public class TestBlockPlacementPolicyRackFaultTolerant {
  private static final int DEFAULT_BLOCK_SIZE = 1024;
  private MiniDFSCluster cluster = null;
@ -63,7 +63,7 @@ public class TestBlockPlacementPolicyRackFaultTolarent {
      }
    }
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
-        BlockPlacementPolicyRackFaultTolarent.class,
+        BlockPlacementPolicyRackFaultTolerant.class,
        BlockPlacementPolicy.class);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);