diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 4dae2334d5..ae134c4818 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -239,6 +239,13 @@ public boolean isCorrupt() {
     return corrupt;
   }
 
+  /**
+   * Return true if the block is striped (erasure coded).
+   */
+  public boolean isStriped() {
+    return false;
+  }
+
   /**
    * Set the start offset of file associated with this block
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
index 2ee7f4164c..47ba64bd6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
@@ -54,4 +54,8 @@ private void readObject(ObjectInputStream ois)
     block = null;
   }
 
+  @Override
+  public boolean isStriped() {
+    return block.isStriped();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 9374c047ee..348301cc7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSStripedOutputStream;
@@ -65,6 +66,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class TestAddStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
@@ -476,4 +479,25 @@ public void testCheckStripedReplicaCorrupt() throws Exception {
     Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
   }
 
+  @Test
+  public void testStripedFlagInBlockLocation() throws IOException {
+    Path replicated = new Path("/blockLocation/replicated");
+    try (FSDataOutputStream out =
+        dfs.createFile(replicated).replicate().recursive().build()) {
+      out.write("this is a replicated file".getBytes());
+    }
+    BlockLocation[] locations = dfs.getFileBlockLocations(replicated, 0, 100);
+    assertEquals("There should be exactly one Block present",
+        1, locations.length);
+    assertFalse("The file is Striped", locations[0].isStriped());
+
+    Path striped = new Path("/blockLocation/striped");
+    try (FSDataOutputStream out = dfs.createFile(striped).recursive().build()) {
+      out.write("this is a striped file".getBytes());
+    }
+    locations = dfs.getFileBlockLocations(striped, 0, 100);
+    assertEquals("There should be exactly one Block present",
+        1, locations.length);
+    assertTrue("The file is not Striped", locations[0].isStriped());
+  }
 }
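
For reviewers, a minimal usage sketch of the new flag, not part of the patch: it assumes a reachable cluster configured on the classpath and takes a single path argument; the class name StripedBlockReport and the output format are illustrative only. It only relies on the existing FileSystem#getFileBlockLocations API plus the isStriped() accessor added above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustrative sketch only (not part of the patch): lists the block
 * locations of a file and reports whether each block is striped
 * (erasure coded).
 */
public class StripedBlockReport {
  public static void main(String[] args) throws IOException {
    Path file = new Path(args[0]);
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileStatus(file);
    // With this patch, HDFS returns HdfsBlockLocation instances whose
    // isStriped() reflects the underlying block; file systems that do not
    // override it fall back to BlockLocation's default of false.
    BlockLocation[] blocks =
        fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation block : blocks) {
      System.out.println(block + " striped=" + block.isStriped());
    }
  }
}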