HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte. Contributed by Zhe Zhang.

Andrew Wang 2015-04-24 11:54:25 -07:00
parent cf6c8a1b4e
commit c7d9ad68e3
3 changed files with 54 additions and 29 deletions
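Note on the "byte to integer casting" named in the title (an illustrative aside, not code from the patch): Java bytes are signed, so widening a byte to an int sign-extends it, while masking with 0xff zero-extends it and keeps the value in 0..255.

    public class ByteWideningDemo {
      public static void main(String[] args) {
        byte b = (byte) 0xCA;          // bit pattern 1100_1010
        int signExtended = b;          // widening keeps the sign: -54
        int zeroExtended = b & 0xff;   // masking drops the sign: 202
        System.out.println(signExtended + " vs " + zeroExtended);
      }
    }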

CHANGES.txt

@@ -557,6 +557,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8231. StackTrace displayed at client while QuotaByStorageType exceeds
     (J.Andreina and Xiaoyu Yao via vinayakumarb)
 
+    HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte.
+    (Zhe Zhang via wang)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

SimulatedFSDataset.java

@@ -80,6 +80,7 @@
  * Note the synchronization is coarse grained - it is at each method.
  */
 public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
+  public final static int BYTE_MASK = 0xff;
   static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
     @Override
     public SimulatedFSDataset newInstance(DataNode datanode,
@@ -99,8 +100,8 @@ public static void setFactory(Configuration conf) {
   }
 
   public static byte simulatedByte(Block b, long offsetInBlk) {
-    byte firstByte = (byte) (b.getBlockId() % Byte.MAX_VALUE);
-    return (byte) ((firstByte + offsetInBlk) % Byte.MAX_VALUE);
+    byte firstByte = (byte) (b.getBlockId() & BYTE_MASK);
+    return (byte) ((firstByte + offsetInBlk) & BYTE_MASK);
   }
 
   public static final String CONFIG_PROPERTY_CAPACITY =
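Why the modulo form misbehaves for the negative block IDs exercised by the new tests (a standalone sketch under that assumption, not code from the patch): Java's % takes the sign of the dividend, while & 0xff always yields a value in 0..255 before the final narrowing cast.

    public class ModuloVsMaskDemo {
      public static void main(String[] args) {
        long negativeBlockId = -200L;
        // Remainder follows the dividend's sign, so the old formula could go negative.
        System.out.println(negativeBlockId % Byte.MAX_VALUE); // prints -73
        // Masking keeps only the low 8 bits, independent of sign.
        System.out.println(negativeBlockId & 0xff);           // prints 56
      }
    }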
@@ -1028,12 +1029,13 @@ long getLength() {
     @Override
     public int read() throws IOException {
-      if (currentPos >= length)
+      if (currentPos >= length) {
         return -1;
+      }
       if (data !=null) {
         return data[currentPos++];
       } else {
-        return simulatedByte(theBlock, currentPos++);
+        return simulatedByte(theBlock, currentPos++) & BYTE_MASK;
       }
     }
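The masking in read() matters because java.io.InputStream#read() must return an int in 0..255, or -1 only at end of stream. Returned unmasked, a simulated byte sign-extends, and a simulated byte of -1 becomes indistinguishable from EOF. A minimal illustration (not part of the patch):

    public class ReadContractDemo {
      public static void main(String[] args) {
        byte simulated = -1;            // a possible simulatedByte(...) result
        int unmasked = simulated;       // sign-extends to -1: callers would see EOF
        int masked = simulated & 0xff;  // 255: a legal read() return value
        System.out.println(unmasked + " vs " + masked);
      }
    }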

TestSimulatedFSDataset.java

@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.SequentialBlockIdGenerator;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
@@ -48,6 +49,7 @@ public class TestSimulatedFSDataset {
   static final String bpid = "BP-TEST";
   static final int NUMBLOCKS = 20;
   static final int BLOCK_LENGTH_MULTIPLIER = 79;
+  static final long FIRST_BLK_ID = 1;
 
   @Before
   public void setUp() throws Exception {
@@ -55,15 +57,25 @@ public void setUp() throws Exception {
     SimulatedFSDataset.setFactory(conf);
   }
 
-  long blockIdToLen(long blkid) {
+  static long blockIdToLen(long blkid) {
     return blkid * BLOCK_LENGTH_MULTIPLIER;
   }
 
-  int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
-      throws IOException {
+  static int addSomeBlocks(SimulatedFSDataset fsdataset) throws IOException {
+    return addSomeBlocks(fsdataset, false);
+  }
+
+  static int addSomeBlocks(SimulatedFSDataset fsdataset,
+      boolean negativeBlkID) throws IOException {
+    return addSomeBlocks(fsdataset, FIRST_BLK_ID, negativeBlkID);
+  }
+
+  static int addSomeBlocks(SimulatedFSDataset fsdataset, long startingBlockId,
+      boolean negativeBlkID) throws IOException {
     int bytesAdded = 0;
-    for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
-      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
+    for (long i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
+      long blkID = negativeBlkID ? i * -1 : i;
+      ExtendedBlock b = new ExtendedBlock(bpid, blkID, 0, 0);
       // we pass expected len as zero, - fsdataset should use the sizeof actual
       // data written
       ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
@@ -87,8 +99,16 @@ int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
     }
     return bytesAdded;
   }
-  int addSomeBlocks(SimulatedFSDataset fsdataset ) throws IOException {
-    return addSomeBlocks(fsdataset, 1);
+
+  static void readSomeBlocks(SimulatedFSDataset fsdataset,
+      boolean negativeBlkID) throws IOException {
+    for (long i = FIRST_BLK_ID; i <= NUMBLOCKS; ++i) {
+      long blkID = negativeBlkID ? i * -1 : i;
+      ExtendedBlock b = new ExtendedBlock(bpid, blkID, 0, 0);
+      assertTrue(fsdataset.isValidBlock(b));
+      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
+      checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
+    }
   }
 
   @Test
@@ -107,7 +127,7 @@ public void testFSDatasetFactory() {
   @Test
   public void testGetMetaData() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
-    ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
+    ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
     try {
       assertTrue(fsdataset.getMetaDataInputStream(b) == null);
       assertTrue("Expected an IO exception", false);
@@ -115,7 +135,7 @@ public void testGetMetaData() throws IOException {
       // ok - as expected
     }
     addSomeBlocks(fsdataset); // Only need to add one but ....
-    b = new ExtendedBlock(bpid, 1, 0, 0);
+    b = new ExtendedBlock(bpid, FIRST_BLK_ID, 0, 0);
     InputStream metaInput = fsdataset.getMetaDataInputStream(b);
     DataInputStream metaDataInput = new DataInputStream(metaInput);
     short version = metaDataInput.readShort();
@@ -138,14 +158,14 @@ public void testStorageUsage() throws IOException {
-  void checkBlockDataAndSize(SimulatedFSDataset fsdataset, ExtendedBlock b,
-      long expectedLen) throws IOException {
+  static void checkBlockDataAndSize(SimulatedFSDataset fsdataset,
+      ExtendedBlock b, long expectedLen) throws IOException {
     InputStream input = fsdataset.getBlockInputStream(b);
     long lengthRead = 0;
     int data;
     while ((data = input.read()) != -1) {
       assertEquals(SimulatedFSDataset.simulatedByte(b.getLocalBlock(),
-          lengthRead), data);
+          lengthRead), (byte) (data & SimulatedFSDataset.BYTE_MASK));
       lengthRead++;
     }
     assertEquals(expectedLen, lengthRead);
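Since read() now returns values in 0..255 while simulatedByte still returns a signed byte, the assertion narrows the stream value back to a byte before comparing. A small sketch of that equivalence with an assumed value (not part of the patch):

    public class NarrowingDemo {
      public static void main(String[] args) {
        byte expected = (byte) 0xD6;   // what simulatedByte might return: -42
        int fromStream = 0xD6;         // what the masked read() returns: 214
        // Equal once the stream value is narrowed back to a byte.
        System.out.println(expected == (byte) (fromStream & 0xff)); // prints true
      }
    }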
@@ -153,14 +173,14 @@ void checkBlockDataAndSize(SimulatedFSDataset fsdataset, ExtendedBlock b,
   @Test
   public void testWriteRead() throws IOException {
-    final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
-    addSomeBlocks(fsdataset);
-    for (int i=1; i <= NUMBLOCKS; ++i) {
-      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
-      assertTrue(fsdataset.isValidBlock(b));
-      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
-      checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
-    }
+    testWriteRead(false);
+    testWriteRead(true);
   }
 
+  private void testWriteRead(boolean negativeBlkID) throws IOException {
+    final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
+    addSomeBlocks(fsdataset, negativeBlkID);
+    readSomeBlocks(fsdataset, negativeBlkID);
+  }
+
   @Test
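With the refactored helpers, a dedicated negative-ID case becomes a two-line test. A hypothetical example in the style of this class, assuming the getSimulatedFSDataset, addSomeBlocks, and readSomeBlocks members shown above (not part of the patch):

      @Test
      public void testWriteReadNegativeBlockIds() throws IOException {
        // Round-trip NUMBLOCKS blocks whose ids are -1 .. -NUMBLOCKS.
        final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
        addSomeBlocks(fsdataset, true);   // write with negative block ids
        readSomeBlocks(fsdataset, true);  // read back, verifying length and data
      }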
@@ -225,7 +245,7 @@ public void testInjectionNonEmpty() throws IOException {
     SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
     // Add come blocks whose block ids do not conflict with
     // the ones we are going to inject.
-    bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1);
+    bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1, false);
     sfsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
     sfsdataset.getBlockReport(bpid);
@@ -283,7 +303,7 @@ public void checkInvalidBlock(ExtendedBlock b) {
   @Test
   public void testInValidBlocks() throws IOException {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
-    ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
+    ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
     checkInvalidBlock(b);
 
     // Now check invlaid after adding some blocks