HDFS-3924. Multi-byte id in HdfsVolumeId. Contributed by Andrew Wang.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1384602 13f79535-47bb-0310-9956-ffa450edef68
parent cae6885b71
commit 86635168fe
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -240,6 +240,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
     (Andy Isaacson via eli)
 
+    HDFS-3924. Multi-byte id in HdfsVolumeId. (Andrew Wang via atm)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,10 +32,10 @@
 @InterfaceAudience.Public
 public class HdfsVolumeId implements VolumeId {
 
-  private final byte id;
+  private final byte[] id;
   private final boolean isValid;
 
-  public HdfsVolumeId(byte id, boolean isValid) {
+  public HdfsVolumeId(byte[] id, boolean isValid) {
     this.id = id;
     this.isValid = isValid;
   }
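Note on the field change above: the id goes from a single byte to a byte[]. The class already builds equals() and hashCode() with the commons-lang builders, which compare byte arrays element-wise rather than by reference, so the switch keeps value semantics for multi-byte ids. A minimal sketch of that builder behavior (the demo class is ours, not part of the patch):

import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;

public class ArrayBuilderSketch {
  public static void main(String[] args) {
    byte[] a = new byte[] { 0, 0, 0, 7 };
    byte[] b = new byte[] { 0, 0, 0, 7 };
    // append(byte[], byte[]) compares contents, not references
    boolean eq = new EqualsBuilder().append(a, b).isEquals();
    int ha = new HashCodeBuilder().append(a).toHashCode();
    int hb = new HashCodeBuilder().append(b).toHashCode();
    System.out.println(eq && (ha == hb)); // prints "true"
  }
}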
@@ -69,6 +70,6 @@ public boolean equals(Object obj) {
 
   @Override
   public String toString() {
-    return Byte.toString(id);
+    return Base64.encodeBase64String(id);
   }
 }
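With a byte[] id, toString() can no longer print a single decimal byte, so the patch renders the id as Base64 text. A small round-trip sketch (class name is ours; assumes commons-codec on the classpath):

import java.nio.ByteBuffer;
import org.apache.commons.codec.binary.Base64;

public class VolumeIdStringSketch {
  public static void main(String[] args) {
    // A 4-byte id like the datanode now produces (volume index 7, big-endian)
    byte[] id = ByteBuffer.allocate(4).putInt(7).array();
    String printable = Base64.encodeBase64String(id); // "AAAABw=="
    // Base64 is reversible, so the textual form loses no information
    byte[] roundTrip = Base64.decodeBase64(printable);
    System.out.println(printable + " " + (roundTrip.length == id.length));
  }
}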
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -201,7 +202,7 @@ static Map<LocatedBlock, List<VolumeId>> associateVolumeIdsWithBlocks(
       ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
       // Start off all IDs as invalid, fill it in later with results from RPCs
       for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId((byte)-1, false));
+        l.add(new HdfsVolumeId(null, false));
       }
       blockVolumeIds.put(b, l);
     }
@@ -234,8 +235,8 @@ static Map<LocatedBlock, List<VolumeId>> associateVolumeIdsWithBlocks(
         }
         // Get the VolumeId by indexing into the list of VolumeIds
         // provided by the datanode
-        HdfsVolumeId id = new HdfsVolumeId(metaVolumeIds.get(volumeIndex)[0],
-            true);
+        byte[] volumeId = metaVolumeIds.get(volumeIndex);
+        HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
         // Find out which index we are in the LocatedBlock's replicas
         LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
         DatanodeInfo[] dnInfos = locBlock.getLocations();
@@ -255,8 +256,8 @@ static Map<LocatedBlock, List<VolumeId>> associateVolumeIdsWithBlocks(
         }
         // Place VolumeId at the same index as the DN's index in the list of
         // replicas
-        List<VolumeId> VolumeIds = blockVolumeIds.get(locBlock);
-        VolumeIds.set(index, id);
+        List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
+        volumeIds.set(index, id);
       }
     }
     return blockVolumeIds;
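The client-side flow in the two hunks above is placeholder-then-overwrite: every replica slot starts as an invalid HdfsVolumeId(null, false), and a slot is only replaced once a datanode RPC reports a real volume id, so unreachable datanodes simply leave their slots invalid. A stripped-down sketch of the same pattern (names are illustrative, not the HDFS API):

import java.util.ArrayList;
import java.util.List;

public class PlaceholderSketch {
  public static void main(String[] args) {
    int replicaCount = 3;
    // Start every slot as "unknown", mirroring new HdfsVolumeId(null, false)
    List<byte[]> slots = new ArrayList<byte[]>(replicaCount);
    for (int i = 0; i < replicaCount; i++) {
      slots.add(null);
    }
    // A (pretend) RPC response later fills in replica 1 only
    slots.set(1, new byte[] { 0, 0, 0, 5 });
    System.out.println(slots.get(0) == null); // true: replica 0 stays invalid
  }
}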
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -24,6 +24,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -1676,10 +1677,10 @@ public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks)
     List<byte[]> blocksVolumeIds = new ArrayList<byte[]>(volumes.volumes.size());
     // List of indexes into the list of VolumeIds, pointing at the VolumeId of
     // the volume that the block is on
-    List<Integer> blocksVolumendexes = new ArrayList<Integer>(blocks.size());
+    List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blocks.size());
     // Initialize the list of VolumeIds simply by enumerating the volumes
     for (int i = 0; i < volumes.volumes.size(); i++) {
-      blocksVolumeIds.add(new byte[] { (byte) i });
+      blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
     }
     // Determine the index of the VolumeId of each block's volume, by comparing
     // the block's volume against the enumerated volumes
@@ -1700,10 +1701,10 @@ public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks)
       if (!isValid) {
         volumeIndex = Integer.MAX_VALUE;
       }
-      blocksVolumendexes.add(volumeIndex);
+      blocksVolumeIndexes.add(volumeIndex);
     }
     return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
-        blocksVolumeIds, blocksVolumendexes);
+        blocksVolumeIds, blocksVolumeIndexes);
   }
 
   @Override
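On the datanode side, each volume index is now encoded as a 4-byte int via ByteBuffer instead of a single cast byte, so indexes that would overflow one byte remain distinguishable. A round-trip sketch of that encoding (demo class is ours):

import java.nio.ByteBuffer;

public class VolumeIndexCodecSketch {
  public static void main(String[] args) {
    int volumeIndex = 300; // would not fit in the old single-byte encoding
    // ByteBuffer is big-endian by default: 300 -> {0x00, 0x00, 0x01, 0x2C}
    byte[] encoded = ByteBuffer.allocate(4).putInt(volumeIndex).array();
    int decoded = ByteBuffer.wrap(encoded).getInt();
    System.out.println(encoded.length + " " + decoded); // prints "4 300"
  }
}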