HDFS-12712. [9806] Code style cleanup

parent 80c3fec3a1
commit 8239e3afb3
@@ -47,7 +47,6 @@ public final class HdfsConstants {
public static final String WARM_STORAGE_POLICY_NAME = "WARM";
public static final byte COLD_STORAGE_POLICY_ID = 2;
public static final String COLD_STORAGE_POLICY_NAME = "COLD";
// branch HDFS-9806 XXX temporary until HDFS-7076
public static final byte PROVIDED_STORAGE_POLICY_ID = 1;
public static final String PROVIDED_STORAGE_POLICY_NAME = "PROVIDED";
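The hunk above drops the stale branch marker above the PROVIDED storage policy constants. As a hedged illustration of how the policy-name constant is meant to be consumed by a client (not part of this commit; the path and wrapper class below are made up, and it assumes a cluster whose policy suite includes PROVIDED):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetProvidedPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // "/backups" is a hypothetical path used only for this sketch.
      fs.setStoragePolicy(new Path("/backups"),
          HdfsConstants.PROVIDED_STORAGE_POLICY_NAME);
    }
  }
}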
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.protocol;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

@@ -40,6 +41,32 @@
@InterfaceStability.Evolving
public class LocatedBlock {

/**
* Comparator that ensures that a PROVIDED storage type is greater than any
* other storage type. Any other storage types are considered equal.
*/
private static class ProvidedLastComparator
implements Comparator<DatanodeInfoWithStorage>, Serializable {

private static final long serialVersionUID = 6441720011443190984L;

@Override
public int compare(DatanodeInfoWithStorage dns1,
DatanodeInfoWithStorage dns2) {
if (StorageType.PROVIDED.equals(dns1.getStorageType())
&& !StorageType.PROVIDED.equals(dns2.getStorageType())) {
return 1;
}
if (!StorageType.PROVIDED.equals(dns1.getStorageType())
&& StorageType.PROVIDED.equals(dns2.getStorageType())) {
return -1;
}
// Storage types of dns1 and dns2 are now both provided or not provided;
// thus, are essentially equal for the purpose of this comparator.
return 0;
}
}

private final ExtendedBlock b;
private long offset; // offset of the first byte of the block in the file
private final DatanodeInfoWithStorage[] locs;

@@ -52,6 +79,10 @@ public class LocatedBlock {
// their locations are not part of this object
private boolean corrupt;
private Token<BlockTokenIdentifier> blockToken = new Token<>();

// use one instance of the Provided comparator as it uses no state.
private static ProvidedLastComparator providedLastComparator =
new ProvidedLastComparator();
/**
* List of cached datanode locations
*/

@@ -156,29 +187,6 @@ public void updateCachedStorageInfo() {
}
}

/**
* Comparator that ensures that a PROVIDED storage type is greater than
* any other storage type. Any other storage types are considered equal.
*/
private class ProvidedLastComparator
implements Comparator<DatanodeInfoWithStorage> {
@Override
public int compare(DatanodeInfoWithStorage dns1,
DatanodeInfoWithStorage dns2) {
if (StorageType.PROVIDED.equals(dns1.getStorageType())
&& !StorageType.PROVIDED.equals(dns2.getStorageType())) {
return 1;
}
if (!StorageType.PROVIDED.equals(dns1.getStorageType())
&& StorageType.PROVIDED.equals(dns2.getStorageType())) {
return -1;
}
// Storage types of dns1 and dns2 are now both provided or not provided;
// thus, are essentially equal for the purpose of this comparator.
return 0;
}
}

/**
* Moves all locations that have {@link StorageType}
* {@code PROVIDED} to the end of the locations array without

@@ -196,9 +204,8 @@ public void moveProvidedToEnd(int activeLen) {
}
// as this is a stable sort, for elements that are equal,
// the current order of the elements is maintained
Arrays.sort(locs, 0,
(activeLen < locs.length) ? activeLen : locs.length,
new ProvidedLastComparator());
Arrays.sort(locs, 0, (activeLen < locs.length) ? activeLen : locs.length,
providedLastComparator);
}

public long getStartOffset() {
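The LocatedBlock hunks above replace the non-static inner comparator (allocated on every moveProvidedToEnd call) with a single static, Serializable instance, and rely on Arrays.sort being stable so that only PROVIDED locations move to the end. A minimal standalone sketch of the same ordering idea, using plain strings as stand-ins for DatanodeInfoWithStorage (illustrative only, not the HDFS types):

import java.util.Arrays;
import java.util.Comparator;

public class ProvidedLastSortSketch {
  public static void main(String[] args) {
    String[] locs = {"PROVIDED-1", "DISK-1", "PROVIDED-2", "DISK-2", "SSD-1"};
    Comparator<String> providedLast = (a, b) -> {
      boolean pa = a.startsWith("PROVIDED");
      boolean pb = b.startsWith("PROVIDED");
      if (pa && !pb) {
        return 1;   // provided entries sort after everything else
      }
      if (!pa && pb) {
        return -1;
      }
      return 0;     // otherwise "equal": the stable sort keeps existing order
    };
    // Only the first activeLen entries are sorted, mirroring moveProvidedToEnd.
    int activeLen = Math.min(4, locs.length);
    Arrays.sort(locs, 0, activeLen, providedLast);
    // Expected: [DISK-1, DISK-2, PROVIDED-1, PROVIDED-2, SSD-1]
    System.out.println(Arrays.toString(locs));
  }
}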
@@ -1240,7 +1240,6 @@ private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
blk);
//TODO use locatedBlocks builder??
return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(), pos,
false);
} else {

@@ -2497,8 +2496,8 @@ public boolean processReport(final DatanodeID nodeID,

// To minimize startup time, we discard any second (or later) block reports
// that we receive while still in startup phase.
// !#! Register DN with provided storage, not with storage owned by DN
// !#! DN should still have a ref to the DNStorageInfo
// Register DN with provided storage, not with storage owned by DN
// DN should still have a ref to the DNStorageInfo.
DatanodeStorageInfo storageInfo =
providedStorageMap.getStorage(node, storage);
@@ -294,6 +294,7 @@ LocatedBlock newLocatedBlock(ExtendedBlock eb,

@Override
LocatedBlocks build(DatanodeDescriptor client) {
// TODO choose provided locations close to the client.
return new LocatedBlocks(
flen, isUC, blocks, last, lastComplete, feInfo, ecPolicy);
}

@@ -333,7 +334,6 @@ DatanodeStorageInfo getProvidedStorage(
DatanodeDescriptor dn, DatanodeStorage s) {
dns.put(dn.getDatanodeUuid(), dn);
dnR.add(dn);
// TODO: maintain separate RPC ident per dn
return storageMap.get(s.getStorageID());
}

@@ -522,7 +522,7 @@ public void remove() {

@Override
public int getNumberOfBlocks() {
// VERIFY: only printed for debugging
// is ignored for ProvidedBlockList.
return -1;
}
@@ -640,7 +640,6 @@ void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo)
* that holds the snapshot.
*/
void doFinalize(File dnCurDir) throws IOException {
LOG.info("doFinalize: " + dnCurDir);
if (dnCurDir == null) {
return; //we do nothing if the directory is null
}
@@ -149,8 +149,8 @@ public static boolean createStorageID(
final String oldStorageID = sd.getStorageUuid();
if (sd.getStorageLocation() != null &&
sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
// We only support one provided storage per datanode for now.
// TODO support multiple provided storage ids per datanode.
// Only one provided storage id is supported.
// TODO support multiple provided storage ids
sd.setStorageUuid(conf.get(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT));
return false;
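The createStorageID hunk above reads one fixed storage ID for all PROVIDED directories instead of minting a UUID per directory. A small hedged sketch of that lookup, using the DFSConfigKeys symbols that appear in this diff (the wrapper class is invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ProvidedStorageIdSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to the built-in default when the key is unset.
    String providedStorageId = conf.get(
        DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
        DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT);
    // Every PROVIDED storage directory on the datanode reports this same ID.
    System.out.println("provided storage id: " + providedStorageId);
  }
}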
@@ -310,7 +310,6 @@ public ReplicaRecoveryInfo createInfo()

@Override
public int compareWith(ScanInfo info) {
//local scanning cannot find any provided blocks.
if (info.getFileRegion().equals(
new FileRegion(this.getBlockId(), new Path(getRemoteURI()),
fileOffset, this.getNumBytes(), this.getGenerationStamp()))) {
@@ -108,7 +108,7 @@ public boolean matchesStorageDirectory(StorageDirectory sd,
}
if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED ||
storageType == StorageType.PROVIDED) {
//only one of these is PROVIDED; so it cannot be a match!
// only one PROVIDED storage directory can exist; so this cannot match!
return false;
}
// both storage directories are local

@@ -213,6 +213,8 @@ public void makeBlockPoolDir(String blockPoolID,
}
if (storageType == StorageType.PROVIDED) {
// skip creation if the storage type is PROVIDED
Storage.LOG.info("Skipping creating directory for block pool "
+ blockPoolID + " for PROVIDED storage location " + this);
return;
}

@@ -231,7 +233,7 @@ public void makeBlockPoolDir(String blockPoolID,

@Override // Checkable
public VolumeCheckResult check(CheckContext context) throws IOException {
//we assume provided storage locations are always healthy,
// assume provided storage locations are always healthy,
// and check only for local storages.
if (storageType != StorageType.PROVIDED) {
DiskChecker.checkDir(
@@ -1760,7 +1760,7 @@ public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {

Set<String> missingVolumesReported = new HashSet<>();
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
//skip blocks in PROVIDED storage
// skip PROVIDED replicas.
if (b.getVolume().getStorageType() == StorageType.PROVIDED) {
continue;
}
@@ -421,7 +421,7 @@ private class ProviderBlockIteratorImpl

@Override
public void close() throws IOException {
//No action needed
blockAliasMap.close();
}

@Override

@@ -467,7 +467,7 @@ public void rewind() {

@Override
public void save() throws IOException {
//We do not persist the state of this iterator anywhere, locally.
// We do not persist the state of this iterator locally.
// We just re-scan provided volumes as necessary.
state.lastSavedMs = Time.now();
}

@@ -615,7 +615,6 @@ public LinkedList<ScanInfo> compileReport(String bpid,
LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
throws InterruptedException, IOException {
LOG.info("Compiling report for volume: " + this + " bpid " + bpid);
//get the report from the appropriate block pool.
if(bpSlices.containsKey(bpid)) {
bpSlices.get(bpid).compileReport(report, reportCompiler);
}
@@ -144,9 +144,11 @@ public void testDefaultPolicies() {
expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
", storageTypes=[SSD], creationFallbacks=[DISK], " +
"replicationFallbacks=[DISK]}");
expectedPolicyStrings.put(PROVIDED, "BlockStoragePolicy{PROVIDED:" + PROVIDED +
", storageTypes=[PROVIDED, DISK], creationFallbacks=[PROVIDED, DISK], " +
"replicationFallbacks=[PROVIDED, DISK]}");
expectedPolicyStrings.put(PROVIDED,
"BlockStoragePolicy{PROVIDED:" + PROVIDED
+ ", storageTypes=[PROVIDED, DISK], "
+ "creationFallbacks=[PROVIDED, DISK], "
+ "replicationFallbacks=[PROVIDED, DISK]}");

for(byte i = 1; i < 16; i++) {
final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -325,11 +325,12 @@ public void testgoodScript() throws IOException, URISyntaxException {
*/
@Test
public void testBadScript() throws IOException, URISyntaxException {
HelperFunction("/"+ Shell.appendScriptExtension("topology-broken-script"), 0);
HelperFunction("/" + Shell.appendScriptExtension("topology-broken-script"),
0);
}

/**
* Test with different sorting functions but include datanodes
* Test with different sorting functions but include datanodes.
* with provided storage
* @throws IOException
* @throws URISyntaxException
@@ -63,7 +63,6 @@ private static void createFileIfNotExists(String baseDir) throws IOException {
if(!newFile.exists()) {
newFile.createNewFile();
OutputStream writer = new FileOutputStream(newFile.getAbsolutePath());
//FILE_LEN is length in bytes.
byte[] bytes = new byte[1];
bytes[0] = (byte) 0;
for(int i=0; i< FILE_LEN; i++) {

@@ -106,7 +105,7 @@ public void setUp() throws IOException {
* @param dataLength length
* @throws IOException
*/
private void verifyReplicaContents(File file,
public static void verifyReplicaContents(File file,
InputStream ins, long fileOffset, long dataLength)
throws IOException {
@@ -26,8 +26,6 @@
import static org.mockito.Mockito.when;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

@@ -35,9 +33,6 @@
import java.io.Writer;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;

@@ -76,6 +71,7 @@
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.TestProvidedReplicaImpl;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;

@@ -314,33 +310,6 @@ private int getNumVolumes() {
}
}

private void compareBlkFile(InputStream ins, String filepath)
throws FileNotFoundException, IOException {
try (ReadableByteChannel i = Channels.newChannel(
new FileInputStream(new File(filepath)))) {
try (ReadableByteChannel j = Channels.newChannel(ins)) {
ByteBuffer ib = ByteBuffer.allocate(4096);
ByteBuffer jb = ByteBuffer.allocate(4096);
while (true) {
int il = i.read(ib);
int jl = j.read(jb);
if (il < 0 || jl < 0) {
assertEquals(il, jl);
break;
}
ib.flip();
jb.flip();
int cmp = Math.min(ib.remaining(), jb.remaining());
for (int k = 0; k < cmp; ++k) {
assertEquals(ib.get(), jb.get());
}
ib.compact();
jb.compact();
}
}
}
}

@Before
public void setUp() throws IOException {
datanode = mock(DataNode.class);

@@ -445,7 +414,8 @@ public void testProvidedBlockRead() throws IOException {
HdfsConstants.GRANDFATHER_GENERATION_STAMP);
InputStream ins = dataset.getBlockInputStream(eb, 0);
String filepath = blkToPathMap.get((long) id);
compareBlkFile(ins, filepath);
TestProvidedReplicaImpl.verifyReplicaContents(new File(filepath), ins, 0,
BLK_LEN);
}
}
@@ -23,8 +23,8 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-fs2img</artifactId>
<version>3.1.0-SNAPSHOT</version>
<description>fs2img</description>
<name>fs2img</name>
<description>Apache Hadoop Image Generation Tool</description>
<name>Apache Hadoop Image Generation Tool</name>
<packaging>jar</packaging>

<properties>
@@ -125,7 +125,8 @@ public int run(String[] argv) throws Exception {
opts.blockPoolID(o.getValue());
break;
default:
throw new UnsupportedOperationException("Internal error");
throw new UnsupportedOperationException(
"Unknown option: " + o.getOpt());
}
}
@@ -265,7 +265,6 @@ synchronized void writeDirEntry(DirEntry e) throws IOException {
e.writeDelimitedTo(dirs);
}

// from FSImageFormatProtobuf... why not just read position from the stream?
private static int getOndiskSize(com.google.protobuf.GeneratedMessage s) {
return CodedOutputStream.computeRawVarint32Size(s.getSerializedSize())
+ s.getSerializedSize();

@@ -283,7 +282,7 @@ public synchronized void close() throws IOException {
dircache.clear();

// close side files
IOUtils.cleanup(null, dirs, inodes, blocks);
IOUtils.cleanupWithLogger(null, dirs, inodes, blocks);
if (null == dirs || null == inodes) {
// init failed
if (raw != null) {

@@ -317,7 +316,6 @@ public synchronized void close() throws IOException {
*/
void writeMD5(String imagename) throws IOException {
if (null == outdir) {
//LOG.warn("Not writing MD5");
return;
}
MD5Hash md5 = new MD5Hash(digest.digest());

@@ -382,7 +380,6 @@ void writeINodeSection() throws IOException {

void writeDirSection() throws IOException {
// No header, so dirs can be written/compressed independently
//INodeDirectorySection.Builder b = INodeDirectorySection.newBuilder();
OutputStream sec = raw;
// copy dirs
try (FileInputStream in = new FileInputStream(dirsTmp)) {
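The close() hunk above swaps the deprecated IOUtils.cleanup for IOUtils.cleanupWithLogger, which takes an org.slf4j.Logger (the diff passes null to keep the old behaviour). A rough sketch of the replacement call, with throwaway streams standing in for the ImageWriter side files:

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupWithLoggerSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupWithLoggerSketch.class);

  public static void main(String[] args) {
    ByteArrayOutputStream dirs = new ByteArrayOutputStream();
    ByteArrayOutputStream inodes = new ByteArrayOutputStream();
    // Closes each stream, logging any close failure through LOG;
    // passing null instead of LOG suppresses that logging.
    IOUtils.cleanupWithLogger(LOG, dirs, inodes);
  }
}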
@@ -121,7 +121,6 @@ void writeBlock(long blockId, long offset, long length, long genStamp,
INode toFile(UGIResolver ugi, BlockResolver blk,
BlockAliasMap.Writer<FileRegion> out) throws IOException {
final FileStatus s = getFileStatus();
// TODO should this store resolver's user/group?
ugi.addUser(s.getOwner());
ugi.addGroup(s.getGroup());
INodeFile.Builder b = INodeFile.newBuilder()
@@ -84,19 +84,22 @@
import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
import static org.junit.Assert.*;

public class TestNameNodeProvidedImplementation {
/**
* Integration tests for the Provided implementation.
*/
public class ITestProvidedImplementation {

@Rule public TestName name = new TestName();
public static final Logger LOG =
LoggerFactory.getLogger(TestNameNodeProvidedImplementation.class);
LoggerFactory.getLogger(ITestProvidedImplementation.class);

final Random r = new Random();
final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
final Path BASE = new Path(fBASE.toURI().toString());
final Path NAMEPATH = new Path(BASE, "providedDir");
final Path NNDIRPATH = new Path(BASE, "nnDir");
final String SINGLEUSER = "usr1";
final String SINGLEGROUP = "grp1";
private final Random r = new Random();
private final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
private final Path pBASE = new Path(fBASE.toURI().toString());
private final Path providedPath = new Path(pBASE, "providedDir");
private final Path nnDirPath = new Path(pBASE, "nnDir");
private final String singleUser = "usr1";
private final String singleGroup = "grp1";
private final int numFiles = 10;
private final String filePrefix = "file";
private final String fileSuffix = ".dat";

@@ -104,8 +107,8 @@ public class TestNameNodeProvidedImplementation {
private long providedDataSize = 0;
private final String bpid = "BP-1234-10.1.1.1-1224";

Configuration conf;
MiniDFSCluster cluster;
private Configuration conf;
private MiniDFSCluster cluster;

@Before
public void setSeed() throws Exception {

@@ -116,8 +119,8 @@ public void setSeed() throws Exception {
r.setSeed(seed);
System.out.println(name.getMethodName() + " seed: " + seed);
conf = new HdfsConfiguration();
conf.set(SingleUGIResolver.USER, SINGLEUSER);
conf.set(SingleUGIResolver.GROUP, SINGLEGROUP);
conf.set(SingleUGIResolver.USER, singleUser);
conf.set(SingleUGIResolver.GROUP, singleGroup);

conf.set(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT);

@@ -126,28 +129,28 @@ public void setSeed() throws Exception {
conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
TextFileRegionAliasMap.class, BlockAliasMap.class);
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_WRITE_DIR,
NNDIRPATH.toString());
nnDirPath.toString());
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_READ_FILE,
new Path(NNDIRPATH, fileNameFromBlockPoolID(bpid)).toString());
new Path(nnDirPath, fileNameFromBlockPoolID(bpid)).toString());
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER, ",");

conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
new File(NAMEPATH.toUri()).toString());
File imageDir = new File(NAMEPATH.toUri());
new File(providedPath.toUri()).toString());
File imageDir = new File(providedPath.toUri());
if (!imageDir.exists()) {
LOG.info("Creating directory: " + imageDir);
imageDir.mkdirs();
}

File nnDir = new File(NNDIRPATH.toUri());
File nnDir = new File(nnDirPath.toUri());
if (!nnDir.exists()) {
nnDir.mkdirs();
}

// create 10 random files under BASE
// create 10 random files under pBASE
for (int i=0; i < numFiles; i++) {
File newFile = new File(
new Path(NAMEPATH, filePrefix + i + fileSuffix).toUri());
new Path(providedPath, filePrefix + i + fileSuffix).toUri());
if(!newFile.exists()) {
try {
LOG.info("Creating " + newFile.toString());
@@ -244,9 +247,9 @@ void startCluster(Path nspath, int numDatanodes,
@Test(timeout=20000)
public void testLoadImage() throws Exception {
final long seed = r.nextLong();
LOG.info("NAMEPATH: " + NAMEPATH);
createImage(new RandomTreeWalk(seed), NNDIRPATH, FixedBlockResolver.class);
startCluster(NNDIRPATH, 0,
LOG.info("providedPath: " + providedPath);
createImage(new RandomTreeWalk(seed), nnDirPath, FixedBlockResolver.class);
startCluster(nnDirPath, 0,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
false);

@@ -260,8 +263,8 @@ public void testLoadImage() throws Exception {
hs.getPath().toUri().getPath());
assertEquals(rs.getPermission(), hs.getPermission());
assertEquals(rs.getLen(), hs.getLen());
assertEquals(SINGLEUSER, hs.getOwner());
assertEquals(SINGLEGROUP, hs.getGroup());
assertEquals(singleUser, hs.getOwner());
assertEquals(singleGroup, hs.getGroup());
assertEquals(rs.getAccessTime(), hs.getAccessTime());
assertEquals(rs.getModificationTime(), hs.getModificationTime());
}

@@ -271,10 +274,10 @@ public void testLoadImage() throws Exception {
public void testProvidedReporting() throws Exception {
conf.setClass(ImageWriter.Options.UGI_CLASS,
SingleUGIResolver.class, UGIResolver.class);
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
int numDatanodes = 10;
startCluster(NNDIRPATH, numDatanodes,
startCluster(nnDirPath, numDatanodes,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
false);
long diskCapacity = 1000;

@@ -350,10 +353,10 @@ public void testProvidedReporting() throws Exception {
public void testDefaultReplication() throws Exception {
int targetReplication = 2;
conf.setInt(FixedBlockMultiReplicaResolver.REPLICATION, targetReplication);
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockMultiReplicaResolver.class);
// make the last Datanode with only DISK
startCluster(NNDIRPATH, 3, null,
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},

@@ -364,15 +367,10 @@ public void testDefaultReplication() throws Exception {

FileSystem fs = cluster.getFileSystem();
int count = 0;
for (TreePath e : new FSTreeWalk(NAMEPATH, conf)) {
for (TreePath e : new FSTreeWalk(providedPath, conf)) {
FileStatus rs = e.getFileStatus();
Path hp = removePrefix(NAMEPATH, rs.getPath());
LOG.info("hp " + hp.toUri().getPath());
//skip HDFS specific files, which may have been created later on.
if (hp.toString().contains("in_use.lock")
|| hp.toString().contains("current")) {
continue;
}
Path hp = removePrefix(providedPath, rs.getPath());
LOG.info("path: " + hp.toUri().getPath());
e.accept(count++);
assertTrue(fs.exists(hp));
FileStatus hs = fs.getFileStatus(hp);

@@ -411,15 +409,10 @@ private void verifyFileSystemContents() throws Exception {
FileSystem fs = cluster.getFileSystem();
int count = 0;
// read NN metadata, verify contents match
for (TreePath e : new FSTreeWalk(NAMEPATH, conf)) {
for (TreePath e : new FSTreeWalk(providedPath, conf)) {
FileStatus rs = e.getFileStatus();
Path hp = removePrefix(NAMEPATH, rs.getPath());
LOG.info("hp " + hp.toUri().getPath());
//skip HDFS specific files, which may have been created later on.
if(hp.toString().contains("in_use.lock")
|| hp.toString().contains("current")) {
continue;
}
Path hp = removePrefix(providedPath, rs.getPath());
LOG.info("path: " + hp.toUri().getPath());
e.accept(count++);
assertTrue(fs.exists(hp));
FileStatus hs = fs.getFileStatus(hp);

@@ -462,7 +455,7 @@ private void verifyFileSystemContents() throws Exception {
private BlockLocation[] createFile(Path path, short replication,
long fileLen, long blockLen) throws IOException {
FileSystem fs = cluster.getFileSystem();
//create a sample file that is not provided
// create a file that is not provided
DFSTestUtil.createFile(fs, path, false, (int) blockLen,
fileLen, blockLen, replication, 0, true);
return fs.getFileBlockLocations(path, 0, fileLen);

@@ -471,7 +464,7 @@ private BlockLocation[] createFile(Path path, short replication,
@Test(timeout=30000)
public void testClusterWithEmptyImage() throws IOException {
// start a cluster with 2 datanodes without any provided storage
startCluster(NNDIRPATH, 2, null,
startCluster(nnDirPath, 2, null,
new StorageType[][] {
{StorageType.DISK},
{StorageType.DISK}},

@@ -518,10 +511,10 @@ private void checkUniqueness(DatanodeInfo[] locations) {
*/
@Test(timeout=50000)
public void testSetReplicationForProvidedFiles() throws Exception {
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// 10 Datanodes with both DISK and PROVIDED storage
startCluster(NNDIRPATH, 10,
startCluster(nnDirPath, 10,
new StorageType[]{
StorageType.PROVIDED, StorageType.DISK},
null,
@@ -559,9 +552,9 @@ private void setAndUnsetReplication(String filename) throws Exception {

@Test(timeout=30000)
public void testProvidedDatanodeFailures() throws Exception {
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
startCluster(NNDIRPATH, 3, null,
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},

@@ -630,10 +623,10 @@ public void testProvidedDatanodeFailures() throws Exception {

@Test(timeout=300000)
public void testTransientDeadDatanodes() throws Exception {
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// 3 Datanodes, 2 PROVIDED and other DISK
startCluster(NNDIRPATH, 3, null,
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},

@@ -668,10 +661,10 @@ private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {

@Test(timeout=30000)
public void testNamenodeRestart() throws Exception {
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// 3 Datanodes, 2 PROVIDED and other DISK
startCluster(NNDIRPATH, 3, null,
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},

@@ -696,7 +689,7 @@ private void verifyFileLocation(int fileIndex, int replication)
cluster.getConfiguration(0));
if (fileIndex < numFiles && fileIndex >= 0) {
String filename = filePrefix + fileIndex + fileSuffix;
File file = new File(new Path(NAMEPATH, filename).toUri());
File file = new File(new Path(providedPath, filename).toUri());
long fileLen = file.length();
long blockSize = conf.getLong(FixedBlockResolver.BLOCKSIZE,
FixedBlockResolver.BLOCKSIZE_DEFAULT);

@@ -710,10 +703,10 @@ private void verifyFileLocation(int fileIndex, int replication)
@Test(timeout=30000)
public void testSetClusterID() throws Exception {
String clusterID = "PROVIDED-CLUSTER";
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class, clusterID, TextFileRegionAliasMap.class);
// 2 Datanodes, 1 PROVIDED and other DISK
startCluster(NNDIRPATH, 2, null,
startCluster(nnDirPath, 2, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.DISK}},

@@ -726,10 +719,10 @@ public void testSetClusterID() throws Exception {
public void testNumberOfProvidedLocations() throws Exception {
// set default replication to 4
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// start with 4 PROVIDED location
startCluster(NNDIRPATH, 4,
startCluster(nnDirPath, 4,
new StorageType[]{
StorageType.PROVIDED, StorageType.DISK},
null,

@@ -759,10 +752,10 @@ public void testNumberOfProvidedLocationsManyBlocks() throws Exception {
conf.setLong(FixedBlockResolver.BLOCKSIZE, baseFileLen/10);
// set default replication to 4
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// start with 4 PROVIDED location
startCluster(NNDIRPATH, 4,
startCluster(nnDirPath, 4,
new StorageType[]{
StorageType.PROVIDED, StorageType.DISK},
null,

@@ -795,15 +788,15 @@ public void testInMemoryAliasMap() throws Exception {
levelDBAliasMapServer.setConf(conf);
levelDBAliasMapServer.start();

createImage(new FSTreeWalk(NAMEPATH, conf),
NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf),
nnDirPath,
FixedBlockResolver.class, "",
InMemoryLevelDBAliasMapClient.class);
levelDBAliasMapServer.close();

// start cluster with two datanodes,
// each with 1 PROVIDED volume and other DISK volume
startCluster(NNDIRPATH, 2,
startCluster(nnDirPath, 2,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
null, false);
verifyFileSystemContents();

@@ -841,9 +834,9 @@ private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,

@Test
public void testDatanodeLifeCycle() throws Exception {
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
startCluster(NNDIRPATH, 3,
startCluster(nnDirPath, 3,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
null, false);

@@ -915,7 +908,7 @@ public void testProvidedWithHierarchicalTopology() throws Exception {
"BlockPlacementPolicyRackFaultTolerant",
"BlockPlacementPolicyWithNodeGroup",
"BlockPlacementPolicyWithUpgradeDomain"};
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
String[] racks =
{"/pod0/rack0", "/pod0/rack0", "/pod0/rack1", "/pod0/rack1",

@@ -923,7 +916,7 @@ public void testProvidedWithHierarchicalTopology() throws Exception {
for (String policy: policies) {
LOG.info("Using policy: " + packageName + "." + policy);
conf.set(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, packageName + "." + policy);
startCluster(NNDIRPATH, racks.length,
startCluster(nnDirPath, racks.length,
new StorageType[]{StorageType.PROVIDED, StorageType.DISK},
null, false, racks);
verifyFileSystemContents();
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;

@@ -43,7 +42,6 @@ public class RandomTreeWalk extends TreeWalk {
private final float depth;
private final int children;
private final Map<Long, Long> mSeed;
//private final AtomicLong blockIds = new AtomicLong(1L << 30);

RandomTreeWalk(long seed) {
this(seed, 10);

@@ -54,7 +52,7 @@ public class RandomTreeWalk extends TreeWalk {
}

RandomTreeWalk(long seed, int children, float depth) {
this(randomRoot(seed), seed, children, 0.15f);
this(randomRoot(seed), seed, children, depth);
}

RandomTreeWalk(Path root, long seed, int children, float depth) {