HDFS-1245. Pluggable block id generation. Contributed by Konstantin Shvachko.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432539 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Konstantin Shvachko 2013-01-13 01:13:29 +00:00
parent e1a3043382
commit 06406d7056
6 changed files with 104 additions and 13 deletions

View File

@ -0,0 +1,31 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A generic generator of ID numbers, used to produce
 * advancing sequences of values of various kinds.
 */
@InterfaceAudience.Private
public interface IdGenerator {
  /**
   * Advance the generator and return the next value in the sequence.
   *
   * @return the next generated ID
   */
  long nextValue();
}

View File

@ -27,7 +27,7 @@
* This class is thread safe. * This class is thread safe.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public abstract class SequentialNumber { public abstract class SequentialNumber implements IdGenerator {
private final AtomicLong currentValue; private final AtomicLong currentValue;
/** Create a new instance with the given initial value. */ /** Create a new instance with the given initial value. */

View File

@ -701,6 +701,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-4274. BlockPoolSliceScanner does not close verification log during HDFS-4274. BlockPoolSliceScanner does not close verification log during
shutdown. (Chris Nauroth via suresh) shutdown. (Chris Nauroth via suresh)
HDFS-1245. Pluggable block id generation. (shv)
BREAKDOWN OF HDFS-3077 SUBTASKS BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs. HDFS-3077. Quorum-based protocol for reading and writing edit logs.

View File

@ -59,6 +59,7 @@
import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.IdGenerator;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
@ -92,6 +93,7 @@ public class FSImage implements Closeable {
final private Configuration conf; final private Configuration conf;
protected NNStorageRetentionManager archivalManager; protected NNStorageRetentionManager archivalManager;
protected IdGenerator blockIdGenerator;
/** /**
* Construct an FSImage * Construct an FSImage
@ -137,6 +139,9 @@ void format(FSNamesystem fsn, String clusterId) throws IOException {
Preconditions.checkState(fileCount == 1, Preconditions.checkState(fileCount == 1,
"FSImage.format should be called with an uninitialized namesystem, has " + "FSImage.format should be called with an uninitialized namesystem, has " +
fileCount + " files"); fileCount + " files");
// BlockIdGenerator is defined during formatting
// currently there is only one BlockIdGenerator
blockIdGenerator = createBlockIdGenerator(fsn);
NamespaceInfo ns = NNStorage.newNamespaceInfo(); NamespaceInfo ns = NNStorage.newNamespaceInfo();
ns.clusterID = clusterId; ns.clusterID = clusterId;
@ -253,6 +258,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
doRollback(); doRollback();
break; break;
case REGULAR: case REGULAR:
default:
// just load the image // just load the image
} }
@ -737,6 +743,9 @@ private void loadFSImage(File curFile, MD5Hash expectedMd5,
FSImageFormat.Loader loader = new FSImageFormat.Loader( FSImageFormat.Loader loader = new FSImageFormat.Loader(
conf, target); conf, target);
loader.load(curFile); loader.load(curFile);
// BlockIdGenerator is determined after loading image
// currently there is only one BlockIdGenerator
blockIdGenerator = createBlockIdGenerator(target);
target.setBlockPoolId(this.getBlockPoolID()); target.setBlockPoolId(this.getBlockPoolID());
// Check that the image digest we loaded matches up with what // Check that the image digest we loaded matches up with what
@ -1165,4 +1174,12 @@ public void updateLastAppliedTxIdFromWritten() {
public synchronized long getMostRecentCheckpointTxId() { public synchronized long getMostRecentCheckpointTxId() {
return storage.getMostRecentCheckpointTxId(); return storage.getMostRecentCheckpointTxId();
} }
public long getUniqueBlockId() {
return blockIdGenerator.nextValue();
}
public IdGenerator createBlockIdGenerator(FSNamesystem fsn) {
return new RandomBlockIdGenerator(fsn);
}
} }

View File

@ -79,7 +79,6 @@
import java.io.File; import java.io.File;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException; import java.io.IOException;
import java.io.OutputStreamWriter; import java.io.OutputStreamWriter;
import java.io.PrintWriter; import java.io.PrintWriter;
@ -2539,10 +2538,7 @@ private boolean completeFileInternal(String src,
private Block allocateBlock(String src, INodesInPath inodesInPath, private Block allocateBlock(String src, INodesInPath inodesInPath,
DatanodeDescriptor targets[]) throws IOException { DatanodeDescriptor targets[]) throws IOException {
assert hasWriteLock(); assert hasWriteLock();
Block b = new Block(DFSUtil.getRandom().nextLong(), 0, 0); Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0);
while(isValidBlock(b)) {
b.setBlockId(DFSUtil.getRandom().nextLong());
}
// Increment the generation stamp for every new block. // Increment the generation stamp for every new block.
b.setGenerationStamp(nextGenerationStamp()); b.setGenerationStamp(nextGenerationStamp());
b = dir.addBlock(src, inodesInPath, b, targets); b = dir.addBlock(src, inodesInPath, b, targets);
@ -4554,13 +4550,6 @@ void endCheckpoint(NamenodeRegistration registration,
} }
} }
/**
* Returns whether the given block is one pointed-to by a file.
*/
private boolean isValidBlock(Block b) {
return (blockManager.getBlockCollection(b) != null);
}
PermissionStatus createFsOwnerPermissions(FsPermission permission) { PermissionStatus createFsOwnerPermissions(FsPermission permission) {
return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission); return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission);
} }

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.util.IdGenerator;
/**
 * Generator of random block IDs.
 * Draws random longs until it finds one that is not already assigned to
 * a block tracked by the name-node's {@link BlockManager}.
 */
@InterfaceAudience.Private
public class RandomBlockIdGenerator implements IdGenerator {
  private final BlockManager blockManager;

  RandomBlockIdGenerator(FSNamesystem namesystem) {
    this.blockManager = namesystem.getBlockManager();
  }

  @Override // IdGenerator
  public long nextValue() {
    // Candidate block with a random ID; length and generation stamp are
    // irrelevant for the collision check, so both are left at 0.
    Block b = new Block(DFSUtil.getRandom().nextLong(), 0, 0);
    // Re-roll until the candidate ID does not collide with a block that
    // is already pointed to by some file in the namespace.
    while(isValidBlock(b)) {
      b.setBlockId(DFSUtil.getRandom().nextLong());
    }
    return b.getBlockId();
  }

  /**
   * Returns whether the given block is one pointed-to by a file,
   * i.e. its ID is already in use and must NOT be reused.
   */
  private boolean isValidBlock(Block b) {
    return (blockManager.getBlockCollection(b) != null);
  }
}