HDFS-2285. BackupNode should reject requests to modify namespace. Contributed by Konstantin Shvachko and Uma Maheswara Rao G.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1195013 13f79535-47bb-0310-9956-ffa450edef68
parent 2eca60199a
commit 64c019cccc
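
In outline: the BackupNode now enters safe mode during startup and never leaves it, so every namespace-modifying request fails the standard safe-mode check, and the safe-mode toggle itself is stubbed out over RPC. A minimal standalone sketch of that guard pattern, with illustrative names rather than the real HDFS classes:

import java.io.IOException;

// Illustrative stand-in for a namesystem that refuses mutations in safe mode.
class MiniNamesystem {
  private volatile boolean inSafeMode = true; // BackupNode: set at startup, never cleared

  boolean isInSafeMode() { return inSafeMode; }

  // Every mutating operation is guarded the same way.
  void createFile(String src) throws IOException {
    if (isInSafeMode()) {
      throw new IOException("Cannot create file " + src + ". Name node is in safe mode.");
    }
    // ... actual namespace mutation would go here ...
  }
}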

@@ -1816,6 +1816,9 @@ Release 0.22.0 - Unreleased
     HDFS-2452. OutOfMemoryError in DataXceiverServer takes down the DataNode
     (Uma Maheswara Rao via cos)
 
+    HDFS-2285. BackupNode should reject requests to modify namespace.
+    (shv and Uma Maheswara Rao)
+
 Release 0.21.1 - Unreleased
 
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)

@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolServerSideTranslatorR23;
 import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;

@@ -137,6 +138,10 @@ protected void initialize(Configuration conf) throws IOException {
         CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
     NamespaceInfo nsInfo = handshake(conf);
     super.initialize(conf);
+    if (false == namesystem.isInSafeMode()) {
+      namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    }
+
     // Backup node should never do lease recovery,
     // therefore lease hard limit should never expire.
     namesystem.leaseManager.setLeasePeriod(
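
The block added above puts the node into safe mode manually (SAFEMODE_ENTER). In HDFS, manually entered safe mode is not exited automatically when the replication threshold is reached, which is what keeps the BackupNode read-only for its whole lifetime. A hedged sketch of that distinction, with hypothetical names:

// Hypothetical mirror of the manual-vs-automatic safe mode distinction.
enum SafeModeAction { SAFEMODE_ENTER, SAFEMODE_LEAVE, SAFEMODE_GET }

class SafeModeTracker {
  private boolean inSafeMode = true;  // automatic safe mode during startup
  private boolean manual = false;

  void setSafeMode(SafeModeAction action) {
    if (action == SafeModeAction.SAFEMODE_ENTER) { inSafeMode = true; manual = true; }
    else if (action == SafeModeAction.SAFEMODE_LEAVE) { inSafeMode = false; manual = false; }
    // SAFEMODE_GET: just report, no state change
  }

  // Called when enough blocks have been reported by datanodes.
  void onReplicationThresholdReached() {
    if (!manual) inSafeMode = false;  // automatic safe mode ends; manual stays
  }
}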

@@ -190,7 +195,12 @@ public void stop() {
     // Stop name-node threads
     super.stop();
   }
 
+  /* @Override */// NameNode
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    throw new UnsupportedActionException("setSafeMode");
+  }
+
   static class BackupNodeRpcServer extends NameNodeRpcServer implements
       JournalProtocol {
     private final String nnRpcAddress;
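
With setSafeMode rejected at the NameNode API level, a client (or dfsadmin) cannot take the BackupNode out of safe mode remotely. The stub-out pattern in isolation, as a runnable sketch (the exception class here is a stand-in for HDFS's UnsupportedActionException, not the real class):

import java.io.IOException;

// Stand-in for HDFS's UnsupportedActionException.
class UnsupportedActionException extends IOException {
  UnsupportedActionException(String action) {
    super("Operation is not supported: " + action);
  }
}

class ReadOnlyNode {
  // Any RPC that could change safe-mode state is refused outright.
  public boolean setSafeMode(String action) throws IOException {
    throw new UnsupportedActionException("setSafeMode");
  }
}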

@@ -241,8 +241,12 @@ void doCheckpoint() throws IOException {
 
       rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
     }
 
     long txid = bnImage.getLastAppliedTxId();
+
+    backupNode.namesystem.dir.setReady();
+    backupNode.namesystem.setBlockTotal();
+
     bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
     bnStorage.writeAll();
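
After rolling the image forward from the edit logs, the checkpoint now marks the FSDirectory ready and recomputes the block total before saving the image, so safe-mode accounting matches the namespace that was just built. A simplified sketch of that ordering, using hypothetical minimal interfaces in place of the real BN image, namesystem, and storage types:

import java.io.IOException;

interface Image { long getLastAppliedTxId(); void saveAll(long txid) throws IOException; }
interface Namesystem { void dirSetReady(); void setBlockTotal(); }
interface NodeStorage { void writeAll() throws IOException; }

class CheckpointTail {
  // Order matters: readiness and block totals are set before the image
  // is written, so the saved checkpoint reflects a consistent state.
  static void finish(Image image, Namesystem ns, NodeStorage storage) throws IOException {
    long txid = image.getLastAppliedTxId();
    ns.dirSetReady();
    ns.setBlockTotal();
    image.saveAll(txid);
    storage.writeAll();
  }
}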

@@ -159,6 +159,11 @@ private BlockManager getBlockManager() {
    */
   void imageLoadComplete() {
     Preconditions.checkState(!ready, "FSDirectory already loaded");
+    setReady();
+  }
+
+  void setReady() {
+    if(ready) return;
     writeLock();
     try {
       setReady(true);
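
Splitting setReady() out of imageLoadComplete() lets the Checkpointer mark the directory usable without tripping the "already loaded" precondition. The underlying idiom is an idempotent flag flip: a cheap volatile check first, then the flip under the write lock. Standalone sketch, not the actual FSDirectory code:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class LoadableDirectory {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile boolean ready = false;

  // Idempotent: returns immediately if already ready, otherwise flips
  // the flag while holding the write lock so readers see consistent state.
  void setReady() {
    if (ready) return;
    lock.writeLock().lock();
    try {
      ready = true;
    } finally {
      lock.writeLock().unlock();
    }
  }
}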

@@ -3344,7 +3344,7 @@ public void decrementSafeBlockCount(Block b) {
   /**
    * Set the total number of blocks in the system.
    */
-  private void setBlockTotal() {
+  void setBlockTotal() {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)

@@ -19,9 +19,12 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Collections;
 import java.util.List;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;

@@ -29,13 +32,13 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;

@@ -44,8 +47,6 @@
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-import junit.framework.TestCase;
-
 public class TestBackupNode extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
||||
@ -241,8 +242,11 @@ public void testBackupNode() throws Exception {
|
||||
void testCheckpoint(StartupOption op) throws Exception {
|
||||
Path file1 = new Path("checkpoint.dat");
|
||||
Path file2 = new Path("checkpoint2.dat");
|
||||
Path file3 = new Path("backup.dat");
|
||||
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
short replication = (short)conf.getInt("dfs.replication", 3);
|
||||
int numDatanodes = Math.max(3, replication);
|
||||
conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
|
||||
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
|
||||

@@ -290,7 +294,7 @@ void testCheckpoint(StartupOption op) throws Exception {
       //
       // Restart cluster and verify that file1 still exist.
       //
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
                                                 .format(false).build();
       fileSys = cluster.getFileSystem();
       // check that file1 still exists

@@ -319,6 +323,26 @@ void testCheckpoint(StartupOption op) throws Exception {
       backup.doCheckpoint();
       waitCheckpointDone(cluster, backup, txid);
 
+      // Try BackupNode operations
+      InetSocketAddress add = backup.getNameNodeAddress();
+      // Write to BN
+      FileSystem bnFS = FileSystem.get(new Path("hdfs://"
+          + NameNode.getHostPortString(add)).toUri(), conf);
+      boolean canWrite = true;
+      try {
+        TestCheckpoint.writeFile(bnFS, file3, replication);
+      } catch (IOException eio) {
+        LOG.info("Write to BN failed as expected: ", eio);
+        canWrite = false;
+      }
+      assertFalse("Write to BackupNode must be prohibited.", canWrite);
+
+      TestCheckpoint.writeFile(fileSys, file3, replication);
+      TestCheckpoint.checkFile(fileSys, file3, replication);
+      // should also be on BN right away
+      assertTrue("file3 does not exist on BackupNode",
+          op != StartupOption.BACKUP || bnFS.exists(file3));
+
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);
       assertTrue(e.getLocalizedMessage(), false);
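
The test drives the change end to end: a write through the BackupNode's address must fail, while the same write through the active NameNode succeeds (and, in BACKUP mode, shows up on the BackupNode right away). The expect-failure idiom used above, reduced to a self-contained JUnit 3 example matching the TestCase style of this test:

import java.io.IOException;
import junit.framework.TestCase;

public class ExpectFailureExample extends TestCase {
  // Stand-in for an operation that a read-only node must reject.
  private void writeToReadOnlyNode() throws IOException {
    throw new IOException("NameNode is in safe mode");
  }

  public void testWriteIsRejected() {
    boolean canWrite = true;
    try {
      writeToReadOnlyNode();
    } catch (IOException expected) {
      canWrite = false; // the failure is the expected outcome
    }
    assertFalse("Write must be prohibited.", canWrite);
  }
}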