HDFS-2285. BackupNode should reject requests to modify namespace. Contributed by Konstantin Shvachko and Uma Maheswara Rao G.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1195013 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Konstantin Shvachko 2011-10-29 20:11:35 +00:00
parent 2eca60199a
commit 64c019cccc
6 changed files with 53 additions and 7 deletions

View File

@ -1816,6 +1816,9 @@ Release 0.22.0 - Unreleased
HDFS-2452. OutOfMemoryError in DataXceiverServer takes down the DataNode HDFS-2452. OutOfMemoryError in DataXceiverServer takes down the DataNode
(Uma Maheswara Rao via cos) (Uma Maheswara Rao via cos)
HDFS-2285. BackupNode should reject requests to modify namespace.
(shv and Uma Maheswara Rao)
Release 0.21.1 - Unreleased Release 0.21.1 - Unreleased
HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli) HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)

View File

@ -28,6 +28,7 @@
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolServerSideTranslatorR23; import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolServerSideTranslatorR23;
import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol; import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@ -137,6 +138,10 @@ protected void initialize(Configuration conf) throws IOException {
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
NamespaceInfo nsInfo = handshake(conf); NamespaceInfo nsInfo = handshake(conf);
super.initialize(conf); super.initialize(conf);
if (false == namesystem.isInSafeMode()) {
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
}
// Backup node should never do lease recovery, // Backup node should never do lease recovery,
// therefore lease hard limit should never expire. // therefore lease hard limit should never expire.
namesystem.leaseManager.setLeasePeriod( namesystem.leaseManager.setLeasePeriod(
@ -191,6 +196,11 @@ public void stop() {
super.stop(); super.stop();
} }
/**
 * Rejects safe mode transitions on the BackupNode.
 *
 * Per HDFS-2285 the BackupNode must reject requests to modify the
 * namespace; safe mode is managed internally (the node is placed in
 * safe mode during initialization) and clients may not change it.
 *
 * @param action the requested safe mode transition (never honored)
 * @return never returns normally
 * @throws IOException always, as an {@code UnsupportedActionException}
 *         carrying the operation name {@code "setSafeMode"}
 */
/* @Override */// NameNode
public boolean setSafeMode(SafeModeAction action) throws IOException {
throw new UnsupportedActionException("setSafeMode");
}
static class BackupNodeRpcServer extends NameNodeRpcServer implements static class BackupNodeRpcServer extends NameNodeRpcServer implements
JournalProtocol { JournalProtocol {
private final String nnRpcAddress; private final String nnRpcAddress;

View File

@ -243,6 +243,10 @@ void doCheckpoint() throws IOException {
} }
long txid = bnImage.getLastAppliedTxId(); long txid = bnImage.getLastAppliedTxId();
backupNode.namesystem.dir.setReady();
backupNode.namesystem.setBlockTotal();
bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid); bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
bnStorage.writeAll(); bnStorage.writeAll();

View File

@ -159,6 +159,11 @@ private BlockManager getBlockManager() {
*/ */
void imageLoadComplete() { void imageLoadComplete() {
Preconditions.checkState(!ready, "FSDirectory already loaded"); Preconditions.checkState(!ready, "FSDirectory already loaded");
setReady();
}
void setReady() {
if(ready) return;
writeLock(); writeLock();
try { try {
setReady(true); setReady(true);

View File

@ -3344,7 +3344,7 @@ public void decrementSafeBlockCount(Block b) {
/** /**
* Set the total number of blocks in the system. * Set the total number of blocks in the system.
*/ */
private void setBlockTotal() { void setBlockTotal() {
// safeMode is volatile, and may be set to null at any time // safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) if (safeMode == null)

View File

@ -19,9 +19,12 @@
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import junit.framework.TestCase;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger; import org.apache.commons.logging.impl.Log4JLogger;
@ -29,13 +32,13 @@
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level; import org.apache.log4j.Level;
@ -44,8 +47,6 @@
import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import junit.framework.TestCase;
public class TestBackupNode extends TestCase { public class TestBackupNode extends TestCase {
public static final Log LOG = LogFactory.getLog(TestBackupNode.class); public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
@ -241,8 +242,11 @@ public void testBackupNode() throws Exception {
void testCheckpoint(StartupOption op) throws Exception { void testCheckpoint(StartupOption op) throws Exception {
Path file1 = new Path("checkpoint.dat"); Path file1 = new Path("checkpoint.dat");
Path file2 = new Path("checkpoint2.dat"); Path file2 = new Path("checkpoint2.dat");
Path file3 = new Path("backup.dat");
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
short replication = (short)conf.getInt("dfs.replication", 3);
int numDatanodes = Math.max(3, replication);
conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0"); conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
@ -290,7 +294,7 @@ void testCheckpoint(StartupOption op) throws Exception {
// //
// Restart cluster and verify that file1 still exist. // Restart cluster and verify that file1 still exist.
// //
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build(); .format(false).build();
fileSys = cluster.getFileSystem(); fileSys = cluster.getFileSystem();
// check that file1 still exists // check that file1 still exists
@ -319,6 +323,26 @@ void testCheckpoint(StartupOption op) throws Exception {
backup.doCheckpoint(); backup.doCheckpoint();
waitCheckpointDone(cluster, backup, txid); waitCheckpointDone(cluster, backup, txid);
// Try BackupNode operations
InetSocketAddress add = backup.getNameNodeAddress();
// Write to BN
FileSystem bnFS = FileSystem.get(new Path("hdfs://"
+ NameNode.getHostPortString(add)).toUri(), conf);
boolean canWrite = true;
try {
TestCheckpoint.writeFile(bnFS, file3, replication);
} catch (IOException eio) {
LOG.info("Write to BN failed as expected: ", eio);
canWrite = false;
}
assertFalse("Write to BackupNode must be prohibited.", canWrite);
TestCheckpoint.writeFile(fileSys, file3, replication);
TestCheckpoint.checkFile(fileSys, file3, replication);
// should also be on BN right away
assertTrue("file3 does not exist on BackupNode",
op != StartupOption.BACKUP || bnFS.exists(file3));
} catch(IOException e) { } catch(IOException e) {
LOG.error("Error in TestBackupNode:", e); LOG.error("Error in TestBackupNode:", e);
assertTrue(e.getLocalizedMessage(), false); assertTrue(e.getLocalizedMessage(), false);