Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1229279 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-01-09 18:01:55 +00:00
commit ade897636d
5 changed files with 131 additions and 98 deletions

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -12,8 +12,6 @@ Trunk (unreleased changes)
HADOOP-7875. Add helper class to unwrap protobuf ServiceException.
(suresh)
HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
IMPROVEMENTS
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -155,6 +153,9 @@ Release 0.23.1 - Unreleased
HADOOP-7657. Add support for LZ4 compression. (Binglin Chang via todd)
HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
IMPROVEMENTS
HADOOP-7801. HADOOP_PREFIX cannot be overriden. (Bruno Mahé via tomwhite)
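The HADOOP-7910 entry above (and HDFS-1314 in the next file) hinge on Configuration.getLongBytes, which accepts human-readable size suffixes. A minimal sketch of reading such a value; the key and default below are illustrative, not taken from this commit:

import org.apache.hadoop.conf.Configuration;

public class GetLongBytesDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // A size-indicating suffix (k, m, g, ...) is expanded to bytes.
        conf.set("dfs.blocksize", "64m");
        // Falls back to the plain long default when the key is unset.
        long blockSize = conf.getLongBytes("dfs.blocksize", 128L * 1024 * 1024);
        System.out.println(blockSize);  // prints 67108864
    }
}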

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -90,8 +90,6 @@ Trunk (unreleased changes)
HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
(Hari Mankude via eli)
HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
HDFS-2410. Further cleanup of hardcoded configuration keys and values.
(suresh)
@@ -110,12 +108,6 @@ Trunk (unreleased changes)
HDFS-2669 Enable protobuf rpc for ClientNamenodeProtocol
HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
namenode state. (Tomasz Nykiel via hairong)
@@ -126,8 +118,6 @@ Trunk (unreleased changes)
HDFS-2476. More CPU efficient data structure for under-replicated,
over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
BUG FIXES
HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
via atm)
@@ -148,9 +138,6 @@ Trunk (unreleased changes)
HDFS-2373. Commands using WebHDFS and hftp print unnecessary debug
info on the console with security enabled. (Arpit Gupta via suresh)
HDFS-2349. Corruption detected during block transfers between DNs
should log a WARN instead of INFO. (harsh)
HDFS-2188. Make FSEditLog create its journals from a list of URIs rather
than NNStorage. (Ivan Kelly via jitendra)
@@ -176,6 +163,8 @@ Trunk (unreleased changes)
HDFS-2700. Fix failing TestDataNodeMultipleRegistrations in trunk
(Uma Maheswara Rao G via todd)
HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
Release 0.23.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -252,6 +241,19 @@ Release 0.23.1 - UNRELEASED
HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)
HDFS-2349. Corruption detected during block transfers between DNs
should log a WARN instead of INFO. (harsh)
HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)
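The HDFS-554 entry above replaces an element-by-element copy with System.arraycopy when growing BlockInfo's internal array. A sketch of that pattern on a hypothetical class; the field name and sizing policy are illustrative, not BlockInfo's actual code:

class GrowableTriplets {
    private Object[] triplets = new Object[0];

    // Grow the backing array to at least 'size' slots, preserving contents.
    Object[] ensureCapacity(int size) {
        if (triplets.length >= size) {
            return triplets;
        }
        Object[] old = triplets;
        triplets = new Object[size];
        // One bulk copy instead of a per-element loop.
        System.arraycopy(old, 0, triplets, 0, old.length);
        return triplets;
    }
}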

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java

@@ -17,20 +17,30 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import junit.framework.TestCase;
import java.io.*;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
@@ -39,7 +49,10 @@
* This class tests various combinations of dfs.namenode.name.dir
* and dfs.namenode.edits.dir configurations.
*/
public class TestNameEditsConfigs extends TestCase {
public class TestNameEditsConfigs {
private static final Log LOG = LogFactory.getLog(FSEditLog.class);
static final long SEED = 0xDEADBEEFL;
static final int BLOCK_SIZE = 4096;
static final int FILE_SIZE = 8192;
@@ -51,9 +64,9 @@ public class TestNameEditsConfigs extends TestCase {
private File base_dir = new File(
System.getProperty("test.build.data", "build/test/data"), "dfs/");
protected void setUp() throws java.lang.Exception {
if(base_dir.exists()) {
if (!FileUtil.fullyDelete(base_dir))
@Before
public void setUp() throws IOException {
if(base_dir.exists() && !FileUtil.fullyDelete(base_dir)) {
throw new IOException("Cannot remove directory " + base_dir);
}
}
@@ -130,6 +143,7 @@ SecondaryNameNode startSecondaryNameNode(Configuration conf
* @throws Exception
*/
@SuppressWarnings("deprecation")
@Test
public void testNameEditsConfigs() throws Exception {
Path file1 = new Path("TestNameEditsConfigs1");
Path file2 = new Path("TestNameEditsConfigs2");
@@ -314,12 +328,14 @@ private FSImageTransactionalStorageInspector inspect(File storageDir)
* This test tries to simulate failure scenarios.
* 1. Start cluster with shared name and edits dir
* 2. Restart cluster by adding separate name and edits dirs
* T3. Restart cluster by removing shared name and edits dir
* 3. Restart cluster by removing shared name and edits dir
* 4. Restart cluster with old shared name and edits dir, but only latest
* name dir. This should fail since we dont have latest edits dir
* name dir. This should fail since we don't have latest edits dir
* 5. Restart cluster with old shared name and edits dir, but only latest
* edits dir. This should fail since we dont have latest name dir
* edits dir. This should succeed since the latest edits will have
* segments leading all the way from the image in name_and_edits.
*/
@Test
public void testNameEditsConfigsFailure() throws IOException {
Path file1 = new Path("TestNameEditsConfigs1");
Path file2 = new Path("TestNameEditsConfigs2");
@@ -327,15 +343,18 @@ public void testNameEditsConfigsFailure() throws IOException {
MiniDFSCluster cluster = null;
Configuration conf = null;
FileSystem fileSys = null;
File newNameDir = new File(base_dir, "name");
File newEditsDir = new File(base_dir, "edits");
File nameAndEdits = new File(base_dir, "name_and_edits");
File nameOnlyDir = new File(base_dir, "name");
File editsOnlyDir = new File(base_dir, "edits");
File nameAndEditsDir = new File(base_dir, "name_and_edits");
// 1
// Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
// Manage our own dfs directories
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
@@ -344,11 +363,10 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.waitActive();
// Check that the dir has a VERSION file
assertTrue(new File(nameAndEdits, "current/VERSION").exists());
assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
fileSys = cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
@@ -357,16 +375,19 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.shutdown();
}
// 2
// Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
assertTrue(newNameDir.mkdir());
assertTrue(newEditsDir.mkdir());
assertTrue(nameOnlyDir.mkdir());
assertTrue(editsOnlyDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() +
"," + newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() +
"," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath() +
"," + nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath() +
"," + editsOnlyDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
// Manage our own dfs directories. Do not format.
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
@@ -376,13 +397,12 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.waitActive();
// Check that the dirs have a VERSION file
assertTrue(new File(nameAndEdits, "current/VERSION").exists());
assertTrue(new File(newNameDir, "current/VERSION").exists());
assertTrue(new File(newEditsDir, "current/VERSION").exists());
assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
assertTrue(new File(nameOnlyDir, "current/VERSION").exists());
assertTrue(new File(editsOnlyDir, "current/VERSION").exists());
fileSys = cluster.getFileSystem();
try {
assertTrue(fileSys.exists(file1));
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
@@ -393,11 +413,13 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.shutdown();
}
// 3
// Now remove common directory both have and start namenode with
// separate name and edits dirs
try {
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
@@ -407,8 +429,7 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.waitActive();
fileSys = cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
assertFalse(fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys, file2, replication);
cleanupFile(fileSys, file2);
@@ -419,11 +440,12 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.shutdown();
}
// 4
// Add old shared directory for name and edits along with latest name
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath() + "," +
nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath() + "," +
nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
cluster = new MiniDFSCluster.Builder(conf)
@@ -431,21 +453,25 @@ public void testNameEditsConfigsFailure() throws IOException {
.format(false)
.manageNameDfsDirs(false)
.build();
assertTrue(false);
fail("Successfully started cluster but should not have been able to.");
} catch (IOException e) { // expect to fail
System.out.println("cluster start failed due to missing " +
"latest edits dir");
LOG.info("EXPECTED: cluster start failed due to missing " +
"latest edits dir", e);
} finally {
if (cluster != null) {
cluster.shutdown();
}
cluster = null;
}
// 5
// Add old shared directory for name and edits along with latest edits.
// This is OK, since the latest edits will have segments leading all
// the way from the image in name_and_edits.
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath() +
"," + nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath() +
"," + nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
cluster = new MiniDFSCluster.Builder(conf)
@@ -453,14 +479,16 @@ public void testNameEditsConfigsFailure() throws IOException {
.format(false)
.manageNameDfsDirs(false)
.build();
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys, file2, replication);
cleanupFile(fileSys, file2);
fileSys = cluster.getFileSystem();
assertFalse(fileSys.exists(file1));
assertFalse(fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys, file3, replication);
cleanupFile(fileSys, file3);
writeFile(fileSys, file3, replication);
checkFile(fileSys, file3, replication);
} catch (IOException e) { // expect to fail
System.out.println("cluster start failed due to missing latest name dir");
} finally {
fileSys.close();
cluster.shutdown();
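The conversion applied throughout this file is the standard JUnit 3 to JUnit 4 migration: drop "extends TestCase", statically import the Assert methods, and mark lifecycle and test methods with annotations. A minimal sketch on a hypothetical test class; the names here are illustrative:

import static org.junit.Assert.assertEquals;

import org.junit.Before;
import org.junit.Test;

public class TestExample {            // no longer extends junit.framework.TestCase
    private int[] data;

    @Before                           // replaces overriding TestCase.setUp()
    public void setUp() {
        data = new int[] {1, 2, 3};
    }

    @Test                             // test methods are annotated, not name-matched
    public void testLength() {
        assertEquals(3, data.length); // assertions come from the static imports
    }
}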

hadoop-mapreduce-project/CHANGES.txt

@@ -425,6 +425,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3557. MR1 test fail to compile because of missing hadoop-archives dependency.
(tucu)
MAPREDUCE-3624. Remove unnecessary dependency on JDK's tools.jar. (mahadev
via acmurthy)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES

hadoop-mapreduce-project/hadoop-yarn/bin/yarn

@@ -110,7 +110,6 @@ if [ ! -d "$HADOOP_CONF_DIR" ]; then
fi
CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}:${CLASSPATH}"
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
# for developers, add Hadoop classes to CLASSPATH
if [ -d "$YARN_HOME/yarn-api/target/classes" ]; then