Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1229279 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-01-09 18:01:55 +00:00
commit ade897636d
5 changed files with 131 additions and 98 deletions

View File

@@ -12,8 +12,6 @@ Trunk (unreleased changes)
HADOOP-7875. Add helper class to unwrap protobuf ServiceException.
(suresh)
- HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
IMPROVEMENTS
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -155,6 +153,9 @@ Release 0.23.1 - Unreleased
HADOOP-7657. Add support for LZ4 compression. (Binglin Chang via todd)
+ HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
IMPROVEMENTS
HADOOP-7801. HADOOP_PREFIX cannot be overriden. (Bruno Mahé via tomwhite)
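The HADOOP-7910 entry moved above adds a helper for parsing human-readable byte sizes. A minimal sketch of how it might be used, assuming the Configuration.getLongBytes(String, long) signature the JIRA describes; the property name and values here are illustrative only:

import org.apache.hadoop.conf.Configuration;

public class GetLongBytesSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A human-readable size value, as HDFS-1314 later allows for dfs.blocksize.
    conf.set("dfs.blocksize", "128m");
    // The "m" suffix is treated as a binary prefix, so this yields
    // 128 * 1024 * 1024 bytes; the default is returned when the key is unset.
    long blockSize = conf.getLongBytes("dfs.blocksize", 64L * 1024 * 1024);
    System.out.println("dfs.blocksize = " + blockSize + " bytes");
  }
}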

View File

@@ -90,8 +90,6 @@ Trunk (unreleased changes)
HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
(Hari Mankude via eli)
- HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
HDFS-2410. Further cleanup of hardcoded configuration keys and values.
(suresh)
@@ -110,12 +108,6 @@ Trunk (unreleased changes)
HDFS-2669 Enable protobuf rpc for ClientNamenodeProtocol
- HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
- HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
- HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
namenode state. (Tomasz Nykiel via hairong)
@@ -126,8 +118,6 @@ Trunk (unreleased changes)
HDFS-2476. More CPU efficient data structure for under-replicated,
over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
- HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
BUG FIXES
HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
via atm)
@@ -148,9 +138,6 @@ Trunk (unreleased changes)
HDFS-2373. Commands using WebHDFS and hftp print unnecessary debug
info on the console with security enabled. (Arpit Gupta via suresh)
- HDFS-2349. Corruption detected during block transfers between DNs
- should log a WARN instead of INFO. (harsh)
HDFS-2188. Make FSEditLog create its journals from a list of URIs rather
than NNStorage. (Ivan Kelly via jitendra)
@@ -176,6 +163,8 @@ Trunk (unreleased changes)
HDFS-2700. Fix failing TestDataNodeMultipleRegistrations in trunk
(Uma Maheswara Rao G via todd)
+ HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
Release 0.23.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -252,6 +241,19 @@ Release 0.23.1 - UNRELEASED
HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)
+ HDFS-2349. Corruption detected during block transfers between DNs
+ should log a WARN instead of INFO. (harsh)
+ HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
+ HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
+ HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
+ HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
+ HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)

View File

@@ -17,20 +17,30 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
- import junit.framework.TestCase;
- import java.io.*;
- import java.util.Random;
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertTrue;
+ import static org.junit.Assert.fail;
+ import java.io.File;
+ import java.io.IOException;
import java.util.List;
+ import java.util.Random;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hdfs.HdfsConfiguration;
- import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+ import org.apache.hadoop.hdfs.HdfsConfiguration;
+ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+ import org.junit.Before;
+ import org.junit.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
@@ -39,7 +49,10 @@
* This class tests various combinations of dfs.namenode.name.dir
* and dfs.namenode.edits.dir configurations.
*/
- public class TestNameEditsConfigs extends TestCase {
+ public class TestNameEditsConfigs {
+ private static final Log LOG = LogFactory.getLog(FSEditLog.class);
static final long SEED = 0xDEADBEEFL;
static final int BLOCK_SIZE = 4096;
static final int FILE_SIZE = 8192;
@@ -51,15 +64,15 @@ public class TestNameEditsConfigs extends TestCase {
private File base_dir = new File(
System.getProperty("test.build.data", "build/test/data"), "dfs/");
- protected void setUp() throws java.lang.Exception {
- if(base_dir.exists()) {
- if (!FileUtil.fullyDelete(base_dir))
+ @Before
+ public void setUp() throws IOException {
+ if(base_dir.exists() && !FileUtil.fullyDelete(base_dir)) {
throw new IOException("Cannot remove directory " + base_dir);
}
}
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, BLOCK_SIZE);
@@ -73,7 +86,7 @@ private void writeFile(FileSystem fileSys, Path name, int repl)
void checkImageAndEditsFilesExistence(File dir,
boolean shouldHaveImages,
boolean shouldHaveEdits)
throws IOException {
FSImageTransactionalStorageInspector ins = inspect(dir);
if (shouldHaveImages) {
@@ -92,7 +105,7 @@ void checkImageAndEditsFilesExistence(File dir,
}
private void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException {
assertTrue(fileSys.exists(name));
int replication = fileSys.getFileStatus(name).getReplication();
assertEquals("replication for " + name, repl, replication);
@@ -101,7 +114,7 @@ private void checkFile(FileSystem fileSys, Path name, int repl)
}
private void cleanupFile(FileSystem fileSys, Path name)
throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
@@ -130,6 +143,7 @@ SecondaryNameNode startSecondaryNameNode(Configuration conf
* @throws Exception
*/
@SuppressWarnings("deprecation")
+ @Test
public void testNameEditsConfigs() throws Exception {
Path file1 = new Path("TestNameEditsConfigs1");
Path file2 = new Path("TestNameEditsConfigs2");
@@ -314,12 +328,14 @@ private FSImageTransactionalStorageInspector inspect(File storageDir)
* This test tries to simulate failure scenarios.
* 1. Start cluster with shared name and edits dir
* 2. Restart cluster by adding separate name and edits dirs
- * T3. Restart cluster by removing shared name and edits dir
+ * 3. Restart cluster by removing shared name and edits dir
* 4. Restart cluster with old shared name and edits dir, but only latest
- * name dir. This should fail since we dont have latest edits dir
+ * name dir. This should fail since we don't have latest edits dir
* 5. Restart cluster with old shared name and edits dir, but only latest
- * edits dir. This should fail since we dont have latest name dir
+ * edits dir. This should succeed since the latest edits will have
+ * segments leading all the way from the image in name_and_edits.
*/
+ @Test
public void testNameEditsConfigsFailure() throws IOException {
Path file1 = new Path("TestNameEditsConfigs1");
Path file2 = new Path("TestNameEditsConfigs2");
@@ -327,28 +343,30 @@ public void testNameEditsConfigsFailure() throws IOException {
MiniDFSCluster cluster = null;
Configuration conf = null;
FileSystem fileSys = null;
- File newNameDir = new File(base_dir, "name");
- File newEditsDir = new File(base_dir, "edits");
- File nameAndEdits = new File(base_dir, "name_and_edits");
+ File nameOnlyDir = new File(base_dir, "name");
+ File editsOnlyDir = new File(base_dir, "edits");
+ File nameAndEditsDir = new File(base_dir, "name_and_edits");
+ // 1
// Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
- conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
- // Manage our own dfs directories
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(NUM_DATA_NODES)
- .manageNameDfsDirs(false)
- .build();
- cluster.waitActive();
- // Check that the dir has a VERSION file
- assertTrue(new File(nameAndEdits, "current/VERSION").exists());
- fileSys = cluster.getFileSystem();
try {
+ // Manage our own dfs directories
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(NUM_DATA_NODES)
+ .manageNameDfsDirs(false)
+ .build();
+ cluster.waitActive();
+ // Check that the dir has a VERSION file
+ assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
+ fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
@@ -357,32 +375,34 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.shutdown();
}
+ // 2
// Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
- assertTrue(newNameDir.mkdir());
- assertTrue(newEditsDir.mkdir());
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() +
- "," + newNameDir.getPath());
- conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() +
- "," + newEditsDir.getPath());
+ assertTrue(nameOnlyDir.mkdir());
+ assertTrue(editsOnlyDir.mkdir());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath() +
+ "," + nameOnlyDir.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath() +
+ "," + editsOnlyDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
- // Manage our own dfs directories. Do not format.
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(NUM_DATA_NODES)
- .format(false)
- .manageNameDfsDirs(false)
- .build();
- cluster.waitActive();
- // Check that the dirs have a VERSION file
- assertTrue(new File(nameAndEdits, "current/VERSION").exists());
- assertTrue(new File(newNameDir, "current/VERSION").exists());
- assertTrue(new File(newEditsDir, "current/VERSION").exists());
- fileSys = cluster.getFileSystem();
try {
+ // Manage our own dfs directories. Do not format.
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(NUM_DATA_NODES)
+ .format(false)
+ .manageNameDfsDirs(false)
+ .build();
+ cluster.waitActive();
+ // Check that the dirs have a VERSION file
+ assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
+ assertTrue(new File(nameOnlyDir, "current/VERSION").exists());
+ assertTrue(new File(editsOnlyDir, "current/VERSION").exists());
+ fileSys = cluster.getFileSystem();
assertTrue(fileSys.exists(file1));
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
@@ -393,22 +413,23 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.shutdown();
}
+ // 3
// Now remove common directory both have and start namenode with
// separate name and edits dirs
- conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
- conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
- replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(NUM_DATA_NODES)
- .format(false)
- .manageNameDfsDirs(false)
- .build();
- cluster.waitActive();
- fileSys = cluster.getFileSystem();
try {
- assertTrue(!fileSys.exists(file1));
+ conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath());
+ replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(NUM_DATA_NODES)
+ .format(false)
+ .manageNameDfsDirs(false)
+ .build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
+ assertFalse(fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys, file2, replication);
cleanupFile(fileSys, file2);
@@ -419,11 +440,12 @@ public void testNameEditsConfigsFailure() throws IOException {
cluster.shutdown();
}
+ // 4
// Add old shared directory for name and edits along with latest name
conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath() + "," +
- nameAndEdits.getPath());
- conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath() + "," +
+ nameAndEditsDir.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
cluster = new MiniDFSCluster.Builder(conf)
@@ -431,21 +453,25 @@ public void testNameEditsConfigsFailure() throws IOException {
.format(false)
.manageNameDfsDirs(false)
.build();
- assertTrue(false);
+ fail("Successfully started cluster but should not have been able to.");
} catch (IOException e) { // expect to fail
- System.out.println("cluster start failed due to missing " +
- "latest edits dir");
+ LOG.info("EXPECTED: cluster start failed due to missing " +
+ "latest edits dir", e);
} finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
cluster = null;
}
+ // 5
// Add old shared directory for name and edits along with latest edits.
// This is OK, since the latest edits will have segments leading all
// the way from the image in name_and_edits.
conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
- conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath() +
- "," + nameAndEdits.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath() +
+ "," + nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
cluster = new MiniDFSCluster.Builder(conf)
@@ -453,14 +479,16 @@ public void testNameEditsConfigsFailure() throws IOException {
.format(false)
.manageNameDfsDirs(false)
.build();
- assertTrue(!fileSys.exists(file1));
- assertTrue(fileSys.exists(file2));
- checkFile(fileSys, file2, replication);
- cleanupFile(fileSys, file2);
+ fileSys = cluster.getFileSystem();
+ assertFalse(fileSys.exists(file1));
+ assertFalse(fileSys.exists(file2));
+ assertTrue(fileSys.exists(file3));
+ checkFile(fileSys, file3, replication);
+ cleanupFile(fileSys, file3);
writeFile(fileSys, file3, replication);
checkFile(fileSys, file3, replication);
- } catch (IOException e) { // expect to fail
- System.out.println("cluster start failed due to missing latest name dir");
} finally {
fileSys.close();
cluster.shutdown();
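Besides the directory-name and try/finally cleanups above, this diff migrates the test from JUnit 3 to JUnit 4: the TestCase superclass is dropped in favour of static Assert imports, @Before, and @Test. A minimal standalone sketch of that pattern, using a hypothetical class and test name rather than the real test:

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;
import org.junit.Before;
import org.junit.Test;

public class JUnit4StyleSketch {
  // Same base-directory idea as the test above, but a hypothetical class.
  private final File baseDir = new File(
      System.getProperty("test.build.data", "build/test/data"), "dfs/");

  @Before
  public void setUp() throws IOException {
    // Runs before each @Test method, replacing TestCase's overridden setUp().
    if (baseDir.exists() && !FileUtil.fullyDelete(baseDir)) {
      throw new IOException("Cannot remove directory " + baseDir);
    }
  }

  @Test
  public void testBaseDirCanBeRecreated() {
    // Discovered via the @Test annotation rather than the "test" name prefix.
    assertTrue(baseDir.mkdirs());
  }
}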

View File

@@ -425,6 +425,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3557. MR1 test fail to compile because of missing hadoop-archives dependency.
(tucu)
+ MAPREDUCE-3624. Remove unnecessary dependency on JDK's tools.jar. (mahadev
+ via acmurthy)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES

View File

@@ -110,7 +110,6 @@ if [ ! -d "$HADOOP_CONF_DIR" ]; then
fi
CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}:${CLASSPATH}"
- CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
# for developers, add Hadoop classes to CLASSPATH
if [ -d "$YARN_HOME/yarn-api/target/classes" ]; then