Merge trunk into HA branch.

Some conflicts around TestBlockRecovery: mocking changed for new heartbeat types/responses
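The core of the resolution, roughly as it lands in the TestBlockRecovery hunks below: the
heartbeat stub now has to return a HeartbeatResponse carrying an NNHAStatusHeartbeat instead
of a bare DatanodeCommand[]. A minimal sketch of the updated mock (types and constructor
shapes taken from the diff below; the surrounding test setup is elided):

    DatanodeProtocolClientSideTranslatorPB namenode =
        mock(DatanodeProtocolClientSideTranslatorPB.class);
    // The HA-aware reply wraps the commands together with the NN's HA state and txid.
    when(namenode.sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt()))
      .thenReturn(new HeartbeatResponse(
          new DatanodeCommand[0],
          new NNHAStatusHeartbeat(State.ACTIVE, 1)));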


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1243691 13f79535-47bb-0310-9956-ffa450edef68
commit db187cf40e
Todd Lipcon, 2012-02-13 21:00:37 +00:00
21 changed files with 241 additions and 146 deletions

View File

@@ -174,6 +174,14 @@ Release 0.23.2 - UNRELEASED
HADOOP-8035 Hadoop Maven site is inefficient and runs phases redundantly
(abayer via tucu)
+ HADOOP-8051 HttpFS documentation it is not wired to the generated site (tucu)
+ HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh)
+ HADOOP-8052. Hadoop Metrics2 should emit Float.MAX_VALUE (instead of
+ Double.MAX_VALUE) to avoid making Ganglia's gmetad core. (Varun Kapoor
+ via mattf)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES

View File

@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
</configuration>

View File

@@ -143,8 +143,16 @@ public double max() {
@SuppressWarnings("PublicInnerClass")
public static class MinMax {
- private double min = Double.MAX_VALUE;
- private double max = Double.MIN_VALUE;
+ // Float.MAX_VALUE is used rather than Double.MAX_VALUE, even though the
+ // min and max variables are of type double.
+ // Float.MAX_VALUE is big enough, and using Double.MAX_VALUE makes
+ // Ganglia core due to buffer overflow.
+ // The same reasoning applies to the MIN_VALUE counterparts.
+ static final double DEFAULT_MIN_VALUE = Float.MAX_VALUE;
+ static final double DEFAULT_MAX_VALUE = Float.MIN_VALUE;
+ private double min = DEFAULT_MIN_VALUE;
+ private double max = DEFAULT_MAX_VALUE;
public void add(double value) {
if (value > max) max = value;
@@ -155,8 +163,8 @@ public void add(double value) {
public double max() { return max; }
public void reset() {
- min = Double.MAX_VALUE;
- max = Double.MIN_VALUE;
+ min = DEFAULT_MIN_VALUE;
+ max = DEFAULT_MAX_VALUE;
}
public void reset(MinMax other) {

View File

@@ -36,8 +36,8 @@ public class TestSampleStat {
assertEquals("mean", 0.0, stat.mean(), EPSILON);
assertEquals("variance", 0.0, stat.variance(), EPSILON);
assertEquals("stddev", 0.0, stat.stddev(), EPSILON);
- assertEquals("min", Double.MAX_VALUE, stat.min(), EPSILON);
- assertEquals("max", Double.MIN_VALUE, stat.max(), EPSILON);
+ assertEquals("min", SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON);
+ assertEquals("max", SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON);
stat.add(3);
assertEquals("num samples", 1L, stat.numSamples());
@@ -60,8 +60,8 @@ public class TestSampleStat {
assertEquals("mean", 0.0, stat.mean(), EPSILON);
assertEquals("variance", 0.0, stat.variance(), EPSILON);
assertEquals("stddev", 0.0, stat.stddev(), EPSILON);
- assertEquals("min", Double.MAX_VALUE, stat.min(), EPSILON);
- assertEquals("max", Double.MIN_VALUE, stat.max(), EPSILON);
+ assertEquals("min", SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON);
+ assertEquals("max", SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON);
}
}

View File

@@ -14,12 +14,6 @@
-->
<project name="HttpFS">
- <version position="right"/>
- <bannerLeft>
- <name>&nbsp;</name>
- </bannerLeft>
<skin>
<groupId>org.apache.maven.skins</groupId>
<artifactId>maven-stylus-skin</artifactId>
@@ -28,6 +22,7 @@
<body>
<links>
+ <item name="Apache Hadoop" href="http://hadoop.apache.org/"/>
</links>
</body>

View File

@@ -129,6 +129,9 @@ Trunk (unreleased changes)
HDFS-2486. Remove unnecessary priority level checks in
UnderReplicatedBlocks. (Uma Maheswara Rao G via szetszwo)
+ HDFS-2878. Fix TestBlockRecovery and move it back into main test directory.
+ (todd)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
namenode state. (Tomasz Nykiel via hairong)
@@ -216,6 +219,9 @@ Release 0.23.2 - UNRELEASED
IMPROVEMENTS
+ HDFS-2931. Switch DataNode's BlockVolumeChoosingPolicy to private-audience.
+ (harsh via szetszwo)
OPTIMIZATIONS
BUG FIXES
@@ -224,6 +230,14 @@ Release 0.23.2 - UNRELEASED
HDFS-2764. TestBackupNode is racy. (atm)
+ HDFS-2869. Fix an error in the webhdfs docs for the mkdir op (harsh)
+ HDFS-776. Fix exception handling in Balancer. (Uma Maheswara Rao G
+ via szetszwo)
+ HDFS-2815. Namenode sometimes does not come out of safemode during
+ NN crash + restart. (Uma Maheswara Rao via suresh)
Release 0.23.1 - 2012-02-08
INCOMPATIBLE CHANGES

View File

@@ -349,7 +349,7 @@ Hello, webhdfs user!
<ul>
<li>Submit a HTTP PUT request.
<source>
- curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/&lt;PATH&gt;?op=MKDIRS[&amp;permission=&lt;OCTAL&gt;]"
+ curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=MKDIRS[&amp;permission=&lt;OCTAL&gt;]"
</source>
The client receives a response with a <a href="#boolean"><code>boolean</code> JSON object</a>:
<source>

View File

@@ -133,6 +133,10 @@ Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb
if (!isBlockTokenEnabled) {
return BlockTokenSecretManager.DUMMY_TOKEN;
} else {
+ if (!shouldRun) {
+ throw new IOException(
+ "Can not get access token. BlockKeyUpdater is not running");
+ }
return blockTokenSecretManager.generateToken(null, eb,
EnumSet.of(BlockTokenSecretManager.AccessMode.REPLACE,
BlockTokenSecretManager.AccessMode.COPY));
@@ -202,16 +206,20 @@ public String toString() {
*/
class BlockKeyUpdater implements Runnable {
public void run() {
+ try {
while (shouldRun) {
try {
blockTokenSecretManager.setKeys(namenode.getBlockKeys());
- } catch (Exception e) {
+ } catch (IOException e) {
LOG.error("Failed to set keys", e);
}
- try {
Thread.sleep(keyUpdaterInterval);
- } catch (InterruptedException ie) {
- }
}
+ } catch (InterruptedException e) {
+ LOG.info("InterruptedException in block key updater thread", e);
+ } catch (Throwable e) {
+ LOG.error("Exception in block key updater thread", e);
+ shouldRun = false;
}
}
}

View File

@@ -255,7 +255,6 @@ void notifyNamenodeReceivingBlock(ExtendedBlock block) {
}
}
//This must be called only by blockPoolManager
void start() {
for (BPServiceActor actor : bpServices) {
@@ -666,14 +665,4 @@ private boolean processCommandFromStandby(DatanodeCommand cmd,
return true;
}
- /**
- * Connect to the NN at the given address. This is separated out for ease
- * of testing.
- */
- DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr)
- throws IOException {
- return new DatanodeProtocolClientSideTranslatorPB(nnAddr,
- dn.getConf());
- }
}

View File

@@ -189,7 +189,7 @@ private void checkNNVersion(NamespaceInfo nsInfo)
private void connectToNNAndHandshake() throws IOException {
// get NN proxy
- bpNamenode = bpos.connectToNN(nnAddr);
+ bpNamenode = dn.connectToNN(nnAddr);
// First phase of the handshake with NN - get the namespace
// info.

View File

@@ -29,9 +29,11 @@
* specify what policy is to be used while choosing
* a volume for a block request.
*
+ * Note: This is an evolving i/f and is only for
+ * advanced use.
+ *
***************************************************/
- @InterfaceAudience.Public
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
public interface BlockVolumeChoosingPolicy {
/**

View File

@@ -926,6 +926,14 @@ protected Socket newSocket() throws IOException {
SocketChannel.open().socket() : new Socket();
}
+ /**
+ * Connect to the NN. This is separated out for easier testing.
+ */
+ DatanodeProtocolClientSideTranslatorPB connectToNN(
+ InetSocketAddress nnAddr) throws IOException {
+ return new DatanodeProtocolClientSideTranslatorPB(nnAddr, conf);
+ }
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
throws IOException {
@@ -1893,9 +1901,15 @@ public DatanodeProtocolClientSideTranslatorPB getBPNamenode(String bpid)
throws IOException {
BPOfferService bpos = blockPoolManager.get(bpid);
if (bpos == null) {
- throw new IOException("cannot find a namnode proxy for bpid=" + bpid);
+ throw new IOException("No block pool offer service for bpid=" + bpid);
}
- return bpos.getActiveNN();
+ DatanodeProtocolClientSideTranslatorPB activeNN = bpos.getActiveNN();
+ if (activeNN == null) {
+ throw new IOException(
+ "Block pool " + bpid + " has not recognized an active NN");
+ }
+ return activeNN;
}
/** Block synchronization */
@@ -1904,6 +1918,7 @@ void syncBlock(RecoveringBlock rBlock,
ExtendedBlock block = rBlock.getBlock();
DatanodeProtocolClientSideTranslatorPB nn = getBPNamenode(block
.getBlockPoolId());
+ assert nn != null;
long recoveryId = rBlock.getNewGenerationStamp();
if (LOG.isDebugEnabled()) {
@@ -2236,5 +2251,4 @@ DNConf getDnConf() {
boolean shouldRun() {
return shouldRun;
}
}

View File

@@ -2366,7 +2366,6 @@ private boolean deleteInternal(String src, boolean recursive,
boolean enforcePermission)
throws AccessControlException, SafeModeException, UnresolvedLinkException,
IOException {
- boolean deleteNow = false;
ArrayList<Block> collectedBlocks = new ArrayList<Block>();
writeLock();
@@ -2385,10 +2384,6 @@ private boolean deleteInternal(String src, boolean recursive,
if (!dir.delete(src, collectedBlocks)) {
return false;
}
- deleteNow = collectedBlocks.size() <= BLOCK_DELETION_INCREMENT;
- if (deleteNow) { // Perform small deletes right away
- removeBlocks(collectedBlocks);
- }
} finally {
writeUnlock();
}
@@ -2397,9 +2392,7 @@ private boolean deleteInternal(String src, boolean recursive,
writeLock();
try {
- if (!deleteNow) {
removeBlocks(collectedBlocks); // Incremental deletion of blocks
- }
} finally {
writeUnlock();
}

View File

@@ -59,14 +59,12 @@ public class TestBalancerWithMultipleNameNodes {
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
- // ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
}
private static final long CAPACITY = 500L;
private static final String RACK0 = "/rack0";
private static final String RACK1 = "/rack1";
- private static final String RACK2 = "/rack2";
private static final String FILE_NAME = "/tmp.txt";
private static final Path FILE_PATH = new Path(FILE_NAME);

View File

@@ -21,7 +21,6 @@
import java.io.IOException;
import java.net.InetSocketAddress;
- import java.util.Arrays;
import java.util.Map;
import org.apache.commons.logging.Log;
@@ -259,29 +258,29 @@ public void testPickActiveNameNode() throws Exception {
// Have NN1 claim active at txid 1
mockHaStatuses[0] = new NNHAStatusHeartbeat(State.ACTIVE, 1);
- waitForHeartbeats(bpos);
+ bpos.triggerHeartbeatForTests();
assertSame(mockNN1, bpos.getActiveNN());
// NN2 claims active at a higher txid
mockHaStatuses[1] = new NNHAStatusHeartbeat(State.ACTIVE, 2);
- waitForHeartbeats(bpos);
+ bpos.triggerHeartbeatForTests();
assertSame(mockNN2, bpos.getActiveNN());
// Even after another heartbeat from the first NN, it should
// think NN2 is active, since it claimed a higher txid
- waitForHeartbeats(bpos);
+ bpos.triggerHeartbeatForTests();
assertSame(mockNN2, bpos.getActiveNN());
// Even if NN2 goes to standby, DN shouldn't reset to talking to NN1,
// because NN1's txid is lower than the last active txid. Instead,
// it should consider neither active.
mockHaStatuses[1] = new NNHAStatusHeartbeat(State.STANDBY, 2);
- waitForHeartbeats(bpos);
+ bpos.triggerHeartbeatForTests();
assertNull(bpos.getActiveNN());
// Now if NN1 goes back to a higher txid, it should be considered active
mockHaStatuses[0] = new NNHAStatusHeartbeat(State.ACTIVE, 3);
- waitForHeartbeats(bpos);
+ bpos.triggerHeartbeatForTests();
assertSame(mockNN1, bpos.getActiveNN());
} finally {
@@ -302,28 +301,21 @@ public Boolean get() {
/**
* Create a BPOfferService which registers with and heartbeats with the
* specified namenode proxy objects.
+ * @throws IOException
*/
private BPOfferService setupBPOSForNNs(
- DatanodeProtocolClientSideTranslatorPB ... nns) {
+ DatanodeProtocolClientSideTranslatorPB ... nns) throws IOException {
// Set up some fake InetAddresses, then override the connectToNN
// function to return the corresponding proxies.
final Map<InetSocketAddress, DatanodeProtocolClientSideTranslatorPB> nnMap = Maps.newLinkedHashMap();
for (int port = 0; port < nns.length; port++) {
nnMap.put(new InetSocketAddress(port), nns[port]);
+ Mockito.doReturn(nns[port]).when(mockDn).connectToNN(
+ Mockito.eq(new InetSocketAddress(port)));
}
- return new BPOfferService(Lists.newArrayList(nnMap.keySet()), mockDn) {
- @Override
- DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr)
- throws IOException {
- DatanodeProtocolClientSideTranslatorPB nn = nnMap.get(nnAddr);
- if (nn == null) {
- throw new AssertionError("bad NN addr: " + nnAddr);
- }
- return nn;
- }
- };
+ return new BPOfferService(Lists.newArrayList(nnMap.keySet()), mockDn);
}
private void waitForInitialization(final BPOfferService bpos)
@@ -355,30 +347,6 @@ public Boolean get() {
}, 500, 10000);
}
- private void waitForHeartbeats(BPOfferService bpos)
- throws Exception {
- final int countAtStart[];
- synchronized (heartbeatCounts) {
- countAtStart = Arrays.copyOf(
- heartbeatCounts, heartbeatCounts.length);
- }
- bpos.triggerHeartbeatForTests();
- GenericTestUtils.waitFor(new Supplier<Boolean>() {
- @Override
- public Boolean get() {
- synchronized (heartbeatCounts) {
- for (int i = 0; i < countAtStart.length; i++) {
- if (heartbeatCounts[i] <= countAtStart[i]) {
- return false;
- }
- }
- return true;
- }
- }
- }, 200, 10000);
- }
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
ExtendedBlock fakeBlock,
DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {

View File

@@ -22,6 +22,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -31,6 +32,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
+ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord;
@@ -39,23 +41,33 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+ import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+ import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+ import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
+ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Daemon;
+ import org.apache.hadoop.util.DataChecksum;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+ import org.mockito.Mockito;
+ import org.mockito.invocation.InvocationOnMock;
+ import org.mockito.stubbing.Answer;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
+ import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@@ -72,6 +84,8 @@ public class TestBlockRecovery {
private final static long RECOVERY_ID = 3000L;
private final static String CLUSTER_ID = "testClusterID";
private final static String POOL_ID = "BP-TEST";
+ private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+ "localhost", 5020);
private final static long BLOCK_ID = 1000L;
private final static long GEN_STAMP = 2000L;
private final static long BLOCK_LEN = 3000L;
@@ -80,9 +94,6 @@ public class TestBlockRecovery {
private final static ExtendedBlock block = new ExtendedBlock(POOL_ID,
BLOCK_ID, BLOCK_LEN, GEN_STAMP);
- private final NamespaceInfo nsifno =
- new NamespaceInfo(1,CLUSTER_ID, POOL_ID, 2, 3);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
@@ -99,21 +110,50 @@ public void startUp() throws IOException {
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
- FileSystem.setDefaultUri(conf, "hdfs://localhost:5020");
+ conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+ FileSystem.setDefaultUri(conf,
+ "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
ArrayList<File> dirs = new ArrayList<File>();
File dataDir = new File(DATA_DIR);
FileUtil.fullyDelete(dataDir);
dataDir.mkdirs();
dirs.add(dataDir);
- DatanodeProtocol namenode = mock(DatanodeProtocol.class);
+ final DatanodeProtocolClientSideTranslatorPB namenode =
+ mock(DatanodeProtocolClientSideTranslatorPB.class);
+ Mockito.doAnswer(new Answer<DatanodeRegistration>() {
+ @Override
+ public DatanodeRegistration answer(InvocationOnMock invocation)
+ throws Throwable {
+ return (DatanodeRegistration) invocation.getArguments()[0];
+ }
+ }).when(namenode).registerDatanode(
+ Mockito.any(DatanodeRegistration.class),
+ Mockito.any(DatanodeStorage[].class));
when(namenode.versionRequest()).thenReturn(new NamespaceInfo
(1, CLUSTER_ID, POOL_ID, 1L, 1));
- when(namenode.sendHeartbeat(any(DatanodeRegistration.class), anyLong(),
- anyLong(), anyLong(), anyLong(), anyInt(), anyInt(), anyInt()))
- .thenReturn(new DatanodeCommand[0]);
- dn = new DataNode(conf, dirs, null);
- DataNodeTestUtils.setBPNamenodeByIndex(dn, nsifno, POOL_ID, namenode);
+ when(namenode.sendHeartbeat(
+ Mockito.any(DatanodeRegistration.class),
+ Mockito.any(StorageReport[].class),
+ Mockito.anyInt(),
+ Mockito.anyInt(),
+ Mockito.anyInt()))
+ .thenReturn(new HeartbeatResponse(
+ new DatanodeCommand[0],
+ new NNHAStatusHeartbeat(State.ACTIVE, 1)));
+ dn = new DataNode(conf, dirs, null) {
+ @Override
+ DatanodeProtocolClientSideTranslatorPB connectToNN(
+ InetSocketAddress nnAddr) throws IOException {
+ Assert.assertEquals(NN_ADDR, nnAddr);
+ return namenode;
+ }
+ };
+ // Trigger a heartbeat so that it acknowledges the NN as active.
+ dn.getAllBpOs()[0].triggerHeartbeatForTests();
}
/**
@@ -355,9 +395,11 @@ public void testRWRReplicas() throws IOException {
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
+ DatanodeInfo mockOtherDN = new DatanodeInfo(
+ new DatanodeID("127.0.0.1", "storage-1234", 0, 0));
DatanodeInfo[] locs = new DatanodeInfo[] {
new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
- mock(DatanodeInfo.class) };
+ mockOtherDN };
RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
blocks.add(rBlock);
return blocks;
@@ -495,7 +537,8 @@ public void testNotMatchedReplicaID() throws IOException {
ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
BlockWriteStreams streams = null;
try {
- streams = replicaInfo.createStreams(true, 0, 0);
+ streams = replicaInfo.createStreams(true,
+ DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
streams.checksumOut.write('a');
dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
try {

View File

@@ -3,15 +3,18 @@ Hadoop MapReduce Change Log
Trunk (unreleased changes)
INCOMPATIBLE CHANGES
MAPREDUCE-3545. Remove Avro RPC. (suresh)
NEW FEATURES
MAPREDUCE-778. Rumen Anonymizer. (Amar Kamat and Chris Douglas via amarrk)
MAPREDUCE-2669. Add new examples for Mean, Median, and Standard Deviation.
(Plamen Jeliazkov via shv)
IMPROVEMENTS
MAPREDUCE-3481. [Gridmix] Improve Gridmix STRESS mode. (amarrk)
MAPREDUCE-3597. [Rumen] Rumen should provide APIs to access all the
@@ -51,10 +54,6 @@ Trunk (unreleased changes)
MAPREDUCE-2944. Improve checking of input for JobClient.displayTasks() (XieXianshan via harsh)
BUG FIXES
- MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk)
- MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks
- (Dave Thompson via bobby)
MAPREDUCE-3194. "mapred mradmin" command is broken in mrv2
(Jason Lowe via bobby)
@@ -98,11 +97,12 @@ Release 0.23.2 - UNRELEASED
OPTIMIZATIONS
BUG FIXES
MAPREDUCE-3680. FifoScheduler web service rest API can print out invalid
JSON. (B Anil Kumar via tgraves)
- MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting
- (Ravi Prakash via bobby)
+ MAPREDUCE-3852. Test TestLinuxResourceCalculatorPlugin failing. (Thomas
+ Graves via mahadev)
Release 0.23.1 - 2012-02-08
@@ -744,6 +744,9 @@ Release 0.23.1 - 2012-02-08
MAPREDUCE-3808. Fixed an NPE in FileOutputCommitter for jobs with maps
but no reduces. (Robert Joseph Evans via vinodkv)
+ MAPREDUCE-3804. yarn webapp interface vulnerable to cross scripting attacks
+ (Dave Thompson via bobby)
MAPREDUCE-3354. Changed scripts so that jobhistory server is started by
bin/mapred instead of bin/yarn. (Jonathan Eagles via acmurthy)
@@ -795,6 +798,14 @@ Release 0.23.1 - 2012-02-08
MAPREDUCE-3828. Ensure that urls in single-node mode are correct. (sseth
via acmurthy)
+ MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk)
+ MAPREDUCE-3840. JobEndNotifier doesn't use the proxyToUse during connecting
+ (Ravi Prakash via bobby)
+ MAPREDUCE-3843. Job summary log file found missing on the RM host
+ (Anupam Seth via tgraves)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES

View File

@@ -20,6 +20,9 @@
#
# Environment Variables
#
+ # HADOOP_LOGFILE Hadoop log file.
+ # HADOOP_ROOT_LOGGER Hadoop root logger.
+ # HADOOP_JHS_LOGGER Hadoop JobSummary logger.
# YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf.
# YARN_LOG_DIR Where log files are stored. PWD by default.
# YARN_MASTER host:path where hadoop code should be rsync'd from
@@ -86,8 +89,9 @@ if [ "$YARN_PID_DIR" = "" ]; then
fi
# some variables
- export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
- export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
+ export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+ export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,DRFA}
+ export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid

View File

@@ -437,32 +437,32 @@ Hadoop MapReduce Next Generation - Cluster Setup
Format a new distributed filesystem:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format <cluster_name>
+ $ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
----
Start the HDFS with the following command, run on the designated NameNode:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
----
Run a script to start DataNodes on all slaves:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
----
Start the YARN with the following command, run on the designated
ResourceManager:
----
- $ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
----
Run a script to start NodeManagers on all slaves:
----
- $ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
----
Start a standalone WebAppProxy server. If multiple servers
@@ -476,7 +476,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
designated server:
----
- $ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR
----
* Hadoop Shutdown
@@ -485,26 +485,26 @@ Hadoop MapReduce Next Generation - Cluster Setup
NameNode:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
----
Run a script to stop DataNodes on all slaves:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
----
Stop the ResourceManager with the following command, run on the designated
ResourceManager:
----
- $ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
----
Run a script to stop NodeManagers on all slaves:
----
- $ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager
----
Stop the WebAppProxy server. If multiple servers are used with load
@@ -519,7 +519,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
designated server:
----
- $ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR
----
@@ -978,34 +978,34 @@ KVNO Timestamp Principal
Format a new distributed filesystem as <hdfs>:
----
- [hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format <cluster_name>
+ [hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
----
Start the HDFS with the following command, run on the designated NameNode
as <hdfs>:
----
- [hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR
+ [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
----
Run a script to start DataNodes on all slaves as <root> with a special
environment variable <<<HADOOP_SECURE_DN_USER>>> set to <hdfs>:
----
- [root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR
+ [root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
----
Start the YARN with the following command, run on the designated
ResourceManager as <yarn>:
----
- [yarn]$ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR
+ [yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
----
Run a script to start NodeManagers on all slaves as <yarn>:
----
- [yarn]$ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR
+ [yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
----
Start a standalone WebAppProxy server. Run on the WebAppProxy
@@ -1020,7 +1020,7 @@ KVNO Timestamp Principal
designated server as <mapred>:
----
- [mapred]$ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR
+ [mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR
----
* Hadoop Shutdown
@@ -1029,26 +1029,26 @@ KVNO Timestamp Principal
as <hdfs>:
----
- [hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR
+ [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
----
Run a script to stop DataNodes on all slaves as <root>:
----
- [root]$ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR
+ [root]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
----
Stop the ResourceManager with the following command, run on the designated
ResourceManager as <yarn>:
----
- [yarn]$ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR
+ [yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
----
Run a script to stop NodeManagers on all slaves as <yarn>:
----
- [yarn]$ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR
+ [yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager
----
Stop the WebAppProxy server. Run on the WebAppProxy server as
@@ -1063,7 +1063,7 @@ KVNO Timestamp Principal
designated server as <mapred>:
----
- [mapred]$ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR
+ [mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR
----
* {Web Interfaces}

View File

@@ -738,6 +738,25 @@
</pluginManagement>
<plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-testdirs</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ <configuration>
+ <target>
+ <mkdir dir="${test.build.dir}"/>
+ <mkdir dir="${test.build.data}"/>
+ </target>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>

View File

@@ -56,6 +56,7 @@
<item name="High Availability" href="hadoop-yarn/hadoop-yarn-site/HDFSHighAvailability.html"/>
<item name="Federation" href="hadoop-yarn/hadoop-yarn-site/Federation.html"/>
<item name="WebHDFS REST API" href="hadoop-yarn/hadoop-yarn-site/WebHDFS.html"/>
+ <item name="HttpFS Gateway" href="hadoop-hdfs-httpfs/index.html"/>
</menu>
<menu name="MapReduce" inherit="top">