From fa6033a029aeb04ebb0b6221bdc9e6e06c1bf0ba Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Thu, 19 Jan 2012 21:41:50 +0000
Subject: [PATCH 1/5] HDFS-2768. BackupNode stop can not close proxy
connections because it is not a proxy instance. Contributed by Uma Maheswara
Rao G.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1233584 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../apache/hadoop/hdfs/server/namenode/BackupNode.java | 5 +++--
.../hdfs/server/namenode/EditLogBackupOutputStream.java | 9 ++++-----
3 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e09c47a88..a55b795514 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -169,6 +169,9 @@ Trunk (unreleased changes)
HDFS-2776. Missing interface annotation on JournalSet.
(Brandon Li via jitendra)
+ HDFS-2768. BackupNode stop can not close proxy connections because
+ it is not a proxy instance. (Uma Maheswara Rao G via eli)
+
Release 0.23.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 6ef843e296..84d1c9f8a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -69,7 +70,7 @@ public class BackupNode extends NameNode {
private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
/** Name-node proxy */
- NamenodeProtocol namenode;
+ NamenodeProtocolTranslatorPB namenode;
/** Name-node RPC address */
String nnRpcAddress;
/** Name-node HTTP address */
@@ -189,7 +190,7 @@ public void stop() {
}
// Stop the RPC client
if (namenode != null) {
- RPC.stopProxy(namenode);
+ IOUtils.cleanup(LOG, namenode);
}
namenode = null;
// Stop the checkpoint manager
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
index 867a93d058..8c3ad2ecdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
@@ -24,10 +24,9 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
/**
@@ -41,7 +40,7 @@
class EditLogBackupOutputStream extends EditLogOutputStream {
static int DEFAULT_BUFFER_SIZE = 256;
- private JournalProtocol backupNode; // RPC proxy to backup node
+ private JournalProtocolTranslatorPB backupNode; // RPC proxy to backup node
private NamenodeRegistration bnRegistration; // backup node registration
private NamenodeRegistration nnRegistration; // active node registration
private EditsDoubleBuffer doubleBuf;
@@ -94,14 +93,14 @@ public void close() throws IOException {
throw new IOException("BackupEditStream has " + size +
" records still to be flushed and cannot be closed.");
}
- RPC.stopProxy(backupNode); // stop the RPC threads
+ IOUtils.cleanup(Storage.LOG, backupNode); // stop the RPC threads
doubleBuf.close();
doubleBuf = null;
}
@Override
public void abort() throws IOException {
- RPC.stopProxy(backupNode);
+ IOUtils.cleanup(Storage.LOG, backupNode);
doubleBuf = null;
}
From 68615600985dfadf067a4967a5e08c1421e7a1c8 Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Thu, 19 Jan 2012 22:23:53 +0000
Subject: [PATCH 2/5] MAPREDUCE-3692. yarn-resourcemanager out and log files
can get big. Contributed by Eli Collins
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1233605 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 ++
.../org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java | 2 +-
.../apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java | 2 +-
.../org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java | 5 ++---
.../containermanager/application/ApplicationImpl.java | 2 +-
.../containermanager/container/ContainerImpl.java | 2 +-
.../containermanager/localizer/LocalizedResource.java | 2 +-
.../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java | 2 +-
.../resourcemanager/rmapp/attempt/RMAppAttemptImpl.java | 2 +-
.../server/resourcemanager/rmcontainer/RMContainerImpl.java | 2 +-
.../yarn/server/resourcemanager/rmnode/RMNodeImpl.java | 2 +-
.../server/resourcemanager/scheduler/fifo/FifoScheduler.java | 4 ++--
12 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d57ab9c861..1290899182 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -187,6 +187,8 @@ Release 0.23.1 - Unreleased
assign only one off-switch container in a single scheduling
iteration. (Arun C Murthy via vinodkv)
+ MAPREDUCE-3692. yarn-resourcemanager out and log files can get big. (eli)
+
OPTIMIZATIONS
MAPREDUCE-3567. Extraneous JobConf objects in AM heap. (Vinod Kumar
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 808e92d762..a0e8613e20 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -632,7 +632,7 @@ protected void scheduleTasks(Set<TaskId> taskIDs) {
* The only entry point to change the Job.
*/
public void handle(JobEvent event) {
- LOG.info("Processing " + event.getJobId() + " of type " + event.getType());
+ LOG.debug("Processing " + event.getJobId() + " of type " + event.getType());
try {
writeLock.lock();
JobState oldState = getState();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index e376c9e887..fb2d32f4ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -537,7 +537,7 @@ private void addAndScheduleAttempt() {
@Override
public void handle(TaskEvent event) {
- LOG.info("Processing " + event.getTaskID() + " of type " + event.getType());
+ LOG.debug("Processing " + event.getTaskID() + " of type " + event.getType());
try {
writeLock.lock();
TaskState oldState = getState();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
index 5e84c3e501..5f973a2578 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
@@ -315,11 +315,10 @@ public Writable call(RpcKind rpcKind, String protocol,
ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest;
ProtoSpecificRpcRequest rpcRequest = request.message;
String methodName = rpcRequest.getMethodName();
- System.out.println("Call: protocol=" + protocol + ", method="
- + methodName);
- if (verbose)
+ if (verbose) {
log("Call: protocol=" + protocol + ", method="
+ methodName);
+ }
MethodDescriptor methodDescriptor = service.getDescriptorForType()
.findMethodByName(methodName);
if (methodDescriptor == null) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index afdbe5b082..d1bcaf2ef9 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -373,7 +373,7 @@ public void handle(ApplicationEvent event) {
try {
ApplicationId applicationID = event.getApplicationID();
- LOG.info("Processing " + applicationID + " of type " + event.getType());
+ LOG.debug("Processing " + applicationID + " of type " + event.getType());
ApplicationState oldState = stateMachine.getCurrentState();
ApplicationState newState = null;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 2c2d2baaa4..1cbdbaa814 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -811,7 +811,7 @@ public void handle(ContainerEvent event) {
this.writeLock.lock();
ContainerId containerID = event.getContainerID();
- LOG.info("Processing " + containerID + " of type " + event.getType());
+ LOG.debug("Processing " + containerID + " of type " + event.getType());
ContainerState oldState = stateMachine.getCurrentState();
ContainerState newState = null;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index 883b4bcf55..bee9c2d3da 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -181,7 +181,7 @@ public void handle(ResourceEvent event) {
this.writeLock.lock();
Path resourcePath = event.getLocalResourceRequest().getPath();
- LOG.info("Processing " + resourcePath + " of type " + event.getType());
+ LOG.debug("Processing " + resourcePath + " of type " + event.getType());
ResourceState oldState = this.stateMachine.getCurrentState();
ResourceState newState = null;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index f6cf29ac65..c8cdf463d5 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -413,7 +413,7 @@ public void handle(RMAppEvent event) {
try {
ApplicationId appID = event.getApplicationId();
- LOG.info("Processing event for " + appID + " of type "
+ LOG.debug("Processing event for " + appID + " of type "
+ event.getType());
final RMAppState oldState = getState();
try {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 0f695fda9f..9882c61587 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -468,7 +468,7 @@ public void handle(RMAppAttemptEvent event) {
try {
ApplicationAttemptId appAttemptID = event.getApplicationAttemptId();
- LOG.info("Processing event for " + appAttemptID + " of type "
+ LOG.debug("Processing event for " + appAttemptID + " of type "
+ event.getType());
final RMAppAttemptState oldState = getAppAttemptState();
try {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index ffdd23c438..d845edeb61 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -192,7 +192,7 @@ public Priority getReservedPriority() {
@Override
public void handle(RMContainerEvent event) {
- LOG.info("Processing " + event.getContainerId() + " of type " + event.getType());
+ LOG.debug("Processing " + event.getContainerId() + " of type " + event.getType());
try {
writeLock.lock();
RMContainerState oldState = getState();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index dd3e25fe16..7f2b48f85b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -283,7 +283,7 @@ public HeartbeatResponse getLastHeartBeatResponse() {
}
public void handle(RMNodeEvent event) {
- LOG.info("Processing " + event.getNodeId() + " of type " + event.getType());
+ LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
try {
writeLock.lock();
RMNodeState oldState = getState();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 0f6a8a84c8..48e01a72fd 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -575,12 +575,12 @@ private synchronized void nodeUpdate(RMNode rmNode,
if (Resources.greaterThanOrEqual(node.getAvailableResource(),
minimumAllocation)) {
- LOG.info("Node heartbeat " + rmNode.getNodeID() +
+ LOG.debug("Node heartbeat " + rmNode.getNodeID() +
" available resource = " + node.getAvailableResource());
assignContainers(node);
- LOG.info("Node after allocation " + rmNode.getNodeID() + " resource = "
+ LOG.debug("Node after allocation " + rmNode.getNodeID() + " resource = "
+ node.getAvailableResource());
}
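
The demotions above are mechanical (LOG.info to LOG.debug on per-event
messages), but the reason matters: these handle() methods run for every
event in the system, so at INFO they dominate the ResourceManager and
NodeManager logs. Note that a disabled LOG.debug call still pays for the
string concatenation in its argument; the usual idiom on hot paths, seen
elsewhere in this series, guards the call. A small illustrative sketch
(EventHandlerSketch and its parameters are assumptions, not patch code):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class EventHandlerSketch {
      private static final Log LOG =
          LogFactory.getLog(EventHandlerSketch.class);

      // Hypothetical per-event entry point mirroring the handle() methods.
      void handle(String eventId, String type) {
        // DEBUG is off by default, and the isDebugEnabled() guard also
        // skips building the message string when it would be discarded.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Processing " + eventId + " of type " + type);
        }
      }
    }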
From b7eb5334f5ca1fbc033084ffe9690a45e596b7bb Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Fri, 20 Jan 2012 03:33:50 +0000
Subject: [PATCH 3/5] HADOOP-7982. UserGroupInformation fails to login if
thread's context classloader can't load HadoopLoginModule. Contributed by
Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1233751 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../hadoop/security/UserGroupInformation.java | 14 ++++++++++++--
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c2ee27e0d3..c295c8855c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -279,6 +279,9 @@ Release 0.23.1 - Unreleased
HADOOP-7971. Adding back job/pipes/queue commands to bin/hadoop for
backward compatibility. (Prashath Sharma via acmurthy)
+ HADOOP-7982. UserGroupInformation fails to login if thread's context
+ classloader can't load HadoopLoginModule. (todd)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index e2e6b90512..7c7e975193 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -416,9 +416,19 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
private static LoginContext
newLoginContext(String appName, Subject subject) throws LoginException {
- return new LoginContext(appName, subject, null, new HadoopConfiguration());
+ // Temporarily switch the thread's ContextClassLoader to match this
+ // class's classloader, so that we can properly load HadoopLoginModule
+ // from the JAAS libraries.
+ Thread t = Thread.currentThread();
+ ClassLoader oldCCL = t.getContextClassLoader();
+ t.setContextClassLoader(HadoopLoginModule.class.getClassLoader());
+ try {
+ return new LoginContext(appName, subject, null, new HadoopConfiguration());
+ } finally {
+ t.setContextClassLoader(oldCCL);
+ }
}
-
+
private LoginContext getLogin() {
return user.getLogin();
}
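
JAAS resolves login module class names through the calling thread's
context classloader, so when Hadoop's classes live in a child classloader
(a webapp container, an embedding application), LoginContext cannot find
HadoopLoginModule and login fails. The fix pins the context classloader to
the one that defined HadoopLoginModule for the duration of the
LoginContext construction and restores it in a finally block. The same
pattern reduced to a generic helper; ClassLoaderUtil and withClassLoaderOf
are hypothetical names:

    import java.util.concurrent.Callable;

    class ClassLoaderUtil {
      // Runs body with the thread's context classloader set to the loader
      // that defined anchor, restoring the previous loader afterwards.
      static <T> T withClassLoaderOf(Class<?> anchor, Callable<T> body)
          throws Exception {
        Thread t = Thread.currentThread();
        ClassLoader oldCCL = t.getContextClassLoader();
        t.setContextClassLoader(anchor.getClassLoader());
        try {
          return body.call();
        } finally {
          t.setContextClassLoader(oldCCL);  // restore even if body throws
        }
      }
    }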
From 520a39ac2daf86c0d67fff1b67f5f8d63e65114c Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Fri, 20 Jan 2012 07:26:19 +0000
Subject: [PATCH 4/5] HDFS-2810. Leases not getting renewed properly by
clients. Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1233794 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../org/apache/hadoop/hdfs/DFSClient.java | 10 ++-
.../org/apache/hadoop/hdfs/LeaseRenewer.java | 10 ++-
.../apache/hadoop/hdfs/TestLeaseRenewer.java | 81 ++++++++++++++++---
4 files changed, 89 insertions(+), 14 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a55b795514..90ae91617d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -338,6 +338,8 @@ Release 0.23.1 - UNRELEASED
HDFS-2790. FSNamesystem.setTimes throws exception with wrong
configuration name in the message. (Arpit Gupta via eli)
+ HDFS-2810. Leases not getting renewed properly by clients (todd)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 2938bbd319..e52ef995f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -373,11 +373,17 @@ boolean isClientRunning() {
return clientRunning;
}
- /** Renew leases */
- void renewLease() throws IOException {
+ /**
+ * Renew leases.
+ * @return true if lease was renewed. May return false if this
+ * client has been closed or has no files open.
+ **/
+ boolean renewLease() throws IOException {
if (clientRunning && !isFilesBeingWrittenEmpty()) {
namenode.renewLease(clientName);
+ return true;
}
+ return false;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
index 14b9c9a3b7..862be0c184 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
@@ -67,7 +67,7 @@
*
*/
class LeaseRenewer {
- private static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
+ static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
@@ -407,7 +407,13 @@ public int compare(final DFSClient left, final DFSClient right) {
final DFSClient c = copies.get(i);
//skip if current client name is the same as the previous name.
if (!c.getClientName().equals(previousName)) {
- c.renewLease();
+ if (!c.renewLease()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Did not renew lease for client " +
+ c);
+ }
+ continue;
+ }
previousName = c.getClientName();
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewed for client " + previousName);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
index f3817671b0..1bdb497927 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
@@ -17,11 +17,14 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.*;
+
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -29,6 +32,8 @@
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import com.google.common.base.Supplier;
+
public class TestLeaseRenewer {
private String FAKE_AUTHORITY="hdfs://nn1/";
private UserGroupInformation FAKE_UGI_A =
@@ -46,19 +51,24 @@ public class TestLeaseRenewer {
@Before
public void setupMocksAndRenewer() throws IOException {
- MOCK_DFSCLIENT = Mockito.mock(DFSClient.class);
- Mockito.doReturn(true)
- .when(MOCK_DFSCLIENT).isClientRunning();
- Mockito.doReturn((int)FAST_GRACE_PERIOD)
- .when(MOCK_DFSCLIENT).getHdfsTimeout();
- Mockito.doReturn("myclient")
- .when(MOCK_DFSCLIENT).getClientName();
+ MOCK_DFSCLIENT = createMockClient();
renewer = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
}
+ private DFSClient createMockClient() {
+ DFSClient mock = Mockito.mock(DFSClient.class);
+ Mockito.doReturn(true)
+ .when(mock).isClientRunning();
+ Mockito.doReturn((int)FAST_GRACE_PERIOD)
+ .when(mock).getHdfsTimeout();
+ Mockito.doReturn("myclient")
+ .when(mock).getClientName();
+ return mock;
+ }
+
@Test
public void testInstanceSharing() throws IOException {
// Two lease renewers with the same UGI should return
@@ -93,11 +103,11 @@ public void testClientName() throws IOException {
public void testRenewal() throws Exception {
// Keep track of how many times the lease gets renewed
final AtomicInteger leaseRenewalCount = new AtomicInteger();
- Mockito.doAnswer(new Answer<Void>() {
+ Mockito.doAnswer(new Answer<Boolean>() {
@Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
+ public Boolean answer(InvocationOnMock invocation) throws Throwable {
leaseRenewalCount.incrementAndGet();
- return null;
+ return true;
}
}).when(MOCK_DFSCLIENT).renewLease();
@@ -120,6 +130,57 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
renewer.closeFile(filePath, MOCK_DFSCLIENT);
}
+ /**
+ * Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
+ * to several DFSClients with the same name, the first of which has no files
+ * open. Previously, this was causing the lease to not get renewed.
+ */
+ @Test
+ public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
+ // First DFSClient has no files open so doesn't renew leases.
+ final DFSClient mockClient1 = createMockClient();
+ Mockito.doReturn(false).when(mockClient1).renewLease();
+ assertSame(renewer, LeaseRenewer.getInstance(
+ FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
+
+ // Set up a file so that we start renewing our lease.
+ DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
+ String filePath = "/foo";
+ renewer.put(filePath, mockStream1, mockClient1);
+
+ // Second DFSClient does renew lease
+ final DFSClient mockClient2 = createMockClient();
+ Mockito.doReturn(true).when(mockClient2).renewLease();
+ assertSame(renewer, LeaseRenewer.getInstance(
+ FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
+
+ // Set up a file so that we start renewing our lease.
+ DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
+ renewer.put(filePath, mockStream2, mockClient2);
+
+
+ // Wait for lease to get renewed
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ try {
+ Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
+ Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
+ return true;
+ } catch (AssertionError err) {
+ LeaseRenewer.LOG.warn("Not yet satisfied", err);
+ return false;
+ } catch (IOException e) {
+ // should not throw!
+ throw new RuntimeException(e);
+ }
+ }
+ }, 100, 10000);
+
+ renewer.closeFile(filePath, mockClient1);
+ renewer.closeFile(filePath, mockClient2);
+ }
+
@Test
public void testThreadName() throws Exception {
DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
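
The underlying bug: LeaseRenewer walks its DFSClients sorted by client
name and renews once per distinct name. Before this patch it recorded
previousName unconditionally, so if the first client bearing a given name
happened to have no files open (its renewLease() silently did nothing),
every later client with the same name was skipped and the lease eventually
expired. With the fix, renewLease() reports whether it actually renewed,
and the loop only marks a name as handled on success. The corrected loop
shape, with a hypothetical Client interface standing in for DFSClient:

    import java.util.List;

    interface Client {
      String getClientName();
      boolean renewLease();  // false when closed or no files are open
    }

    class RenewLoopSketch {
      void renewAll(List<Client> clientsSortedByName) {
        String previousName = "";
        for (Client c : clientsSortedByName) {
          // skip if current client name is the same as the previous name
          if (!c.getClientName().equals(previousName)) {
            if (!c.renewLease()) {
              continue;  // let a later client with this name try instead
            }
            previousName = c.getClientName();
          }
        }
      }
    }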
From f7d20b2198e47926f5a1203cad3655a4afdfe7be Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Fri, 20 Jan 2012 07:32:06 +0000
Subject: [PATCH 5/5] HDFS-2751. Datanode may incorrectly drop OS cache behind
reads even for short reads. Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1233796 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/server/datanode/BlockSender.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 90ae91617d..f7bc7e10bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -340,6 +340,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2810. Leases not getting renewed properly by clients (todd)
+ HDFS-2751. Datanode may incorrectly drop OS cache behind reads
+ even for short reads. (todd)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index cf4e803260..a59a559636 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -315,7 +315,7 @@ class BlockSender implements java.io.Closeable {
* close opened files.
*/
public void close() throws IOException {
- if (blockInFd != null && shouldDropCacheBehindRead) {
+ if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
// drop the last few MB of the file from cache
try {
NativeIO.posixFadviseIfPossible(
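
BlockSender uses posix_fadvise(POSIX_FADV_DONTNEED) to push just-served
block data out of the OS page cache, which is a win for large sequential
scans but harmful for short random reads, where the evicted pages are
likely to be read again soon (an HBase-style workload, for example). The
streaming path already gated its cache-dropping on isLongRead(); this
one-line fix applies the same gate when the sender closes. A sketch of the
guarded close, with illustrative fields and an assumed threshold (the real
constant may differ):

    import java.io.FileDescriptor;
    import java.io.IOException;
    import org.apache.hadoop.io.nativeio.NativeIO;

    class CacheDropSketch {
      private FileDescriptor blockInFd;   // block file being served
      private long initialOffset;         // where this sender started reading
      private long offset;                // where it stopped
      private boolean shouldDropCacheBehindRead;

      // Only a large sequential read should evict its pages; 256KB is an
      // assumed cutoff for illustration.
      private boolean isLongRead() {
        return (offset - initialOffset) > 256 * 1024;
      }

      void close() throws IOException {
        if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
          // advise the OS to drop the range we streamed; a no-op where
          // native fadvise support is unavailable
          NativeIO.posixFadviseIfPossible(blockInFd, 0, offset,
              NativeIO.POSIX_FADV_DONTNEED);
        }
      }
    }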