HDFS-2768. BackupNode stop can not close proxy connections because it is not a proxy instance. Contributed by Uma Maheswara Rao G.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1233584 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-01-19 21:41:50 +00:00
parent 1d2e706dd1
commit fa6033a029
3 changed files with 10 additions and 7 deletions

View File

@ -169,6 +169,9 @@ Trunk (unreleased changes)
HDFS-2776. Missing interface annotation on JournalSet.
(Brandon Li via jitendra)
HDFS-2768. BackupNode stop can not close proxy connections because
it is not a proxy instance. (Uma Maheswara Rao G via eli)
Release 0.23.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -41,6 +41,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
@ -69,7 +70,7 @@ public class BackupNode extends NameNode {
private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
/** Name-node proxy */
NamenodeProtocol namenode; NamenodeProtocolTranslatorPB namenode;
/** Name-node RPC address */
String nnRpcAddress;
/** Name-node HTTP address */
@ -189,7 +190,7 @@ public void stop() {
}
// Stop the RPC client
if (namenode != null) {
RPC.stopProxy(namenode); IOUtils.cleanup(LOG, namenode);
}
namenode = null;
// Stop the checkpoint manager

View File

@ -24,10 +24,9 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
/**
@ -41,7 +40,7 @@
class EditLogBackupOutputStream extends EditLogOutputStream {
static int DEFAULT_BUFFER_SIZE = 256;
private JournalProtocol backupNode; // RPC proxy to backup node private JournalProtocolTranslatorPB backupNode; // RPC proxy to backup node
private NamenodeRegistration bnRegistration; // backup node registration
private NamenodeRegistration nnRegistration; // active node registration
private EditsDoubleBuffer doubleBuf;
@ -94,14 +93,14 @@ public void close() throws IOException {
throw new IOException("BackupEditStream has " + size +
" records still to be flushed and cannot be closed.");
}
RPC.stopProxy(backupNode); // stop the RPC threads IOUtils.cleanup(Storage.LOG, backupNode); // stop the RPC threads
doubleBuf.close();
doubleBuf = null;
}
@Override
public void abort() throws IOException {
RPC.stopProxy(backupNode); IOUtils.cleanup(Storage.LOG, backupNode);
doubleBuf = null;
}