From aba0e225fc2d8a6b29dea0aa7b73eb173b02e373 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 30 Aug 2011 04:02:22 +0000
Subject: [PATCH] HDFS-1217. Change some NameNode methods from public to
 package private. Contributed by Laxman

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1163081 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 +++
 .../hadoop/hdfs/server/namenode/NameNode.java  |  9 ++++----
 .../TestDataNodeMultipleRegistrations.java     | 23 ++++++++++---------
 .../hdfs/server/namenode/FSImageTestUtil.java  |  6 ++++-
 .../TestOfflineImageViewer.java                |  3 ++-
 5 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c5baf5b8eb..f693cd746d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -687,6 +687,9 @@ Release 0.23.0 - Unreleased
     HDFS-2266. Add Namesystem and SafeMode interfaces to avoid directly
     referring to FSNamesystem in BlockManager. (szetszwo)
 
+    HDFS-1217. Change some NameNode methods from public to package private.
+    (Laxman via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7f0681a2fd..52f576484a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -301,7 +301,7 @@ public static InetSocketAddress getAddress(Configuration conf) {
    * @param filesystemURI
    * @return address of file system
    */
-  public static InetSocketAddress getAddress(URI filesystemURI) {
+  static InetSocketAddress getAddress(URI filesystemURI) {
     String authority = filesystemURI.getAuthority();
     if (authority == null) {
       throw new IllegalArgumentException(String.format(
@@ -1262,7 +1262,7 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOExcept
    * @param nodeReg data node registration
    * @throws IOException
    */
-  public void verifyRequest(NodeRegistration nodeReg) throws IOException {
+  void verifyRequest(NodeRegistration nodeReg) throws IOException {
     verifyVersion(nodeReg.getVersion());
     if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
       LOG.warn("Invalid registrationID - expected: "
@@ -1278,12 +1278,13 @@ public void verifyRequest(NodeRegistration nodeReg) throws IOException {
    * @param version
    * @throws IOException
    */
-  public void verifyVersion(int version) throws IOException {
+  void verifyVersion(int version) throws IOException {
     if (version != FSConstants.LAYOUT_VERSION)
       throw new IncorrectVersionException(version, "data node");
   }
 
-  public FSImage getFSImage() {
+  /** get FSImage */
+  FSImage getFSImage() {
     return namesystem.dir.fsImage;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index a98b0afd6e..7ad0b78817 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.junit.Assert;
 import org.junit.Before;
@@ -65,14 +66,14 @@ public void test2NNRegistration() throws IOException {
     assertNotNull("cannot create nn1", nn1);
     assertNotNull("cannot create nn2", nn2);
 
-    String bpid1 = nn1.getFSImage().getBlockPoolID();
-    String bpid2 = nn2.getFSImage().getBlockPoolID();
-    String cid1 = nn1.getFSImage().getClusterID();
-    String cid2 = nn2.getFSImage().getClusterID();
-    int lv1 = nn1.getFSImage().getLayoutVersion();
-    int lv2 = nn2.getFSImage().getLayoutVersion();
-    int ns1 = nn1.getFSImage().getNamespaceID();
-    int ns2 = nn2.getFSImage().getNamespaceID();
+    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
+    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
+    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
+    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
+    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
+    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
+    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
+    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
     assertNotSame("namespace ids should be different", ns1, ns2);
     LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
         + nn1.getNameNodeAddress());
@@ -135,9 +136,9 @@ public void testFedSingleNN() throws IOException {
     NameNode nn1 = cluster.getNameNode();
     assertNotNull("cannot create nn1", nn1);
 
-    String bpid1 = nn1.getFSImage().getBlockPoolID();
-    String cid1 = nn1.getFSImage().getClusterID();
-    int lv1 = nn1.getFSImage().getLayoutVersion();
+    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
+    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
+    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
     LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
         + nn1.getNameNodeAddress());
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 5deccd5c22..4a8edb8475 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -27,7 +27,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -411,4 +410,9 @@ public static void logStorageContents(Log LOG, NNStorage storage) {
       }
     }
   }
+
+  /** get the fsImage */
+  public static FSImage getFSImage(NameNode node) {
+    return node.getFSImage();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 5fc96882f6..05311f5359 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -129,7 +129,8 @@ private File initFsimage() throws IOException {
 
       // Determine location of fsimage file
       orig = FSImageTestUtil.findLatestImageFile(
-          cluster.getNameNode().getFSImage().getStorage().getStorageDir(0));
+          FSImageTestUtil.getFSImage(
+              cluster.getNameNode()).getStorage().getStorageDir(0));
       if (orig == null) {
         fail("Didn't generate or can't find fsimage");
       }
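
For illustration, the caller-side pattern this patch establishes looks like the
minimal sketch below. It is not part of the change itself: it assumes the HDFS
test classpath, the class name FSImageAccessSketch and the MiniDFSCluster
bootstrap are made up for the example, and only accessors that appear in the
diff above (FSImageTestUtil.getFSImage, getBlockPoolID, getClusterID,
getLayoutVersion, getNamespaceID) are used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class FSImageAccessSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Assumed test-environment bootstrap; real tests set up the cluster
    // in their own @Before methods.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      NameNode nn = cluster.getNameNode();

      // NameNode#getFSImage() is now package-private, so callers outside
      // org.apache.hadoop.hdfs.server.namenode reach the FSImage through
      // the FSImageTestUtil helper added by this patch.
      String bpid = FSImageTestUtil.getFSImage(nn).getBlockPoolID();
      String cid = FSImageTestUtil.getFSImage(nn).getClusterID();
      int lv = FSImageTestUtil.getFSImage(nn).getLayoutVersion();
      int ns = FSImageTestUtil.getFSImage(nn).getNamespaceID();

      System.out.println("lv=" + lv + ";cid=" + cid + ";bpid=" + bpid
          + ";nsid=" + ns);
    } finally {
      cluster.shutdown();
    }
  }
}

The helper stays public inside the namenode package so that tests in other
packages keep compiling, while the NameNode accessors themselves no longer
leak into the class's public surface.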