From 7ae5255a1613ccfb43646f33eabacf1062c86e93 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Fri, 20 Feb 2015 09:06:07 -0600
Subject: [PATCH] HDFS-7788. Post-2.6 namenode may not start up with an image
 containing inodes created with an old release. Contributed by Rushabh Shah.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 ++
 .../hdfs/server/namenode/INodeFile.java       |   3 ++
 .../hadoop/hdfs/util/LongBitFormat.java       |   4 ++
 .../hdfs/server/namenode/TestFSImage.java     |  48 ++++++++++++++++++
 .../image-with-zero-block-size.tar.gz         | Bin 0 -> 1452 bytes
 5 files changed, 58 insertions(+)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f3cc02e37..71ce48fc96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -992,6 +992,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
     (Arshad Mohammad via wheat9)
 
+    HDFS-7788. Post-2.6 namenode may not start up with an image containing
+    inodes created with an old release. (Rushabh Shah via kihwal)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 24e25ecf6b..3743bf0b45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -107,6 +107,9 @@ static byte getStoragePolicyID(long header) {
     static long toLong(long preferredBlockSize, short replication,
         byte storagePolicyID) {
       long h = 0;
+      if (preferredBlockSize == 0) {
+        preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
+      }
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
       h = REPLICATION.BITS.combine(replication, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 863d9f744f..9399d84f1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -64,4 +64,8 @@ public long combine(long value, long record) {
     }
     return (record & ~MASK) | (value << OFFSET);
   }
+
+  public long getMin() {
+    return MIN;
+  }
 }
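For context on the two hunks above: INodeFile packs preferredBlockSize, replication and the storage policy id into a single 64-bit header through LongBitFormat, and combine() rejects any value below the field's MIN. Images written before 2.1.0-beta may contain files with preferredBlockSize == 0, so loading such an inode made combine() throw and the namenode could not start; the added check clamps 0 up to getMin() instead. Below is a simplified, self-contained sketch of that behavior, not the actual Hadoop classes; the 48-bit width and minimum of 1 are illustrative assumptions.

// Simplified sketch of the failure and the fix. The real classes are
// org.apache.hadoop.hdfs.util.LongBitFormat and INodeFile's HeaderFormat;
// the field width and minimum below are assumptions for illustration.
public class ZeroBlockSizeSketch {

  /** Minimal stand-in for LongBitFormat: a bit field with a lower bound. */
  static final class BitField {
    final int offset;   // position of the field inside the 64-bit header
    final long mask;    // mask covering the field's bits
    final long min;     // smallest value combine() accepts

    BitField(int offset, int length, long min) {
      this.offset = offset;
      this.mask = ((-1L) >>> (64 - length)) << offset;
      this.min = min;
    }

    long combine(long value, long record) {
      if (value < min) {
        // This is the kind of failure a post-2.6 namenode hit while loading
        // an old image containing an inode with preferredBlockSize == 0.
        throw new IllegalArgumentException(
            "Illegal value: " + value + " < MIN = " + min);
      }
      return (record & ~mask) | (value << offset);
    }

    long getMin() {
      return min;
    }
  }

  // Illustrative layout: preferredBlockSize in the low 48 bits, minimum 1.
  static final BitField PREFERRED_BLOCK_SIZE = new BitField(0, 48, 1);

  /** Mirrors the patched toLong(): clamp 0 up to the field's minimum. */
  static long toHeader(long preferredBlockSize) {
    if (preferredBlockSize == 0) {
      preferredBlockSize = PREFERRED_BLOCK_SIZE.getMin();
    }
    return PREFERRED_BLOCK_SIZE.combine(preferredBlockSize, 0L);
  }

  public static void main(String[] args) {
    // Without the clamp this would throw IllegalArgumentException;
    // with it, a legacy zero block size is encoded as the minimum (1 here).
    System.out.println("header = " + toHeader(0));
  }
}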
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index c68ae046df..f7dad183c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -28,10 +28,13 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -40,10 +43,14 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 public class TestFSImage {
+  private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
+      "image-with-zero-block-size.tar.gz";
   @Test
   public void testPersist() throws IOException {
     Configuration conf = new Configuration();
@@ -183,4 +190,45 @@ public void testLoadMtimeAtime() throws Exception {
       }
     }
   }
+
+  /**
+   * This test loads an image containing a file whose preferredBlockSize
+   * is 0. Such files could be created before the 2.1.0-beta release, but
+   * a post-2.6 namenode could not read them back from the image and
+   * therefore failed to start up.
+   * See HDFS-7788 for more information.
+   * @throws Exception
+   */
+  @Test
+  public void testZeroBlockSize() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+        + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
+    String testDir = PathUtils.getTestDirName(getClass());
+    File dfsDir = new File(testDir, "image-with-zero-block-size");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+    File nameDir = new File(dfsDir, "name");
+    GenericTestUtils.assertExists(nameDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .waitSafeMode(false)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/tmp/zeroBlockFile");
+      assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
+      assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
+    } finally {
+      cluster.shutdown();
+      // Clean up
+      FileUtil.fullyDelete(dfsDir);
+    }
+  }
 }
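With a standard Maven/Surefire setup, the new regression test can typically be run on its own from the hadoop-hdfs module with something like mvn test -Dtest=TestFSImage#testZeroBlockSize. The test resolves the fixture tarball through the test.cache.data system property (falling back to build/test/cache), so image-with-zero-block-size.tar.gz has to be reachable there when the test is run outside the normal build.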
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..8bfc4547ff6b583cdb9108b902c23576f673818d
GIT binary patch
literal 1452
[base85-encoded contents of image-with-zero-block-size.tar.gz not reproduced]