From dc7a061668a3f4d86fe1b07a40d46774b5386938 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Fri, 14 Aug 2015 14:42:43 -0700
Subject: [PATCH] HDFS-8891. HDFS concat should keep srcs order. Contributed
 by Yong Zhang.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../hdfs/server/namenode/FSDirConcatOp.java   |  5 ++--
 .../hdfs/server/namenode/TestHDFSConcat.java  | 24 +++++++++++++++----
 3 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be799aff3a..20b5467079 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1141,6 +1141,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)
 
+    HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index bb001304be..786284d1ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,7 @@
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.Set;
 import java.util.List;
 
@@ -103,7 +104,7 @@ private static void verifyTargetFile(FSDirectory fsd, final String target,
   private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
       INodesInPath targetIIP, FSPermissionChecker pc) throws IOException {
     // to make sure no two files are the same
-    Set<INodeFile> si = new HashSet<>();
+    Set<INodeFile> si = new LinkedHashSet<>();
     final INodeFile targetINode = targetIIP.getLastINode().asFile();
     final INodeDirectory targetParent = targetINode.getParent();
     // now check the srcs
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index e1c3c0f5f6..4685eb907c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -111,18 +111,21 @@ public void testConcat() throws IOException, InterruptedException {
     long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
 
     Path [] files = new Path[numFiles];
-    byte [] [] bytes = new byte [numFiles][(int)fileLen];
+    byte[][] bytes = new byte[numFiles + 1][(int) fileLen];
     LocatedBlocks [] lblocks = new LocatedBlocks[numFiles];
     long [] lens = new long [numFiles];
-
+    stm = dfs.open(trgPath);
+    stm.readFully(0, bytes[0]);
+    stm.close();
     int i;
     for(i=0; i
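
Note on the fix (editor's illustration, not part of the patch): verifySrcFiles runs the requested source files through a Set to reject duplicates, and with a plain HashSet the iteration order depends on hashing, so the sources could be concatenated in an order different from the one the caller passed. LinkedHashSet still rejects duplicates but iterates in insertion order, which is the entire change. The following standalone sketch only demonstrates that ordering difference; the class name SetOrderDemo and the sample paths are made up for the example and are not HDFS code.

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

// Illustration only: HashSet may reorder elements by hash bucket, while
// LinkedHashSet iterates in the order elements were inserted. The concat
// fix relies on the latter so deduplicated srcs keep the caller's order.
public class SetOrderDemo {
  public static void main(String[] args) {
    String[] srcs = {"/dir/file-10", "/dir/file-2", "/dir/file-7", "/dir/file-2"};

    Set<String> hashed = new HashSet<>(Arrays.asList(srcs));
    Set<String> linked = new LinkedHashSet<>(Arrays.asList(srcs));

    // The duplicate "/dir/file-2" is dropped by both sets, but only the
    // LinkedHashSet is guaranteed to yield file-10, file-2, file-7 in order.
    System.out.println("HashSet       : " + hashed);
    System.out.println("LinkedHashSet : " + linked);
  }
}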