diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 08f705a223..cca755ebf4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -143,6 +143,9 @@ Trunk (Unreleased) HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple threads (cmccabe) + HDFS-7659. truncate should check negative value of the new length. + (Yi Liu via shv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 1bb7f4a787..21f75a586a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1984,6 +1984,10 @@ public void rename(String src, String dst, Options.Rename... options) */ public boolean truncate(String src, long newLength) throws IOException { checkOpen(); + if (newLength < 0) { + throw new HadoopIllegalArgumentException( + "Cannot truncate to a negative file size: " + newLength + "."); + } TraceScope scope = getPathTraceScope("truncate", src); try { return namenode.truncate(src, newLength, clientName); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6a8f57452d..fae1641e0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1911,6 +1911,10 @@ boolean truncateInt(String srcArg, long newLength, NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src=" + src + " newLength=" + newLength); } + if (newLength < 0) { + throw new HadoopIllegalArgumentException( + "Cannot truncate to a negative file size: " + newLength + "."); + } HdfsFileStatus stat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index 5498b128c2..1612a24cf5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FSDataOutputStream; @@ -443,6 +444,14 @@ public void testTruncateFailure() throws IOException { } catch(IOException expected) {} out.close(); + try { + fs.truncate(p, -1); + fail("Truncate must fail for a negative new length."); + } catch (HadoopIllegalArgumentException expected) { + GenericTestUtils.assertExceptionContains( + "Cannot truncate to a negative file size", expected); + } + cluster.shutdownDataNodes(); NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);