diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index 690194d593..94c7861016 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -100,6 +100,14 @@ private PathHandle getPathHandle(Path filePath) throws IOException {
     return fs.getPathHandle(status);
   }
 
+  private long totalPartsLen(List<Path> partHandles) throws IOException {
+    long totalLen = 0;
+    for (Path p: partHandles) {
+      totalLen += fs.getFileStatus(p).getLen();
+    }
+    return totalLen;
+  }
+
   @Override
   @SuppressWarnings("deprecation") // rename w/ OVERWRITE
   public PathHandle complete(Path filePath,
@@ -127,12 +135,17 @@ public PathHandle complete(Path filePath,
         .collect(Collectors.toList());
 
     Path collectorPath = createCollectorPath(filePath);
-    Path filePathInsideCollector = mergePaths(collectorPath,
-        new Path(Path.SEPARATOR + filePath.getName()));
-    fs.create(filePathInsideCollector).close();
-    fs.concat(filePathInsideCollector,
-        partHandles.toArray(new Path[handles.size()]));
-    fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+    boolean emptyFile = totalPartsLen(partHandles) == 0;
+    if (emptyFile) {
+      fs.create(filePath).close();
+    } else {
+      Path filePathInsideCollector = mergePaths(collectorPath,
+          new Path(Path.SEPARATOR + filePath.getName()));
+      fs.create(filePathInsideCollector).close();
+      fs.concat(filePathInsideCollector,
+          partHandles.toArray(new Path[handles.size()]));
+      fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+    }
     fs.delete(collectorPath, true);
     return getPathHandle(filePath);
   }
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index 85a6861637..7cee5a6081 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
@@ -164,6 +164,28 @@ public void testMultipartUpload() throws Exception {
         payloadCount * partSizeInBytes());
   }
 
+  /**
+   * Assert that a multipart upload is successful when a single empty part is
+   * uploaded.
+   * @throws Exception failure
+   */
+  @Test
+  public void testMultipartUploadEmptyPart() throws Exception {
+    FileSystem fs = getFileSystem();
+    Path file = path("testMultipartUpload");
+    MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+    UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+    MessageDigest origDigest = DigestUtils.getMd5Digest();
+    byte[] payload = new byte[0];
+    origDigest.update(payload);
+    InputStream is = new ByteArrayInputStream(payload);
+    PartHandle partHandle = mpu.putPart(file, is, 0, uploadHandle,
+        payload.length);
+    partHandles.add(Pair.of(0, partHandle));
+    completeUpload(file, mpu, uploadHandle, partHandles, origDigest, 0);
+  }
+
   /**
    * Assert that a multipart upload is successful even when the parts are
    * given in the reverse order.