diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0019b3a629..9065ff5c00 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -555,6 +555,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a
     file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)
 
+    HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws,
+    hadoop-azure, and hadoop-openstack. (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6bdd233506..457351d024 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -875,6 +875,8 @@ public void progressChanged(ProgressEvent progressEvent) {
         case ProgressEvent.PART_COMPLETED_EVENT_CODE:
           statistics.incrementWriteOps(1);
           break;
+        default:
+          break;
         }
       }
     };
@@ -933,6 +935,8 @@ public void progressChanged(ProgressEvent progressEvent) {
         case ProgressEvent.PART_COMPLETED_EVENT_CODE:
           statistics.incrementWriteOps(1);
           break;
+        default:
+          break;
         }
       }
     };
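The two S3AFileSystem hunks above silence findbugs' SF_SWITCH_NO_DEFAULT warning by giving each switch an explicit, empty default branch. A minimal standalone sketch of the pattern follows; the event codes and counter are hypothetical stand-ins, not the AWS SDK's ProgressEvent constants:

// Minimal sketch of the SF_SWITCH_NO_DEFAULT fix above. The event
// codes and counter are hypothetical, not the AWS SDK types.
public class SwitchDefaultExample {
  static final int PART_STARTED = 1;
  static final int PART_COMPLETED = 2;

  static int writeOps = 0;

  static void onProgress(int eventCode) {
    switch (eventCode) {
      case PART_COMPLETED:
        writeOps++;
        break;
      default:
        // Explicit no-op: documents that all other event codes are
        // ignored on purpose, so neither findbugs nor the next reader
        // has to wonder whether a case was forgotten.
        break;
    }
  }

  public static void main(String[] args) {
    onProgress(PART_STARTED);   // falls into default, ignored
    onProgress(PART_COMPLETED); // counted
    System.out.println(writeOps); // prints 1
  }
}

The empty default costs nothing at runtime; its value is purely documentary.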
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index ad2e2e6635..c136002220 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -25,6 +25,7 @@
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.charset.Charset;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -153,7 +154,7 @@ public FolderRenamePending(Path redoFile, NativeAzureFileSystem fs)
             "Error reading pending rename file contents -- "
                 + "maximum file size exceeded");
       }
-      String contents = new String(bytes, 0, l);
+      String contents = new String(bytes, 0, l, Charset.forName("UTF-8"));
 
       // parse the JSON
       ObjectMapper objMapper = new ObjectMapper();
@@ -253,7 +254,7 @@ public void writeFile(FileSystem fs) throws IOException {
 
     // Write file.
     try {
       output = fs.create(path);
-      output.write(contents.getBytes());
+      output.write(contents.getBytes(Charset.forName("UTF-8")));
     } catch (IOException e) {
       throw new IOException("Unable to write RenamePending file for folder rename from "
           + srcKey + " to " + dstKey, e);
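Both NativeAzureFileSystem hunks address findbugs' DM_DEFAULT_ENCODING warning: new String(byte[]) and String.getBytes() without an argument use the JVM's platform default charset, so the same rename-pending file could encode and decode differently on differently configured hosts. A minimal sketch, independent of the Azure code; the sample string is hypothetical:

// Minimal sketch of the DM_DEFAULT_ENCODING issue fixed above; the
// sample text is hypothetical, not Azure rename-pending content.
import java.nio.charset.Charset;

public class DefaultEncodingExample {
  public static void main(String[] args) {
    String contents = "café"; // non-ASCII, so charsets disagree

    // Flagged by findbugs: encodes with the JVM's platform default
    // charset (file.encoding), so the bytes differ between, say, a
    // UTF-8 Linux host and a windows-1252 Windows host.
    byte[] platformBytes = contents.getBytes();

    // Fixed form used in the patch: the encoding is pinned, so the
    // bytes are identical on every platform.
    byte[] utf8Bytes = contents.getBytes(Charset.forName("UTF-8"));

    // Decoding must name the same charset to round-trip losslessly.
    System.out.println(new String(utf8Bytes, Charset.forName("UTF-8")));
  }
}

On Java 7 and later the same thing can be written as StandardCharsets.UTF_8, which skips the runtime charset lookup; the Charset.forName("UTF-8") form used in the patch behaves identically and also compiles on Java 6.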
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 2d5c0c8ebd..bda6006d60 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
@@ -27,6 +26,8 @@
 import com.microsoft.windowsazure.storage.StorageException;
 import com.microsoft.windowsazure.storage.blob.CloudBlob;
 
+import java.util.concurrent.atomic.AtomicInteger;
+
 /**
  * An Azure blob lease that automatically renews itself indefinitely
  * using a background thread. Use it to synchronize distributed processes,
@@ -56,7 +57,7 @@ public class SelfRenewingLease {
   private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class);
 
   // Used to allocate thread serial numbers in thread name
-  private static volatile int threadNumber = 0;
+  private static AtomicInteger threadNumber = new AtomicInteger(0);
 
 
   // Time to wait to retry getting the lease in milliseconds
@@ -99,7 +100,7 @@ public SelfRenewingLease(CloudBlobWrapper blobWrapper)
 
     // A Renewer running should not keep JVM from exiting, so make it a daemon.
     renewer.setDaemon(true);
-    renewer.setName("AzureLeaseRenewer-" + threadNumber++);
+    renewer.setName("AzureLeaseRenewer-" + threadNumber.getAndIncrement());
     renewer.start();
     LOG.debug("Acquired lease " + leaseID + " on " + blob.getUri()
        + " managed by thread " + renewer.getName());
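The SelfRenewingLease hunks fix findbugs' VO_VOLATILE_INCREMENT warning: threadNumber++ on a volatile field is a read-modify-write of three separate steps, so two threads acquiring leases concurrently can read the same value and produce duplicate thread names. AtomicInteger.getAndIncrement() performs the whole step atomically. A minimal sketch; the class and field names are hypothetical, not the SelfRenewingLease internals:

// Minimal sketch of the VO_VOLATILE_INCREMENT fix above. Names are
// hypothetical stand-ins for the patched fields.
import java.util.concurrent.atomic.AtomicInteger;

public class LeaseThreadNaming {
  // Broken variant that findbugs flags: volatile makes writes visible,
  // but ++ is still read-then-add-then-write, so concurrent callers
  // can interleave and hand out the same serial number twice.
  private static volatile int unsafeThreadNumber = 0;

  // Fixed form used in the patch: getAndIncrement() does the
  // read-modify-write as a single atomic operation.
  private static final AtomicInteger threadNumber = new AtomicInteger(0);

  static String nextThreadName() {
    return "AzureLeaseRenewer-" + threadNumber.getAndIncrement();
  }

  public static void main(String[] args) {
    System.out.println(nextThreadName()); // AzureLeaseRenewer-0
    System.out.println(nextThreadName()); // AzureLeaseRenewer-1
  }
}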
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
index 0fe93c2f13..2bc343e172 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
@@ -27,6 +27,7 @@
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -74,7 +75,8 @@ private List<Path> fetchFileList(Path sourceListing) throws IOException {
     FileSystem fs = sourceListing.getFileSystem(getConf());
     BufferedReader input = null;
     try {
-      input = new BufferedReader(new InputStreamReader(fs.open(sourceListing)));
+      input = new BufferedReader(new InputStreamReader(fs.open(sourceListing),
+          Charset.forName("UTF-8")));
       String line = input.readLine();
       while (line != null) {
         result.add(new Path(line));
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
index b3e6b94179..0138eae412 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
@@ -45,6 +45,7 @@
 import java.io.InterruptedIOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.charset.Charset;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -352,8 +353,8 @@ private List<FileStatus> listDirectory(SwiftObjectPath path,
     final CollectionType collectionType = JSONUtil.getJsonMapper().getTypeFactory().
         constructCollectionType(List.class, SwiftObjectFileStatus.class);
-    final List<SwiftObjectFileStatus> fileStatusList =
-        JSONUtil.toObject(new String(bytes), collectionType);
+    final List<SwiftObjectFileStatus> fileStatusList = JSONUtil.toObject(
+        new String(bytes, Charset.forName("UTF-8")), collectionType);
 
     //this can happen if user lists file /data/files/file
     //in this case swift will return empty array
@@ -447,7 +448,7 @@ public List<URI> getObjectLocation(Path path) throws IOException {
       //no object location, return an empty list
       return new LinkedList<URI>();
     }
-    return extractUris(new String(objectLocation), path);
+    return extractUris(new String(objectLocation, Charset.forName("UTF-8")), path);
   }
 
   /**
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
index 7e850e713d..c9e26acf3d 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
@@ -219,9 +219,9 @@ public static void compareByteArrays(byte[] src,
       byte actual = dest[i];
       byte expected = src[i];
       String letter = toChar(actual);
-      String line = String.format("[%04d] %2x %s\n", i, actual, letter);
+      String line = String.format("[%04d] %2x %s%n", i, actual, letter);
       if (expected != actual) {
-        line = String.format("[%04d] %2x %s -expected %2x %s\n",
+        line = String.format("[%04d] %2x %s -expected %2x %s%n",
            i, actual, letter,
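The final hunk swaps literal \n for %n in String.format calls, the fix for findbugs' VA_FORMAT_STRING_USES_NEWLINE warning: in a format string, %n expands to the platform line separator, whereas \n is always a bare LF. A minimal standalone sketch; the formatted values are hypothetical, not the SwiftTestUtils comparison data:

// Minimal sketch of the VA_FORMAT_STRING_USES_NEWLINE fix above;
// the formatted values are hypothetical.
public class LineSeparatorExample {
  public static void main(String[] args) {
    int i = 7;
    byte actual = 0x41; // 'A'

    // Flagged by findbugs: a literal \n in a format string always
    // emits LF, even on Windows.
    String hardcoded = String.format("[%04d] %2x\n", i, actual);

    // Fixed form used in the patch: %n expands to the platform line
    // separator ("\r\n" on Windows, "\n" elsewhere).
    String portable = String.format("[%04d] %2x%n", i, actual);

    System.out.print(hardcoded); // [0007] 41
    System.out.print(portable);  // [0007] 41
  }
}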