diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e30c52f976..2c1a51d38c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -641,6 +641,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11431. clean up redundant maven-site-plugin configuration.
     (Herve Boutemy via wheat9)
 
+    HADOOP-11429. Findbugs warnings in hadoop extras.
+    (Varun Saxena via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCh.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCh.java
index 8779e06555..ed08139623 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCh.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCh.java
@@ -238,11 +238,10 @@ public InputSplit[] getSplits(JobConf job, int numSplits
 
     Text key = new Text();
     FileOperation value = new FileOperation();
-    SequenceFile.Reader in = null;
     long prev = 0L;
     int count = 0; //count src
-    try {
-      for(in = new SequenceFile.Reader(fs, srcs, job); in.next(key, value); ) {
+    try (SequenceFile.Reader in = new SequenceFile.Reader(fs, srcs, job)) {
+      for ( ; in.next(key, value); ) {
         long curr = in.getPosition();
         long delta = curr - prev;
         if (++count > targetcount) {
@@ -252,9 +251,6 @@ public InputSplit[] getSplits(JobConf job, int numSplits
         }
       }
     }
-    finally {
-      in.close();
-    }
     long remaining = fs.getFileStatus(srcs).getLen() - prev;
     if (remaining != 0) {
       splits.add(new FileSplit(srcs, prev, remaining, (String[])null));
@@ -449,10 +445,8 @@ private boolean setup(List<FileOperation> ops, Path log)
     Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
     jobconf.set(OP_LIST_LABEL, opList.toString());
     int opCount = 0, synCount = 0;
-    SequenceFile.Writer opWriter = null;
-    try {
-      opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
-          FileOperation.class, SequenceFile.CompressionType.NONE);
+    try (SequenceFile.Writer opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
+        FileOperation.class, SequenceFile.CompressionType.NONE)) {
       for(FileOperation op : ops) {
         FileStatus srcstat = fs.getFileStatus(op.src);
         if (srcstat.isDirectory() && op.isDifferent(srcstat)) {
@@ -479,8 +473,6 @@ private boolean setup(List<FileOperation> ops, Path log)
           }
         }
       }
-    } finally {
-      opWriter.close();
     }
 
     checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
@@ -496,9 +488,7 @@ private static void checkDuplication(FileSystem fs, Path file, Path sorted,
     SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
         new Text.Comparator(), Text.class, FileOperation.class, conf);
     sorter.sort(file, sorted);
-    SequenceFile.Reader in = null;
-    try {
-      in = new SequenceFile.Reader(fs, sorted, conf);
+    try (SequenceFile.Reader in = new SequenceFile.Reader(fs, sorted, conf)) {
       FileOperation curop = new FileOperation();
       Text prevsrc = null, cursrc = new Text();
       for(; in.next(cursrc, curop); ) {
@@ -512,9 +502,6 @@ private static void checkDuplication(FileSystem fs, Path file, Path sorted,
         curop = new FileOperation();
       }
     }
-    finally {
-      in.close();
-    }
   }
 
   public static void main(String[] args) throws Exception {
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
index d1e65e2fe6..6801d6f275 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
@@ -24,6 +24,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -697,16 +698,13 @@ private static List<Path> fetchFileList(Configuration conf, Path srcList)
       throws IOException {
     List<Path> result = new ArrayList<Path>();
     FileSystem fs = srcList.getFileSystem(conf);
-    BufferedReader input = null;
-    try {
-      input = new BufferedReader(new InputStreamReader(fs.open(srcList)));
+    try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(srcList),
+        Charset.forName("UTF-8")))) {
       String line = input.readLine();
       while (line != null) {
         result.add(new Path(line));
         line = input.readLine();
       }
-    } finally {
-      checkAndClose(input);
     }
     return result;
   }
@@ -957,7 +955,7 @@ else if (opt[i] == Options.SIZE_LIMIT) {
         throw new IllegalArgumentException("num_maps not specified in -m");
       }
       try {
-        conf.setInt(MAX_MAPS_LABEL, Integer.valueOf(args[idx]));
+        conf.setInt(MAX_MAPS_LABEL, Integer.parseInt(args[idx]));
       } catch (NumberFormatException e) {
         throw new IllegalArgumentException("Invalid argument to -m: " +
             args[idx]);
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
index 4e1a6aa77c..2c89cb084d 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
@@ -23,6 +23,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
@@ -96,14 +97,11 @@ protected static List<String> readFile(Configuration conf, Path inputfile
       ) throws IOException {
     List<String> result = new ArrayList<String>();
     FileSystem fs = inputfile.getFileSystem(conf);
-    BufferedReader input = null;
-    try {
-      input = new BufferedReader(new InputStreamReader(fs.open(inputfile)));
+    try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(inputfile),
+        Charset.forName("UTF-8")))) {
       for(String line; (line = input.readLine()) != null;) {
         result.add(line);
       }
-    } finally {
-      input.close();
     }
     return result;
   }
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
index c3c8e90b2d..050bfbe2a2 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
@@ -21,6 +21,7 @@
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.nio.charset.Charset;
 import java.util.Random;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -155,15 +156,15 @@ public int compare(byte[] b1, int s1, int l1,
 
       //Compare column-wise according to *sortSpec*
       for(int i=0; i < sortSpec.length; ++i) {
-        int column = (Integer.valueOf(sortSpec[i]).intValue());
+        int column = Integer.parseInt(sortSpec[i]);
         String c1 = logColumns1[column];
         String c2 = logColumns2[column];
 
         //Compare columns
         int comparision = super.compareBytes(
-                                             c1.getBytes(), 0, c1.length(),
-                                             c2.getBytes(), 0, c2.length()
-                                             );
+            c1.getBytes(Charset.forName("UTF-8")), 0, c1.length(),
+            c2.getBytes(Charset.forName("UTF-8")), 0, c2.length()
+            );
 
         //They differ!
         if (comparision != 0) {
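
Editor's note: every hunk above applies the same two idioms: a try-with-resources
statement (Java 7+) instead of a hand-written finally block, and an explicitly
named charset instead of the platform default, the usual trigger for Findbugs'
DM_DEFAULT_ENCODING warning. The following is a minimal standalone sketch of the
combined pattern, using plain java.io rather than the Hadoop FileSystem API; the
class name and file argument are illustrative, not part of the patch.

    import java.io.BufferedReader;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.Charset;
    import java.util.ArrayList;
    import java.util.List;

    public class ReadLines {
      static List<String> readLines(String name) throws IOException {
        List<String> result = new ArrayList<String>();
        // The reader is closed automatically when the block exits, even on an
        // exception, so no finally block (and no null check) is needed.
        // Naming the charset makes the decode independent of the JVM default.
        try (BufferedReader in = new BufferedReader(new InputStreamReader(
            new FileInputStream(name), Charset.forName("UTF-8")))) {
          for (String line; (line = in.readLine()) != null; ) {
            result.add(line);
          }
        }
        return result;
      }

      public static void main(String[] args) throws IOException {
        for (String line : readLines(args[0])) {
          System.out.println(line);
        }
      }
    }

The Integer.valueOf(...).intValue() to Integer.parseInt(...) changes in DistCpV1
and Logalyzer are in the same spirit: parseInt returns a primitive directly,
avoiding the boxed Integer that valueOf allocates and immediately unwraps.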