MAPREDUCE-6746. Replace org.apache.commons.io.Charsets with java.nio.charset.StandardCharsets. Contributed by Vincent Poon.

This commit is contained in:
Akira Ajisaka 2016-07-30 11:45:12 +09:00
parent 95f2b98597
commit 3f0bffea2d
2 changed files with 9 additions and 9 deletions

View File

@@ -34,7 +34,7 @@
 import java.util.Set;
 import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.io.Charsets;
+import java.nio.charset.StandardCharsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -72,7 +72,7 @@ private void testSplitRecordsForFile(Configuration conf,
     String delimiter = conf.get("textinputformat.record.delimiter");
     byte[] recordDelimiterBytes = null;
     if (null != delimiter) {
-      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+      recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     }
     // read the data without splitting to count the records
     FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
@@ -120,7 +120,7 @@ private void testLargeSplitRecordForFile(Configuration conf,
     String delimiter = conf.get("textinputformat.record.delimiter");
     byte[] recordDelimiterBytes = null;
     if (null != delimiter) {
-      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+      recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     }
     // read the data without splitting to count the records
     FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
@@ -482,7 +482,7 @@ public void testUncompressedInputCustomDelimiterPosValue()
     String inputData = "abcdefghij++kl++mno";
     Path inputFile = createInputFile(conf, inputData);
     String delimiter = "++";
-    byte[] recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     // the first split must contain two records to make sure that it also pulls
     // in the record from the 2nd split
     int splitLength = 15;
@@ -555,7 +555,7 @@ public void testUncompressedInputCustomDelimiterPosValue()
     inputData = "abcd|efgh|+|ij|kl|+|mno|pqr";
     inputFile = createInputFile(conf, inputData);
     delimiter = "|+|";
-    recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     // walking over the buffer and split sizes checks for proper processing
     // of the ambiguous bytes of the delimiter
     for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {

View File

@@ -35,7 +35,7 @@
 import java.util.Set;
 import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.io.Charsets;
+import java.nio.charset.StandardCharsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -76,7 +76,7 @@ private void testSplitRecordsForFile(Configuration conf,
     String delimiter = conf.get("textinputformat.record.delimiter");
     byte[] recordDelimiterBytes = null;
     if (null != delimiter) {
-      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+      recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     }
     TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
@@ -416,7 +416,7 @@ public void testUncompressedInputCustomDelimiterPosValue()
     String inputData = "abcdefghij++kl++mno";
     Path inputFile = createInputFile(conf, inputData);
     String delimiter = "++";
-    byte[] recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     int splitLength = 15;
     FileSplit split = new FileSplit(inputFile, 0, splitLength, (String[])null);
     TaskAttemptContext context = new TaskAttemptContextImpl(conf,
@@ -500,7 +500,7 @@ public void testUncompressedInputCustomDelimiterPosValue()
     inputData = "abcd|efgh|+|ij|kl|+|mno|pqr";
     inputFile = createInputFile(conf, inputData);
     delimiter = "|+|";
-    recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     // walking over the buffer and split sizes checks for proper processing
     // of the ambiguous bytes of the delimiter
     for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {