MAPREDUCE-4723. Fix warnings found by findbugs 2. Contributed by Sandy Ryza
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1409601 13f79535-47bb-0310-9956-ffa450edef68
parent 7e2ecffd88
commit 905b17876c
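Note: most hunks below apply two recurring findbugs 2 fixes: making character encodings explicit (the patch uses Guava's Charsets.UTF_8) instead of relying on the platform default, and using %n instead of \n in format strings. The following is a minimal standalone sketch of both patterns, not part of the patch; the class and variable names are hypothetical, and StandardCharsets is used only so the snippet compiles without Guava on the classpath.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;

public class CharsetAndFormatSketch {
  public static void main(String[] args) throws UnsupportedEncodingException {
    // Explicit charset: "some text".getBytes() would use the platform default
    // encoding, which findbugs 2 flags; naming the charset makes it deterministic.
    byte[] bytes = "some text".getBytes(StandardCharsets.UTF_8);

    // Same idea for streams: give the PrintStream an explicit encoding.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(baos, true, "UTF-8");

    // %n emits the platform line separator; a literal "\n" inside a format
    // string is what findbugs 2 warns about and what this patch replaces.
    out.printf("wrote %d bytes%n", bytes.length);
    System.out.print(baos.toString("UTF-8"));
  }
}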
@@ -166,6 +166,8 @@ Release 2.0.3-alpha - Unreleased
HADOOP-8911. CRLF characters in source and text files.
(Raja Aluri via suresh)

MAPREDUCE-4723. Fix warnings found by findbugs 2. (Sandy Ryza via eli)

OPTIMIZATIONS

BUG FIXES

@@ -479,4 +479,28 @@
<Field name="sslFileBufferSize" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>

<Match>
<Class name="org.apache.hadoop.mapreduce.util.ProcessTree" />
<Method name="sendSignal" />
<Bug pattern="NP_GUARANTEED_DEREF_ON_EXCEPTION_PATH" />
</Match>

<Match>
<Class name="org.apache.hadoop.mapreduce.util.ProcessTree" />
<Method name="isSetsidSupported" />
<Bug pattern="NP_GUARANTEED_DEREF_ON_EXCEPTION_PATH" />
</Match>

<Match>
<Class name="org.apache.hadoop.mapreduce.util.ProcessTree" />
<Method name="isSetsidSupported" />
<Bug pattern="NP_NULL_ON_SOME_PATH_EXCEPTION" />
</Match>

<Match>
<Class name="org.apache.hadoop.mapreduce.v2.hs.CachedHistoryStorage$1" />
<Bug pattern="SE_BAD_FIELD_INNER_CLASS" />
</Match>

</FindBugsFilter>
@@ -359,9 +359,8 @@ private void runSubtask(org.apache.hadoop.mapred.Task task,
+ StringUtils.stringifyException(e));
}
// Report back any failures, for diagnostic purposes
ByteArrayOutputStream baos = new ByteArrayOutputStream();
exception.printStackTrace(new PrintStream(baos));
umbilical.reportDiagnosticInfo(classicAttemptID, baos.toString());
umbilical.reportDiagnosticInfo(classicAttemptID,
StringUtils.stringifyException(exception));
throw new RuntimeException();

} catch (Throwable throwable) {

@@ -315,8 +315,6 @@ public boolean statusUpdate(TaskAttemptID taskAttemptID,
+ taskStatus.getProgress());
// Task sends the updated state-string to the TT.
taskAttemptStatus.stateString = taskStatus.getStateString();
// Set the output-size when map-task finishes. Set by the task itself.
taskAttemptStatus.outputSize = taskStatus.getOutputSize();
// Task sends the updated phase to the TT.
taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
// Counters are updated by the task. Convert counters into new format as

@@ -184,10 +184,8 @@ public Object run() throws Exception {
LOG.info("Exception cleaning up: " + StringUtils.stringifyException(e));
}
// Report back any failures, for diagnostic purposes
ByteArrayOutputStream baos = new ByteArrayOutputStream();
exception.printStackTrace(new PrintStream(baos));
if (taskid != null) {
umbilical.fatalError(taskid, baos.toString());
umbilical.fatalError(taskid, StringUtils.stringifyException(exception));
}
} catch (Throwable throwable) {
LOG.fatal("Error running child : "

@@ -600,6 +600,8 @@ public void processEventForJobSummary(HistoryEvent event, JobSummary summary,
summary.setJobFinishTime(juce.getFinishTime());
setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
break;
default:
throw new YarnException("Invalid event type");
}
}

@@ -49,7 +49,6 @@ public static class TaskAttemptStatus {
public Counters counters;
public String stateString;
public Phase phase;
public long outputSize;
public List<TaskAttemptId> fetchFailedMaps;
public long mapFinishTime;
public long shuffleFinishTime;

@@ -833,6 +833,9 @@ JobStateInternal finished(JobStateInternal finalState) {
break;
case SUCCEEDED:
metrics.completedJob(this);
break;
default:
throw new IllegalArgumentException("Illegal job state: " + finalState);
}
return finalState;
}
@@ -1311,6 +1314,9 @@ public void constructFinalFullcounters() {
case REDUCE:
this.finalReduceCounters.incrAllCounters(counters);
break;
default:
throw new IllegalStateException("Task type neither map nor reduce: " +
t.getType());
}
this.fullCounters.incrAllCounters(counters);
}

@@ -1335,6 +1335,8 @@ public void transition(TaskAttemptImpl taskAttempt,
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_KILLED));
break;
default:
LOG.error("Task final state is not FAILED or KILLED: " + finalState);
}
if (taskAttempt.getLaunchTime() != 0) {
TaskAttemptUnsuccessfulCompletionEvent tauce =
@@ -210,7 +210,7 @@ protected void containerFailedOnHost(String hostName) {
return; //already blacklisted
}
Integer failures = nodeFailures.remove(hostName);
failures = failures == null ? 0 : failures;
failures = failures == null ? Integer.valueOf(0) : failures;
failures++;
LOG.info(failures + " failures on node " + hostName);
if (failures >= maxTaskFailuresPerNode) {

@@ -43,7 +43,6 @@ abstract class StartEndTimesBase implements TaskRuntimeEstimator {
static final int MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
= 1;

protected Configuration conf = null;
protected AppContext context = null;

protected final Map<TaskAttemptId, Long> startTimes
@@ -82,7 +81,6 @@ public long attemptEnrolledTime(TaskAttemptId attemptID) {

@Override
public void contextualize(Configuration conf, AppContext context) {
this.conf = conf;
this.context = context;

Map<JobId, Job> allJobs = context.getAllJobs();

@@ -285,6 +285,8 @@ private void countTasksAndAttempts(Job job) {
case SCHEDULED:
++this.mapsPending;
break;
default:
break;
}
break;
case REDUCE:
@@ -296,8 +298,13 @@ private void countTasksAndAttempts(Job job) {
case SCHEDULED:
++this.reducesPending;
break;
default:
break;
}
break;
default:
throw new IllegalStateException(
"Task type is neither map nor reduce: " + task.getType());
}
// Attempts counts
Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
@@ -337,6 +344,9 @@ private void countTasksAndAttempts(Job job) {
this.failedReduceAttempts += failed;
this.killedReduceAttempts += killed;
break;
default:
throw new IllegalStateException("Task type neither map nor reduce: " +
task.getType());
}
}
}
@@ -412,7 +412,6 @@ private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
status.id = attempt.getID();
status.mapFinishTime = 0;
status.outputSize = 0;
status.phase = phase;
status.progress = 0.5f;
status.shuffleFinishTime = 0;

@@ -86,7 +86,6 @@ public void test() throws Exception {
taskAttemptStatus.stateString = "RUNNING";
taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
taskAttemptStatus.phase = Phase.MAP;
taskAttemptStatus.outputSize = 3;
// send the status update
app.getContext().getEventHandler().handle(
new TaskAttemptStatusUpdateEvent(attempt.getID(), taskAttemptStatus));

@@ -59,6 +59,8 @@
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;

import com.google.common.base.Charsets;

/**
* Helper class for MR applications
*/
@@ -159,7 +161,8 @@ private static void setMRFrameworkClasspath(
}

if (classpathFileStream != null) {
reader = new BufferedReader(new InputStreamReader(classpathFileStream));
reader = new BufferedReader(new InputStreamReader(classpathFileStream,
Charsets.UTF_8));
String cp = reader.readLine();
if (cp != null) {
Apps.addToEnvironment(environment, Environment.CLASSPATH.name(),

@@ -420,6 +420,8 @@ private static String[] getPathStrings(String commaSeparatedPaths) {
}
break;
}
default:
continue; // nothing special to do for this character
}
}
pathStrings.add(commaSeparatedPaths.substring(pathStart, length));
@@ -18,6 +18,7 @@
package org.apache.hadoop.mapred;

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.List;
@@ -30,6 +31,8 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

/**
* <code>JobQueueClient</code> is interface provided to the user in order to get
* JobQueue related information from the {@link JobTracker}
@@ -144,7 +147,8 @@ void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer,
private void displayQueueList() throws IOException {
JobQueueInfo[] rootQueues = jc.getRootQueues();
for (JobQueueInfo queue : rootQueues) {
printJobQueueInfo(queue, new PrintWriter(System.out));
printJobQueueInfo(queue, new PrintWriter(new OutputStreamWriter(
System.out, Charsets.UTF_8)));
}
}

@@ -182,7 +186,8 @@ private void displayQueueInfo(String queue, boolean showJobs)
System.out.println("Queue \"" + queue + "\" does not exist.");
return;
}
printJobQueueInfo(jobQueueInfo, new PrintWriter(System.out));
printJobQueueInfo(jobQueueInfo, new PrintWriter(new OutputStreamWriter(
System.out, Charsets.UTF_8)));
if (showJobs && (jobQueueInfo.getChildren() == null ||
jobQueueInfo.getChildren().size() == 0)) {
JobStatus[] jobs = jobQueueInfo.getJobStatuses();
@@ -223,10 +228,10 @@ private void displayUsage(String cmd) {
if ("-queueinfo".equals(cmd)) {
System.err.println(prefix + "[" + cmd + "<job-queue-name> [-showJobs]]");
} else {
System.err.printf(prefix + "<command> <args>\n");
System.err.printf("\t[-list]\n");
System.err.printf("\t[-info <job-queue-name> [-showJobs]]\n");
System.err.printf("\t[-showacls] \n\n");
System.err.printf(prefix + "<command> <args>%n");
System.err.printf("\t[-list]%n");
System.err.printf("\t[-info <job-queue-name> [-showJobs]]%n");
System.err.printf("\t[-showacls] %n%n");
ToolRunner.printGenericCommandUsage(System.out);
}
}
@@ -49,6 +49,8 @@
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

import com.google.common.base.Charsets;

/**
* A simple logger to handle the task-specific user logs.
* This class uses the system property <code>hadoop.log.dir</code>.
@@ -104,7 +106,8 @@ private static LogFileDetail getLogFileDetail(TaskAttemptID taskid,
throws IOException {
File indexFile = getIndexFile(taskid, isCleanup);
BufferedReader fis = new BufferedReader(new InputStreamReader(
SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null)));
SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null),
Charsets.UTF_8));
//the format of the index file is
//LOG_DIR: <the dir where the task logs are really stored>
//stdout:<start-offset in the stdout file> <length>

@@ -27,6 +27,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.*;

import com.google.common.base.Charsets;

/**
* An {@link InputFormat} for plain text files. Files are broken into lines.
* Either linefeed or carriage-return are used to signal end of line. Keys are
@@ -59,7 +61,9 @@ public RecordReader<LongWritable, Text> getRecordReader(
reporter.setStatus(genericSplit.toString());
String delimiter = job.get("textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter) recordDelimiterBytes = delimiter.getBytes();
if (null != delimiter) {
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
}
return new LineRecordReader(job, (FileSplit) genericSplit,
recordDelimiterBytes);
}
@@ -49,9 +49,7 @@ public class CombineFileRecordReader<K, V> implements RecordReader<K, V> {
protected CombineFileSplit split;
protected JobConf jc;
protected Reporter reporter;
protected Class<RecordReader<K, V>> rrClass;
protected Constructor<RecordReader<K, V>> rrConstructor;
protected FileSystem fs;

protected int idx;
protected long progress;
@@ -106,7 +104,6 @@ public CombineFileRecordReader(JobConf job, CombineFileSplit split,
throws IOException {
this.split = split;
this.jc = job;
this.rrClass = rrClass;
this.reporter = reporter;
this.idx = 0;
this.curReader = null;

@@ -56,6 +56,8 @@
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;

import com.google.common.base.Charsets;

@InterfaceAudience.Private
@InterfaceStability.Unstable
class JobSubmitter {
@@ -550,7 +552,7 @@ private void readTokensFromFiles(Configuration conf, Credentials credentials)

for(Map.Entry<String, String> ent: nm.entrySet()) {
credentials.addSecretKey(new Text(ent.getKey()), ent.getValue()
.getBytes());
.getBytes(Charsets.UTF_8));
}
} catch (JsonMappingException e) {
json_error = true;

@@ -188,7 +188,7 @@ private void printCounters(StringBuffer buff, Counters totalCounters,
decimal.format(counter.getValue());

buff.append(
String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
String.format("%n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
totalGroup.getDisplayName(),
counter.getDisplayName(),
mapValue, reduceValue, totalValue));
@@ -30,6 +30,8 @@
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
@@ -58,6 +60,8 @@
public class DBInputFormat<T extends DBWritable>
extends InputFormat<LongWritable, T> implements Configurable {

private static final Log LOG = LogFactory.getLog(DBInputFormat.class);

private String dbProductName = "DEFAULT";

/**
@@ -354,6 +358,8 @@ protected void closeConnection() {
this.connection.close();
this.connection = null;
}
} catch (SQLException sqlE) { } // ignore exception on close.
} catch (SQLException sqlE) {
LOG.debug("Exception on close", sqlE);
}
}
}

@@ -219,7 +219,6 @@ public List<InputSplit> getSplits(JobContext job)
Path p = fs.makeQualified(paths[i]);
newpaths.add(p);
}
paths = null;

// In one single iteration, process all the paths in a single pool.
// Processing one pool at a time ensures that a split contains paths

@@ -46,9 +46,7 @@ public class CombineFileRecordReader<K, V> extends RecordReader<K, V> {
Integer.class};

protected CombineFileSplit split;
protected Class<? extends RecordReader<K,V>> rrClass;
protected Constructor<? extends RecordReader<K,V>> rrConstructor;
protected FileSystem fs;
protected TaskAttemptContext context;

protected int idx;
@@ -111,7 +109,6 @@ public CombineFileRecordReader(CombineFileSplit split,
throws IOException {
this.split = split;
this.context = context;
this.rrClass = rrClass;
this.idx = 0;
this.curReader = null;
this.progress = 0;

@@ -425,6 +425,8 @@ private static String[] getPathStrings(String commaSeparatedPaths) {
}
break;
}
default:
continue; // nothing special to do for this character
}
}
pathStrings.add(commaSeparatedPaths.substring(pathStart, length));

@@ -32,6 +32,8 @@
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import com.google.common.base.Charsets;

/** An {@link InputFormat} for plain text files. Files are broken into lines.
* Either linefeed or carriage-return are used to signal end of line. Keys are
* the position in the file, and values are the line of text.. */
@@ -47,7 +49,7 @@ public class TextInputFormat extends FileInputFormat<LongWritable, Text> {
"textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter)
recordDelimiterBytes = delimiter.getBytes();
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
return new LineRecordReader(recordDelimiterBytes);
}
@@ -18,21 +18,23 @@
package org.apache.hadoop.mapreduce.security;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URL;

import javax.crypto.SecretKey;
import javax.servlet.http.HttpServletRequest;

import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.record.Utils;

import com.google.common.base.Charsets;

/**
*
* utilities for generating kyes, hashes and verifying them for shuffle
@@ -41,6 +43,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SecureShuffleUtils {
private static final Log LOG = LogFactory.getLog(SecureShuffleUtils.class);

public static final String HTTP_HEADER_URL_HASH = "UrlHash";
public static final String HTTP_HEADER_REPLY_URL_HASH = "ReplyHash";

@@ -49,7 +53,8 @@ public class SecureShuffleUtils {
* @param msg
*/
public static String generateHash(byte[] msg, SecretKey key) {
return new String(Base64.encodeBase64(generateByteHash(msg, key)));
return new String(Base64.encodeBase64(generateByteHash(msg, key)),
Charsets.UTF_8);
}

/**
@@ -80,7 +85,7 @@ private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
*/
public static String hashFromString(String enc_str, SecretKey key)
throws IOException {
return generateHash(enc_str.getBytes(), key);
return generateHash(enc_str.getBytes(Charsets.UTF_8), key);
}

/**
@@ -91,9 +96,9 @@ public static String hashFromString(String enc_str, SecretKey key)
*/
public static void verifyReply(String base64Hash, String msg, SecretKey key)
throws IOException {
byte[] hash = Base64.decodeBase64(base64Hash.getBytes());
byte[] hash = Base64.decodeBase64(base64Hash.getBytes(Charsets.UTF_8));

boolean res = verifyHash(hash, msg.getBytes(), key);
boolean res = verifyHash(hash, msg.getBytes(Charsets.UTF_8), key);

if(res != true) {
throw new IOException("Verification of the hashReply failed");
@@ -126,19 +131,4 @@ public static String buildMsgFrom(HttpServletRequest request ) {
private static String buildMsgFrom(String uri_path, String uri_query, int port) {
return String.valueOf(port) + uri_path + "?" + uri_query;
}

/**
* byte array to Hex String
* @param ba
* @return string with HEX value of the key
*/
public static String toHex(byte[] ba) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
for(byte b: ba) {
ps.printf("%x", b);
}
return baos.toString();
}
}
@@ -144,7 +144,6 @@ public RawKeyValueIterator run() throws IOException, InterruptedException {
for (Fetcher<K,V> fetcher : fetchers) {
fetcher.shutDown();
}
fetchers = null;

// stop the scheduler
scheduler.close();

@@ -18,6 +18,7 @@
package org.apache.hadoop.mapreduce.tools;

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;
@@ -53,6 +54,8 @@
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.logaggregation.LogDumper;

import com.google.common.base.Charsets;

/**
* Interprets the map reduce cli options
*/
@@ -426,25 +429,25 @@ private void displayUsage(String cmd) {
" <job-id> <task-attempt-id>]. " +
" <task-attempt-id> is optional to get task attempt logs.");
} else {
System.err.printf(prefix + "<command> <args>\n");
System.err.printf("\t[-submit <job-file>]\n");
System.err.printf("\t[-status <job-id>]\n");
System.err.printf("\t[-counter <job-id> <group-name> <counter-name>]\n");
System.err.printf("\t[-kill <job-id>]\n");
System.err.printf(prefix + "<command> <args>%n");
System.err.printf("\t[-submit <job-file>]%n");
System.err.printf("\t[-status <job-id>]%n");
System.err.printf("\t[-counter <job-id> <group-name> <counter-name>]%n");
System.err.printf("\t[-kill <job-id>]%n");
System.err.printf("\t[-set-priority <job-id> <priority>]. " +
"Valid values for priorities are: " + jobPriorityValues + "\n");
System.err.printf("\t[-events <job-id> <from-event-#> <#-of-events>]\n");
System.err.printf("\t[-history <jobHistoryFile>]\n");
System.err.printf("\t[-list [all]]\n");
System.err.printf("\t[-list-active-trackers]\n");
System.err.printf("\t[-list-blacklisted-trackers]\n");
"Valid values for priorities are: " + jobPriorityValues + "%n");
System.err.printf("\t[-events <job-id> <from-event-#> <#-of-events>]%n");
System.err.printf("\t[-history <jobHistoryFile>]%n");
System.err.printf("\t[-list [all]]%n");
System.err.printf("\t[-list-active-trackers]%n");
System.err.printf("\t[-list-blacklisted-trackers]%n");
System.err.println("\t[-list-attempt-ids <job-id> <task-type> " +
"<task-state>]. " +
"Valid values for <task-type> are " + taskTypes + ". " +
"Valid values for <task-state> are " + taskStates);
System.err.printf("\t[-kill-task <task-attempt-id>]\n");
System.err.printf("\t[-fail-task <task-attempt-id>]\n");
System.err.printf("\t[-logs <job-id> <task-attempt-id>]\n\n");
System.err.printf("\t[-kill-task <task-attempt-id>]%n");
System.err.printf("\t[-fail-task <task-attempt-id>]%n");
System.err.printf("\t[-logs <job-id> <task-attempt-id>]%n%n");
ToolRunner.printGenericCommandUsage(System.out);
}
}
@@ -584,7 +587,8 @@ protected void displayTasks(Job job, String type, String state)

public void displayJobList(JobStatus[] jobs)
throws IOException, InterruptedException {
displayJobList(jobs, new PrintWriter(System.out));
displayJobList(jobs, new PrintWriter(new OutputStreamWriter(System.out,
Charsets.UTF_8)));
}

@Private
@@ -19,9 +19,10 @@
package org.apache.hadoop.mapreduce.util;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@@ -30,6 +31,8 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import com.google.common.base.Charsets;

/**
* Plugin to calculate resource information on Linux systems.
*/
@@ -152,9 +155,10 @@ private void readProcMemInfoFile(boolean readAgain) {

// Read "/proc/memInfo" file
BufferedReader in = null;
FileReader fReader = null;
InputStreamReader fReader = null;
try {
fReader = new FileReader(procfsMemFile);
fReader = new InputStreamReader(new FileInputStream(procfsMemFile),
Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// shouldn't happen....
@@ -211,9 +215,10 @@ private void readProcCpuInfoFile() {
}
// Read "/proc/cpuinfo" file
BufferedReader in = null;
FileReader fReader = null;
InputStreamReader fReader = null;
try {
fReader = new FileReader(procfsCpuFile);
fReader = new InputStreamReader(new FileInputStream(procfsCpuFile),
Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// shouldn't happen....
@@ -258,9 +263,10 @@ private void readProcCpuInfoFile() {
private void readProcStatFile() {
// Read "/proc/stat" file
BufferedReader in = null;
FileReader fReader = null;
InputStreamReader fReader = null;
try {
fReader = new FileReader(procfsStatFile);
fReader = new InputStreamReader(new FileInputStream(procfsStatFile),
Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// shouldn't happen....
@@ -26,7 +26,6 @@

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;

@@ -20,9 +20,10 @@

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
@@ -39,6 +40,8 @@
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Charsets;

/**
* A Proc file-system based ProcessTree. Works only on Linux.
*/
@@ -350,7 +353,7 @@ public void destroy(boolean inBackground) {
}

private static final String PROCESSTREE_DUMP_FORMAT =
"\t|- %s %s %d %d %s %d %d %d %d %s\n";
"\t|- %s %s %d %d %s %d %d %d %d %s%n";

/**
* Get a dump of the process-tree.
@@ -363,7 +366,7 @@ public String getProcessTreeDump() {
// The header.
ret.append(String.format("\t|- PID PPID PGRPID SESSID CMD_NAME "
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE%n"));
for (ProcessInfo p : processTree.values()) {
if (p != null) {
ret.append(String.format(PROCESSTREE_DUMP_FORMAT, p.getPid(), p
@@ -505,10 +508,11 @@ private static ProcessInfo constructProcessInfo(ProcessInfo pinfo,
ProcessInfo ret = null;
// Read "procfsDir/<pid>/stat" file - typically /proc/<pid>/stat
BufferedReader in = null;
FileReader fReader = null;
InputStreamReader fReader = null;
try {
File pidDir = new File(procfsDir, pinfo.getPid());
fReader = new FileReader(new File(pidDir, PROCFS_STAT_FILE));
fReader = new InputStreamReader(new FileInputStream(
new File(pidDir, PROCFS_STAT_FILE)), Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// The process vanished in the interim!
@@ -695,11 +699,11 @@ public String getCmdLine(String procfsDir) {
return ret;
}
BufferedReader in = null;
FileReader fReader = null;
InputStreamReader fReader = null;
try {
fReader =
new FileReader(new File(new File(procfsDir, pid),
PROCFS_CMDLINE_FILE));
fReader = new InputStreamReader(new FileInputStream(
new File(new File(procfsDir, pid), PROCFS_CMDLINE_FILE)),
Charsets.UTF_8);
} catch (FileNotFoundException f) {
// The process vanished in the interim!
return ret;
@@ -535,8 +535,9 @@ private void addDirectoryToSerialNumberIndex(Path serialDirPath) {
if (serialPart == null) {
LOG.warn("Could not find serial portion from path: "
+ serialDirPath.toString() + ". Continuing with next");
} else {
serialNumberIndex.add(serialPart, timestampPart);
}
serialNumberIndex.add(serialPart, timestampPart);
}

private void addDirectoryToJobListCache(Path path) throws IOException {

@@ -38,7 +38,7 @@
*/
public class HsJobsBlock extends HtmlBlock {
final AppContext appContext;
static final SimpleDateFormat dateFormat =
final SimpleDateFormat dateFormat =
new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");

@Inject HsJobsBlock(AppContext appCtx) {

@@ -106,6 +106,7 @@
import org.jboss.netty.handler.stream.ChunkedWriteHandler;
import org.jboss.netty.util.CharsetUtil;

import com.google.common.base.Charsets;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class ShuffleHandler extends AbstractService
@@ -490,7 +491,8 @@ private void verifyRequest(String appid, ChannelHandlerContext ctx,
SecureShuffleUtils.verifyReply(urlHashStr, enc_str, tokenSecret);
// verification passed - encode the reply
String reply =
SecureShuffleUtils.generateHash(urlHashStr.getBytes(), tokenSecret);
SecureShuffleUtils.generateHash(urlHashStr.getBytes(Charsets.UTF_8),
tokenSecret);
response.setHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
if (LOG.isDebugEnabled()) {
int len = reply.length();

@@ -60,4 +60,10 @@
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
</Match>

<Match>
<Class name="org.apache.hadoop.examples.terasort.TeraInputFormat" />
<Method name="getSplits" />
<Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" />
</Match>

</FindBugsFilter>
@@ -103,6 +103,11 @@
<artifactId>hsqldb</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>

<build>

@@ -22,7 +22,9 @@
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -50,6 +52,8 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

/**
* A map/reduce program that uses Bailey-Borwein-Plouffe to compute exact
* digits of Pi.
@@ -151,7 +155,8 @@ protected void cleanup(Context context
LOG.info("Writing text output to " + outfile);
final OutputStream outputstream = fs.create(outfile);
try {
final PrintStream out = new PrintStream(outputstream, true);
final PrintWriter out = new PrintWriter(
new OutputStreamWriter(outputstream, Charsets.UTF_8), true);
// write hex text
print(out, hex.iterator(), "Pi = 0x3.", "%02X", 5, 5);
out.println("Total number of hexadecimal digits is "
@@ -184,7 +189,7 @@ public void remove() {
}

/** Print out elements in a nice format. */
private static <T> void print(PrintStream out, Iterator<T> iterator,
private static <T> void print(PrintWriter out, Iterator<T> iterator,
String prefix, String format, int elementsPerGroup, int groupsPerLine) {
final StringBuilder sb = new StringBuilder("\n");
for (int i = 0; i < prefix.length(); i++)
@@ -37,6 +37,8 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

public class WordMean extends Configured implements Tool {

private double mean = 0;
@@ -125,7 +127,7 @@ private double readAndCalcMean(Path path, Configuration conf)

// average = total sum / number of elements;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file)));
br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));

long count = 0;
long length = 0;
@@ -151,7 +153,9 @@ private double readAndCalcMean(Path path, Configuration conf)
System.out.println("The mean is: " + theMean);
return theMean;
} finally {
br.close();
if (br != null) {
br.close();
}
}
}

@@ -38,6 +38,8 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

public class WordMedian extends Configured implements Tool {

private double median = 0;
@@ -127,7 +129,7 @@ private double readAndFindMedian(String path, int medianIndex1,
BufferedReader br = null;

try {
br = new BufferedReader(new InputStreamReader(fs.open(file)));
br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));
int num = 0;

String line;
@@ -157,7 +159,9 @@ private double readAndFindMedian(String path, int medianIndex1,
}
}
} finally {
br.close();
if (br != null) {
br.close();
}
}
// error, no median found
return -1;

@@ -37,6 +37,8 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

public class WordStandardDeviation extends Configured implements Tool {

private double stddev = 0;
@@ -135,7 +137,7 @@ private double readAndCalcStdDev(Path path, Configuration conf)
double stddev = 0;
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(fs.open(file)));
br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));
long count = 0;
long length = 0;
long square = 0;
@@ -166,7 +168,9 @@ private double readAndCalcStdDev(Path path, Configuration conf)
stddev = Math.sqrt((term - mean));
System.out.println("The standard deviation is: " + stddev);
} finally {
br.close();
if (br != null) {
br.close();
}
}
return stddev;
}
@@ -33,6 +33,8 @@
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.*;

import com.google.common.base.Charsets;

/**
* Launch a distributed pentomino solver.
* It generates a complete list of prefixes of length N with each unique prefix
@@ -137,9 +139,9 @@ private static long createInputDirectory(FileSystem fs,
fs.mkdirs(dir);
List<int[]> splits = pent.getSplits(depth);
Path input = new Path(dir, "part1");
PrintStream file =
new PrintStream(new BufferedOutputStream
(fs.create(input), 64*1024));
PrintWriter file =
new PrintWriter(new OutputStreamWriter(new BufferedOutputStream
(fs.create(input), 64*1024), Charsets.UTF_8));
for(int[] prefix: splits) {
for(int i=0; i < prefix.length; ++i) {
if (i != 0) {

@@ -21,6 +21,8 @@
import java.io.*;
import java.util.*;

import com.google.common.base.Charsets;

/**
* This class uses the dancing links algorithm from Knuth to solve sudoku
* puzzles. It has solved 42x42 puzzles in 1.02 seconds.
@@ -133,7 +135,8 @@ public void solution(List<List<ColumnName>> names) {
* @param stream The input stream to read the data from
*/
public Sudoku(InputStream stream) throws IOException {
BufferedReader file = new BufferedReader(new InputStreamReader(stream));
BufferedReader file = new BufferedReader(
new InputStreamReader(stream, Charsets.UTF_8));
String line = file.readLine();
List<int[]> result = new ArrayList<int[]>();
while (line != null) {
@@ -19,9 +19,11 @@

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
@@ -32,6 +34,8 @@
import org.apache.hadoop.examples.pi.math.Bellard;
import org.apache.hadoop.examples.pi.math.Bellard.Parameter;

import com.google.common.base.Charsets;

/** A class for parsing outputs */
public final class Parser {
static final String VERBOSE_PROPERTY = "pi.parser.verbose";
@@ -71,7 +75,8 @@ private void parse(File f, Map<Parameter, List<TaskResult>> sums) throws IOExcep
for(Parameter p : Parameter.values())
m.put(p, new ArrayList<TaskResult>());

final BufferedReader in = new BufferedReader(new FileReader(f));
final BufferedReader in = new BufferedReader(
new InputStreamReader(new FileInputStream(f), Charsets.UTF_8));
try {
for(String line; (line = in.readLine()) != null; )
try {
@@ -127,7 +132,8 @@ Map<Parameter, List<TaskResult>> parse(String inputpath, String outputdir
Collections.sort(results);

final PrintWriter out = new PrintWriter(
new FileWriter(new File(outputdir, p + ".txt")), true);
new OutputStreamWriter(new FileOutputStream(
new File(outputdir, p + ".txt")), Charsets.UTF_8), true);
try {
for(int i = 0; i < results.size(); i++)
out.println(DistSum.taskResult2string(p + "." + i, results.get(i)));
@@ -19,9 +19,10 @@

import java.io.BufferedReader;
import java.io.File;
import java.io.FileWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.text.SimpleDateFormat;
@@ -46,6 +47,8 @@
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

/** Utility methods */
public class Util {
/** Output stream */
@@ -81,7 +84,7 @@ public synchronized long tick(String s) {
final long t = System.currentTimeMillis();
final long delta = t - (isAccumulative? start: previous);
if (s != null) {
out.format("%15dms (=%-15s: %s\n", delta, millis2String(delta) + ")", s);
out.format("%15dms (=%-15s: %s%n", delta, millis2String(delta) + ")", s);
out.flush();
}
previous = t;
@@ -203,16 +206,16 @@ public static void checkDirectory(File dir) {
throw new IllegalArgumentException("dir (=" + dir + ") is not a directory.");
}

private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("-yyyyMMdd-HHmmssSSS");
/** Create a writer of a local file. */
public static PrintWriter createWriter(File dir, String prefix) throws IOException {
checkDirectory(dir);

SimpleDateFormat dateFormat = new SimpleDateFormat("-yyyyMMdd-HHmmssSSS");
for(;;) {
final File f = new File(dir,
prefix + DATE_FORMAT.format(new Date(System.currentTimeMillis())) + ".txt");
prefix + dateFormat.format(new Date(System.currentTimeMillis())) + ".txt");
if (!f.exists())
return new PrintWriter(new FileWriter(f));
return new PrintWriter(new OutputStreamWriter(new FileOutputStream(f), Charsets.UTF_8));

try {Thread.sleep(10);} catch (InterruptedException e) {}
}
@@ -286,7 +289,8 @@ static List<TaskResult> readJobOutputs(FileSystem fs, Path outdir) throws IOExce
final List<TaskResult> results = new ArrayList<TaskResult>();
for(FileStatus status : fs.listStatus(outdir)) {
if (status.getPath().getName().startsWith("part-")) {
final BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(status.getPath())));
final BufferedReader in = new BufferedReader(
new InputStreamReader(fs.open(status.getPath()), Charsets.UTF_8));
try {
for(String line; (line = in.readLine()) != null; )
results.add(TaskResult.valueOf(line));
@@ -305,7 +309,7 @@ static List<TaskResult> readJobOutputs(FileSystem fs, Path outdir) throws IOExce
static void writeResults(String name, List<TaskResult> results, FileSystem fs, String dir) throws IOException {
final Path outfile = new Path(dir, name + ".txt");
Util.out.println(name + "> writing results to " + outfile);
final PrintStream out = new PrintStream(fs.create(outfile), true);
final PrintWriter out = new PrintWriter(new OutputStreamWriter(fs.create(outfile), Charsets.UTF_8), true);
try {
for(TaskResult r : results)
out.println(r);
@@ -29,6 +29,8 @@
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;

import com.google.common.base.Charsets;

class TeraScheduler {
static String USE = "mapreduce.terasort.use.terascheduler";
private static final Log LOG = LogFactory.getLog(TeraScheduler.class);
@@ -73,7 +75,8 @@ public String toString() {

List<String> readFile(String filename) throws IOException {
List<String> result = new ArrayList<String>(10000);
BufferedReader in = new BufferedReader(new FileReader(filename));
BufferedReader in = new BufferedReader(
new InputStreamReader(new FileInputStream(filename), Charsets.UTF_8));
String line = in.readLine();
while (line != null) {
result.add(line);