diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
index c745f45836..5e855de7ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
@@ -36,7 +36,9 @@
* and uses its lifecycle to start and stop the server.
*/
@InterfaceAudience.Private
-public abstract class ServerWebApp extends Server implements ServletContextListener {
+public abstract class ServerWebApp
+ extends Server
+ implements ServletContextListener {
private static final String HOME_DIR = ".home.dir";
private static final String CONFIG_DIR = ".config.dir";
@@ -61,8 +63,8 @@ public static void setHomeDirForCurrentThread(String homeDir) {
/**
* Constructor for testing purposes.
*/
- protected ServerWebApp(String name, String homeDir, String configDir, String logDir, String tempDir,
- Configuration config) {
+ protected ServerWebApp(String name, String homeDir, String configDir,
+ String logDir, String tempDir, Configuration config) {
super(name, homeDir, configDir, logDir, tempDir, config);
}
@@ -120,7 +122,8 @@ static String getHomeDir(String name) {
String sysProp = name + HOME_DIR;
homeDir = System.getProperty(sysProp);
if (homeDir == null) {
- throw new IllegalArgumentException(MessageFormat.format("System property [{0}] not defined", sysProp));
+ throw new IllegalArgumentException(MessageFormat.format(
+ "System property [{0}] not defined", sysProp));
}
}
return homeDir;
@@ -160,7 +163,8 @@ public void contextInitialized(ServletContextEvent event) {
}
/**
- * Resolves the host and port InetSocketAddress the web server is listening to.
+ * Resolves the host and port InetSocketAddress the
+ * web server is listening to.
*
* This implementation looks for the following 2 properties:
*
@@ -168,8 +172,10 @@ public void contextInitialized(ServletContextEvent event) {
* - #SERVER_NAME#.http.port
*
*
- * @return the host and port InetSocketAddress the web server is listening to.
- * @throws ServerException thrown if any of the above 2 properties is not defined.
+ * @return the host and port InetSocketAddress the
+ * web server is listening to.
+ * @throws ServerException thrown
+ * if any of the above 2 properties is not defined.
*/
protected InetSocketAddress resolveAuthority() throws ServerException {
String hostnameKey = getName() + HTTP_HOSTNAME;
@@ -233,6 +239,7 @@ public void setAuthority(InetSocketAddress authority) {
*
*/
public boolean isSslEnabled() {
- return Boolean.valueOf(System.getProperty(getName() + SSL_ENABLED, "false"));
+ return Boolean.parseBoolean(
+ System.getProperty(getName() + SSL_ENABLED, "false"));
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 80be32399e..c79e0b5632 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1954,6 +1954,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9726. Refactor IBR code to a new class. (szetszwo)
+ HDFS-9686. Remove useless boxing/unboxing code.
+ (Kousuke Saruta via aajisaka)
+
BUG FIXES
HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 93d1b33d67..a8389f00fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -414,7 +414,9 @@ private static List readXAttrsFromEditLog(DataInputStream in,
}
@SuppressWarnings("unchecked")
- static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
+ static abstract class AddCloseOp
+ extends FSEditLogOp
+ implements BlockListUpdatingOp {
int length;
long inodeId;
String path;
@@ -635,7 +637,8 @@ void readFields(DataInputStream in, int logVersion)
NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
this.storagePolicyId = FSImageSerialization.readByte(in);
} else {
- this.storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+ this.storagePolicyId =
+ HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
// read clientId and callId
readRpcIds(in, logVersion);
@@ -715,7 +718,7 @@ protected void toXml(ContentHandler contentHandler) throws SAXException {
Long.toString(inodeId));
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "REPLICATION",
- Short.valueOf(replication).toString());
+ Short.toString(replication));
XMLUtils.addSaxString(contentHandler, "MTIME",
Long.toString(mtime));
XMLUtils.addSaxString(contentHandler, "ATIME",
@@ -743,7 +746,7 @@ void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.inodeId = Long.parseLong(st.getValue("INODEID"));
this.path = st.getValue("PATH");
- this.replication = Short.valueOf(st.getValue("REPLICATION"));
+ this.replication = Short.parseShort(st.getValue("REPLICATION"));
this.mtime = Long.parseLong(st.getValue("MTIME"));
this.atime = Long.parseLong(st.getValue("ATIME"));
this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
@@ -1184,12 +1187,12 @@ public String toString() {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "REPLICATION",
- Short.valueOf(replication).toString());
+ Short.toString(replication));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
- this.replication = Short.valueOf(st.getValue("REPLICATION"));
+ this.replication = Short.parseShort(st.getValue("REPLICATION"));
}
}
@@ -1977,13 +1980,13 @@ public String toString() {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "MODE",
- Short.valueOf(permissions.toShort()).toString());
+ Short.toString(permissions.toShort()));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.permissions = new FsPermission(
- Short.valueOf(st.getValue("MODE")));
+ Short.parseShort(st.getValue("MODE")));
}
}
@@ -4467,13 +4470,13 @@ public String toString() {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "POLICYID",
- Byte.valueOf(policyId).toString());
+ Byte.toString(policyId));
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
- this.policyId = Byte.valueOf(st.getValue("POLICYID"));
+ this.policyId = Byte.parseByte(st.getValue("POLICYID"));
}
}
@@ -4950,7 +4953,8 @@ public static Block blockFromXml(Stanza st)
public static void delegationTokenToXml(ContentHandler contentHandler,
DelegationTokenIdentifier token) throws SAXException {
- contentHandler.startElement("", "", "DELEGATION_TOKEN_IDENTIFIER", new AttributesImpl());
+ contentHandler.startElement(
+ "", "", "DELEGATION_TOKEN_IDENTIFIER", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "KIND", token.getKind().toString());
XMLUtils.addSaxString(contentHandler, "SEQUENCE_NUMBER",
Integer.toString(token.getSequenceNumber()));
@@ -4996,7 +5000,8 @@ public static DelegationTokenIdentifier delegationTokenFromXml(Stanza st)
public static void delegationKeyToXml(ContentHandler contentHandler,
DelegationKey key) throws SAXException {
- contentHandler.startElement("", "", "DELEGATION_KEY", new AttributesImpl());
+ contentHandler.startElement(
+ "", "", "DELEGATION_KEY", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "KEY_ID",
Integer.toString(key.getKeyId()));
XMLUtils.addSaxString(contentHandler, "EXPIRY_DATE",
@@ -5024,7 +5029,8 @@ public static DelegationKey delegationKeyFromXml(Stanza st)
public static void permissionStatusToXml(ContentHandler contentHandler,
PermissionStatus perm) throws SAXException {
- contentHandler.startElement("", "", "PERMISSION_STATUS", new AttributesImpl());
+ contentHandler.startElement(
+ "", "", "PERMISSION_STATUS", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "USERNAME", perm.getUserName());
XMLUtils.addSaxString(contentHandler, "GROUPNAME", perm.getGroupName());
fsPermissionToXml(contentHandler, perm.getPermission());
@@ -5042,13 +5048,13 @@ public static PermissionStatus permissionStatusFromXml(Stanza st)
public static void fsPermissionToXml(ContentHandler contentHandler,
FsPermission mode) throws SAXException {
- XMLUtils.addSaxString(contentHandler, "MODE", Short.valueOf(mode.toShort())
- .toString());
+ XMLUtils.addSaxString(contentHandler, "MODE",
+ Short.toString(mode.toShort()));
}
public static FsPermission fsPermissionFromXml(Stanza st)
throws InvalidXmlException {
- short mode = Short.valueOf(st.getValue("MODE"));
+ short mode = Short.parseShort(st.getValue("MODE"));
return new FsPermission(mode);
}
@@ -5057,7 +5063,8 @@ private static void fsActionToXml(ContentHandler contentHandler, FsAction v)
XMLUtils.addSaxString(contentHandler, "PERM", v.SYMBOL);
}
- private static FsAction fsActionFromXml(Stanza st) throws InvalidXmlException {
+ private static FsAction fsActionFromXml(Stanza st)
+ throws InvalidXmlException {
FsAction v = FSACTION_SYMBOL_MAP.get(st.getValue("PERM"));
if (v == null)
throw new InvalidXmlException("Invalid value for FsAction");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index 63438ff415..2834ebb297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -173,7 +173,8 @@ public RollingWindowManager(Configuration conf, int reportingPeriodMs) {
* @param user the user that updated the metric
* @param delta the amount of change in the metric, e.g., +1
*/
- public void recordMetric(long time, String command, String user, long delta) {
+ public void recordMetric(long time, String command,
+ String user, long delta) {
RollingWindow window = getRollingWindow(command, user);
window.incAt(time, delta);
}
@@ -208,7 +209,7 @@ public TopWindow snapshot(long time) {
}
for (int i = 0; i < size; i++) {
NameValuePair userEntry = reverse.pop();
- User user = new User(userEntry.name, Long.valueOf(userEntry.value));
+ User user = new User(userEntry.name, userEntry.value);
op.addUser(user);
}
}
@@ -243,7 +244,8 @@ private TopN getTopUsersForMetric(long time, String metricName,
metricName, userName, windowSum);
topN.offer(new NameValuePair(userName, windowSum));
}
- LOG.debug("topN users size for command {} is: {}", metricName, topN.size());
+ LOG.debug("topN users size for command {} is: {}",
+ metricName, topN.size());
return topN;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
index da4b321793..13dc997432 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
@@ -69,7 +69,8 @@
* For example, if the user wants to test reading 1024MB data with 10 clients,
* he/she should firstly generate 1024MB data with 10 (or more) clients.
*/
-public class ErasureCodeBenchmarkThroughput extends Configured implements Tool {
+public class ErasureCodeBenchmarkThroughput
+ extends Configured implements Tool {
private static final int BUFFER_SIZE_MB = 128;
private static final String DFS_TMP_DIR = System.getProperty(
@@ -114,13 +115,15 @@ private static void printUsage(String msg) {
System.out.println(msg);
}
System.err.println("Usage: ErasureCodeBenchmarkThroughput " +
- " [num clients] [stf|pos]\n"
- + "Stateful and positional option is only available for read.");
+ " " +
+ " [num clients] [stf|pos]\n" +
+ "Stateful and positional option is only available for read.");
System.exit(1);
}
- private List<Long> doBenchmark(boolean isRead, int dataSizeMB, int numClients,
- boolean isEc, boolean statefulRead, boolean isGen) throws Exception {
+ private List<Long> doBenchmark(boolean isRead, int dataSizeMB,
+ int numClients, boolean isEc, boolean statefulRead, boolean isGen)
+ throws Exception {
CompletionService<Long> cs = new ExecutorCompletionService<>(
Executors.newFixedThreadPool(numClients));
for (int i = 0; i < numClients; i++) {
@@ -217,7 +220,7 @@ public int run(String[] args) throws Exception {
printUsage("Unknown operation: " + args[0]);
}
try {
- dataSizeMB = Integer.valueOf(args[1]);
+ dataSizeMB = Integer.parseInt(args[1]);
if (dataSizeMB <= 0) {
printUsage("Invalid data size: " + dataSizeMB);
}
@@ -233,7 +236,7 @@ public int run(String[] args) throws Exception {
}
if (args.length >= 4 && type != OpType.CLEAN) {
try {
- numClients = Integer.valueOf(args[3]);
+ numClients = Integer.parseInt(args[3]);
if (numClients <= 0) {
printUsage("Invalid num of clients: " + numClients);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index e6ee7f3cb5..a069003909 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -72,9 +72,9 @@
/**
* Test for short circuit read functionality using {@link BlockReaderLocal}.
* When a block is being read by a client is on the local datanode, instead of
- * using {@link DataTransferProtocol} and connect to datanode, the short circuit
- * read allows reading the file directly from the files on the local file
- * system.
+ * using {@link DataTransferProtocol} and connect to datanode,
+ * the short circuit read allows reading the file directly
+ * from the files on the local file system.
*/
public class TestShortCircuitLocalRead {
private static TemporarySocketDirectory sockDir;
@@ -195,7 +195,8 @@ static void checkFileContentDirect(URI uri, Path name, byte[] expected,
HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
- ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
+ ByteBuffer actual =
+ ByteBuffer.allocateDirect(expected.length - readOffset);
IOUtils.skipFully(stm, readOffset);
@@ -230,7 +231,8 @@ static void checkFileContentDirect(URI uri, Path name, byte[] expected,
public void doTestShortCircuitReadLegacy(boolean ignoreChecksum, int size,
int readOffset, String shortCircuitUser, String readingUser,
- boolean legacyShortCircuitFails) throws IOException, InterruptedException {
+ boolean legacyShortCircuitFails)
+ throws IOException, InterruptedException {
doTestShortCircuitReadImpl(ignoreChecksum, size, readOffset,
shortCircuitUser, readingUser, legacyShortCircuitFails);
}
@@ -247,7 +249,8 @@ public void doTestShortCircuitRead(boolean ignoreChecksum, int size,
*/
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
int readOffset, String shortCircuitUser, String readingUser,
- boolean legacyShortCircuitFails) throws IOException, InterruptedException {
+ boolean legacyShortCircuitFails)
+ throws IOException, InterruptedException {
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
@@ -262,7 +265,8 @@ public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
if (shortCircuitUser != null) {
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
shortCircuitUser);
- conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+ conf.setBoolean(
+ HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
}
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
@@ -324,7 +328,8 @@ public void testLocalReadLegacy() throws Exception {
*/
@Test(timeout=60000)
public void testLocalReadFallback() throws Exception {
- doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), "notallowed", true);
+ doTestShortCircuitReadLegacy(
+ true, 13, 0, getCurrentUser(), "notallowed", true);
}
@Test(timeout=60000)
@@ -366,8 +371,9 @@ public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
- ClientDatanodeProtocol proxy =
- DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
+ ClientDatanodeProtocol proxy =
+ DFSUtilClient.createClientDatanodeProtocolProxy(
+ dnInfo, conf, 60000, false);
try {
proxy.getBlockLocalPathInfo(blk, token);
Assert.fail("The call should have failed as this user "
@@ -387,7 +393,8 @@ public void testSkipWithVerifyChecksum() throws IOException {
int size = blockSize;
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
- conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
+ conf.setBoolean(
+ HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
@@ -434,7 +441,8 @@ public void testHandleTruncatedBlockFile() throws IOException {
MiniDFSCluster cluster = null;
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
- conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
+ conf.setBoolean(
+ HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"testHandleTruncatedBlockFile._PORT.sock").getAbsolutePath());
@@ -523,8 +531,8 @@ public static void main(String[] args) throws Exception {
System.out.println("Usage: test shortcircuit checksum threadCount");
System.exit(1);
}
- boolean shortcircuit = Boolean.valueOf(args[0]);
- boolean checksum = Boolean.valueOf(args[1]);
+ boolean shortcircuit = Boolean.parseBoolean(args[0]);
+ boolean checksum = Boolean.parseBoolean(args[1]);
int threadCount = Integer.parseInt(args[2]);
// Setup create a file
@@ -535,7 +543,8 @@ public static void main(String[] args) throws Exception {
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
checksum);
- // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
+ // Override fileSize and DATA_TO_WRITE to
+ // much larger values for benchmark test
int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
@@ -557,7 +566,8 @@ public void run() {
for (int i = 0; i < iteration; i++) {
try {
String user = getCurrentUser();
- checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
+ checkFileContent(
+ fs.getUri(), file1, dataToWrite, 0, user, conf, true);
} catch (IOException e) {
e.printStackTrace();
} catch (InterruptedException e) {
@@ -590,11 +600,13 @@ public void testReadWithRemoteBlockReader()
* through RemoteBlockReader
* @throws IOException
*/
- public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
- int size, String shortCircuitUser, int readOffset,
- boolean shortCircuitFails) throws IOException, InterruptedException {
+ public void doTestShortCircuitReadWithRemoteBlockReader(
+ boolean ignoreChecksum, int size, String shortCircuitUser,
+ int readOffset, boolean shortCircuitFails)
+ throws IOException, InterruptedException {
Configuration conf = new Configuration();
- conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+ conf.setBoolean(
+ HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
@@ -603,7 +615,8 @@ public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
// check that / exists
Path path = new Path("/");
URI uri = cluster.getURI();
- assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
+ assertTrue(
+ "/ should be a directory", fs.getFileStatus(path).isDirectory());
byte[] fileData = AppendTestUtil.randomBytes(seed, size);
Path file1 = new Path("filelocal.dat");
@@ -615,10 +628,12 @@ public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser,
conf, shortCircuitFails);
//RemoteBlockReader have unsupported method read(ByteBuffer bf)
- assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
- checkUnsupportedMethod(fs, file1, fileData, readOffset));
+ assertTrue(
+ "RemoteBlockReader unsupported method read(ByteBuffer bf) error",
+ checkUnsupportedMethod(fs, file1, fileData, readOffset));
} catch(IOException e) {
- throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
+ throw new IOException(
+ "doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
} catch(InterruptedException inEx) {
throw inEx;
} finally {
@@ -630,7 +645,8 @@ public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
byte[] expected, int readOffset) throws IOException {
HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
- ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
+ ByteBuffer actual =
+ ByteBuffer.allocateDirect(expected.length - readOffset);
IOUtils.skipFully(stm, readOffset);
try {
stm.read(actual);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
index fc64996a8e..e22a12e454 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
@@ -245,6 +245,63 @@ public void testDeletionofStagingOnKillLastTry() throws IOException {
verify(fs).delete(stagingJobPath, true);
}
+ @Test
+ public void testByPreserveFailedStaging() throws IOException {
+ conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
+ // Failed task's staging files should be kept
+ conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);
+ fs = mock(FileSystem.class);
+ when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
+ //Staging Dir exists
+ String user = UserGroupInformation.getCurrentUser().getShortUserName();
+ Path stagingDir = MRApps.getStagingAreaDir(conf, user);
+ when(fs.exists(stagingDir)).thenReturn(true);
+ ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+ 0);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+ JobId jobid = recordFactory.newRecordInstance(JobId.class);
+ jobid.setAppId(appId);
+ ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
+ Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
+ MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
+ JobStateInternal.FAILED, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
+ appMaster.init(conf);
+ appMaster.start();
+ appMaster.shutDownJob();
+ //test whether notifyIsLastAMRetry called
+ Assert.assertEquals(true, ((TestMRApp)appMaster).getTestIsLastAMRetry());
+ verify(fs, times(0)).delete(stagingJobPath, true);
+ }
+
+ @Test
+ public void testPreservePatternMatchedStaging() throws IOException {
+ conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
+ // The staging files that are matched to the pattern
+ // should not be deleted
+ conf.set(MRJobConfig.PRESERVE_FILES_PATTERN, "JobDir");
+ fs = mock(FileSystem.class);
+ when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
+ //Staging Dir exists
+ String user = UserGroupInformation.getCurrentUser().getShortUserName();
+ Path stagingDir = MRApps.getStagingAreaDir(conf, user);
+ when(fs.exists(stagingDir)).thenReturn(true);
+ ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+ 0);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+ JobId jobid = recordFactory.newRecordInstance(JobId.class);
+ jobid.setAppId(appId);
+ ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
+ Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
+ MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
+ JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
+ appMaster.init(conf);
+ appMaster.start();
+ appMaster.shutDownJob();
+ //test whether notifyIsLastAMRetry called
+ Assert.assertEquals(true, ((TestMRApp)appMaster).getTestIsLastAMRetry());
+ verify(fs, times(0)).delete(stagingJobPath, true);
+ }
+
private class TestMRApp extends MRAppMaster {
ContainerAllocator allocator;
boolean testIsLastAMRetry = false;