diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index c056d2112d..802197e33c 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -409,6 +409,13 @@
+
+
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 63ec9a5d29..b29278bd20 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -375,7 +375,7 @@ public void onFailure(Throwable t) {
backgroundRefreshException.incrementAndGet();
backgroundRefreshRunning.decrementAndGet();
}
- });
+ }, MoreExecutors.directExecutor());
return listenableFuture;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
index a774677c5b..58a8ed278b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
@@ -196,7 +196,7 @@ private Map getNamenodesSubcluster(
try {
String nsId = nn.getNameserviceId();
String rpcAddress = nn.getRpcAddress();
- String hostname = HostAndPort.fromString(rpcAddress).getHostText();
+ String hostname = HostAndPort.fromString(rpcAddress).getHost();
ret.put(hostname, nsId);
if (hostname.equals(localHostname)) {
ret.put(localIp, nsId);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index f9d834ee43..8fd1d988b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -181,8 +181,8 @@
-
-
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index d462ef684a..324747618a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -447,7 +447,7 @@ public void onFailure(Throwable t) {
public void onSuccess(Void t) {
unreserveQueueSpace(data.length);
}
- });
+ }, MoreExecutors.directExecutor());
}
}
return ret;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index dee74e6fcf..ef32eb11c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -22,6 +22,7 @@
import java.util.concurrent.TimeoutException;
import java.util.concurrent.TimeUnit;
+import com.google.common.util.concurrent.MoreExecutors;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.Timer;
@@ -80,7 +81,7 @@ public void onFailure(Throwable t) {
public void onSuccess(RESULT res) {
qr.addResult(e.getKey(), res);
}
- });
+ }, MoreExecutors.directExecutor());
}
return qr;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 9e204cb6e3..f0bdab1c00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -1084,11 +1084,12 @@ private void persistPaxosData(long segmentTxId,
fos.write('\n');
// Write human-readable data after the protobuf. This is only
// to assist in debugging -- it's not parsed at all.
- OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8);
-
- writer.write(String.valueOf(newData));
- writer.write('\n');
- writer.flush();
+ try(OutputStreamWriter writer =
+ new OutputStreamWriter(fos, Charsets.UTF_8)) {
+ writer.write(String.valueOf(newData));
+ writer.write('\n');
+ writer.flush();
+ }
fos.flush();
success = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 5c590f6081..7c13ed0ea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -24,6 +24,7 @@
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -224,12 +225,12 @@ public Set checkAllVolumes(
Futures.addCallback(olf.get(),
new ResultHandler(reference, healthyVolumes, failedVolumes,
numVolumes, new Callback() {
- @Override
- public void call(Set ignored1,
- Set ignored2) {
- latch.countDown();
- }
- }));
+ @Override
+ public void call(Set ignored1,
+ Set ignored2) {
+ latch.countDown();
+ }
+ }), MoreExecutors.directExecutor());
} else {
IOUtils.cleanup(null, reference);
if (numVolumes.decrementAndGet() == 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index bb1ed46969..8844453933 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -182,7 +182,7 @@ public void onFailure(@Nonnull Throwable t) {
t, timer.monotonicNow()));
}
}
- });
+ }, MoreExecutors.directExecutor());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
index 91a793b7d2..926747d457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
@@ -20,6 +20,7 @@
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import org.apache.hadoop.util.FakeTimer;
import org.junit.Before;
import org.junit.Rule;
@@ -101,7 +102,7 @@ public void onFailure(Throwable t) {
numCallbackInvocationsFailure.incrementAndGet();
callbackResult.set(true);
}
- });
+ }, MoreExecutors.directExecutor());
while (!callbackResult.get()) {
// Wait for the callback
@@ -133,7 +134,8 @@ public void testDiskCheckTimeoutInvokesOneCallbackOnly() throws Exception {
.schedule(target, true);
assertTrue(olf1.isPresent());
- Futures.addCallback(olf1.get(), futureCallback);
+ Futures.addCallback(olf1.get(), futureCallback,
+ MoreExecutors.directExecutor());
// Verify that timeout results in only 1 onFailure call and 0 onSuccess
// calls.
@@ -149,7 +151,8 @@ public void testDiskCheckTimeoutInvokesOneCallbackOnly() throws Exception {
.schedule(target, true);
assertTrue(olf2.isPresent());
- Futures.addCallback(olf2.get(), futureCallback);
+ Futures.addCallback(olf2.get(), futureCallback,
+ MoreExecutors.directExecutor());
// Verify that normal check (dummy) results in only 1 onSuccess call.
// Number of times onFailure is invoked should remain the same i.e. 1.
@@ -187,7 +190,7 @@ public void onFailure(Throwable t) {
throwable[0] = t;
callbackResult.set(true);
}
- });
+ }, MoreExecutors.directExecutor());
while (!callbackResult.get()) {
// Wait for the callback
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
index 1b1025e9da..3869c493a0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
@@ -120,7 +120,8 @@ public Iterable getFileStatuses() throws InterruptedException,
runningTasks.incrementAndGet();
ListenableFuture future = exec
.submit(new ProcessInitialInputPathCallable(p, conf, inputFilter));
- Futures.addCallback(future, processInitialInputPathCallback);
+ Futures.addCallback(future, processInitialInputPathCallback,
+ MoreExecutors.directExecutor());
}
runningTasks.decrementAndGet();
@@ -267,7 +268,8 @@ public void onSuccess(ProcessInputDirCallable.Result result) {
ListenableFuture future = exec
.submit(new ProcessInputDirCallable(result.fs, fileStatus,
recursive, inputFilter));
- Futures.addCallback(future, processInputDirCallback);
+ Futures.addCallback(future, processInputDirCallback,
+ MoreExecutors.directExecutor());
}
}
decrementRunningAndCheckCompletion();
@@ -353,7 +355,8 @@ public void onSuccess(ProcessInitialInputPathCallable.Result result) {
ListenableFuture future = exec
.submit(new ProcessInputDirCallable(result.fs, matched,
recursive, inputFilter));
- Futures.addCallback(future, processInputDirCallback);
+ Futures.addCallback(future, processInputDirCallback,
+ MoreExecutors.directExecutor());
}
}
decrementRunningAndCheckCompletion();
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 7bd012df7b..aac03156d6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -92,7 +92,7 @@
3.1.0-RC1
2.1.7
-    <guava.version>11.0.2</guava.version>
+    <guava.version>27.0-jre</guava.version>
4.0
2.9.9
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
index c6e8525259..60eb9b4019 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
@@ -87,7 +87,7 @@ public static List splitToHostsAndPorts(String hostPortQuorumList)
public static String buildHostsOnlyList(List hostAndPorts) {
StringBuilder sb = new StringBuilder();
for (HostAndPort hostAndPort : hostAndPorts) {
- sb.append(hostAndPort.getHostText()).append(",");
+ sb.append(hostAndPort.getHost()).append(",");
}
if (sb.length() > 0) {
sb.delete(sb.length() - 1, sb.length());