HADOOP-17138. Fix spotbugs warnings surfaced after upgrade to 4.0.6. (#2155)

This commit is contained in:
Masatake Iwasaki 2020-07-22 13:40:20 +09:00 committed by GitHub
parent d23cc9d85d
commit 1b29c9bfee
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 50 additions and 27 deletions

View File

@ -16,8 +16,8 @@
--> -->
<FindBugsFilter> <FindBugsFilter>
<Match> <Match>
<Class name="org.apache.hadoop.fs.cosn.CosNInputStream.ReadBuffer"/> <Class name="org.apache.hadoop.fs.cosn.CosNInputStream$ReadBuffer"/>
<Method name="getBuffer"/> <Method name="getBuffer"/>
<Bug pattern="EI_EXPOSE_REP"/> <Bug pattern="EI_EXPOSE_REP"/>
</Match> </Match>
</FindBugsFilter> </FindBugsFilter>

View File

@ -3714,7 +3714,7 @@ void incrUserConnections(String user) {
if (count == null) { if (count == null) {
count = 1; count = 1;
} else { } else {
count++; count = count + 1;
} }
userToConnectionsMap.put(user, count); userToConnectionsMap.put(user, count);
} }
@ -3726,7 +3726,7 @@ void decrUserConnections(String user) {
if (count == null) { if (count == null) {
return; return;
} else { } else {
count--; count = count - 1;
} }
if (count == 0) { if (count == 0) {
userToConnectionsMap.remove(user); userToConnectionsMap.remove(user);

View File

@ -354,23 +354,29 @@ private class ResultHandler
} }
@Override @Override
public void onSuccess(@Nonnull VolumeCheckResult result) { public void onSuccess(VolumeCheckResult result) {
switch(result) { if (result == null) {
case HEALTHY: LOG.error("Unexpected health check result null for volume {}",
case DEGRADED:
LOG.debug("Volume {} is {}.", reference.getVolume(), result);
markHealthy();
break;
case FAILED:
LOG.warn("Volume {} detected as being unhealthy",
reference.getVolume()); reference.getVolume());
markFailed();
break;
default:
LOG.error("Unexpected health check result {} for volume {}",
result, reference.getVolume());
markHealthy(); markHealthy();
break; } else {
switch(result) {
case HEALTHY:
case DEGRADED:
LOG.debug("Volume {} is {}.", reference.getVolume(), result);
markHealthy();
break;
case FAILED:
LOG.warn("Volume {} detected as being unhealthy",
reference.getVolume());
markFailed();
break;
default:
LOG.error("Unexpected health check result {} for volume {}",
result, reference.getVolume());
markHealthy();
break;
}
} }
cleanup(); cleanup();
} }

View File

@ -166,7 +166,7 @@ private void addResultCachingCallback(
Checkable<K, V> target, ListenableFuture<V> lf) { Checkable<K, V> target, ListenableFuture<V> lf) {
Futures.addCallback(lf, new FutureCallback<V>() { Futures.addCallback(lf, new FutureCallback<V>() {
@Override @Override
public void onSuccess(@Nullable V result) { public void onSuccess(V result) {
synchronized (ThrottledAsyncChecker.this) { synchronized (ThrottledAsyncChecker.this) {
checksInProgress.remove(target); checksInProgress.remove(target);
completedChecks.put(target, new LastCheckResult<>( completedChecks.put(target, new LastCheckResult<>(

View File

@ -1238,7 +1238,7 @@ private void incrOpCount(FSEditLogOpCodes opCode,
holder = new Holder<Integer>(1); holder = new Holder<Integer>(1);
opCounts.put(opCode, holder); opCounts.put(opCode, holder);
} else { } else {
holder.held++; holder.held = holder.held + 1;
} }
counter.increment(); counter.increment();
} }

View File

@ -534,4 +534,16 @@
<Bug pattern="SE_BAD_FIELD_INNER_CLASS" /> <Bug pattern="SE_BAD_FIELD_INNER_CLASS" />
</Match> </Match>
<!--
HADOOP-17138: Suppress warnings about unchecked Nullable
since the method catches NullPointerException and then registers the error.
-->
<Match>
<Or>
<Class name="org.apache.hadoop.mapred.LocatedFileStatusFetcher$ProcessInputDirCallback" />
<Class name="org.apache.hadoop.mapred.LocatedFileStatusFetcher$ProcessInitialInputPathCallback" />
</Or>
<Method name="onSuccess" />
<Bug pattern="NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE" />
</Match>
</FindBugsFilter> </FindBugsFilter>

View File

@ -813,7 +813,7 @@ private void increaseQueueAppNum(String queue) throws YarnException {
if (appNum == null) { if (appNum == null) {
appNum = 1; appNum = 1;
} else { } else {
appNum++; appNum = appNum + 1;
} }
queueAppNumMap.put(queueName, appNum); queueAppNumMap.put(queueName, appNum);

View File

@ -705,4 +705,10 @@
<Method name="getDevices" /> <Method name="getDevices" />
<Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" /> <Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />
</Match> </Match>
<!-- Suppress warning about anonymous class for mocking. -->
<Match>
<Class name="~org\.apache\.hadoop\.yarn\.server\.timelineservice\.reader\.TestTimelineReaderWebServicesHBaseStorage.*" />
<Bug pattern="UMAC_UNCALLABLE_METHOD_OF_ANONYMOUS_CLASS" />
</Match>
</FindBugsFilter> </FindBugsFilter>

View File

@ -181,14 +181,13 @@ private static void waitForHBaseDown(HBaseTimelineReaderImpl htr) throws
} }
} }
private static void checkQuery(HBaseTimelineReaderImpl htr) throws private static Set<TimelineEntity> checkQuery(HBaseTimelineReaderImpl htr)
IOException { throws IOException {
TimelineReaderContext context = TimelineReaderContext context =
new TimelineReaderContext(YarnConfiguration.DEFAULT_RM_CLUSTER_ID, new TimelineReaderContext(YarnConfiguration.DEFAULT_RM_CLUSTER_ID,
null, null, null, null, TimelineEntityType null, null, null, null, TimelineEntityType
.YARN_FLOW_ACTIVITY.toString(), null, null); .YARN_FLOW_ACTIVITY.toString(), null, null);
Set<TimelineEntity> entities = htr.getEntities(context, MONITOR_FILTERS, return htr.getEntities(context, MONITOR_FILTERS, DATA_TO_RETRIEVE);
DATA_TO_RETRIEVE);
} }
private static void configure(HBaseTestingUtility util) { private static void configure(HBaseTestingUtility util) {