diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java
index df5895553a..bdb6a614ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.io.retry;
 
 import java.lang.reflect.Method;
-import java.net.UnknownHostException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.RetriableException;
 
 /**
  * A dummy invocation handler extending RetryInvocationHandler. It drops the
@@ -52,7 +52,7 @@ protected Object invokeMethod(Method method, Object[] args) throws Throwable {
     if (retryCount < this.numToDrop) {
       RetryCount.set(++retryCount);
       LOG.info("Drop the response. Current retryCount == " + retryCount);
-      throw new UnknownHostException("Fake Exception");
+      throw new RetriableException("Fake Exception");
     } else {
       LOG.info("retryCount == " + retryCount
           + ". It's time to normally process the response");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index aa1f3ec0fa..a248f22cfc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -558,27 +558,25 @@ public RetryAction shouldRetry(Exception e, int retries,
           isWrappedStandbyException(e)) {
         return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
             getFailoverOrRetrySleepTime(failovers));
-      } else if (e instanceof SocketException ||
-          (e instanceof IOException && !(e instanceof RemoteException))) {
+      } else if (e instanceof RetriableException
+          || getWrappedRetriableException(e) != null) {
+        // RetriableException or RetriableException wrapped
+        return new RetryAction(RetryAction.RetryDecision.RETRY,
+            getFailoverOrRetrySleepTime(retries));
+      } else if (e instanceof SocketException
+          || (e instanceof IOException && !(e instanceof RemoteException))) {
         if (isIdempotentOrAtMostOnce) {
           return RetryAction.FAILOVER_AND_RETRY;
         } else {
           return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
-              "the invoked method is not idempotent, and unable to determine " +
-              "whether it was invoked");
+              "the invoked method is not idempotent, and unable to determine "
+                  + "whether it was invoked");
         }
       } else {
-        RetriableException re = getWrappedRetriableException(e);
-        if (re != null) {
-          return new RetryAction(RetryAction.RetryDecision.RETRY,
-              getFailoverOrRetrySleepTime(retries));
-        } else {
           return fallbackPolicy.shouldRetry(e, retries, failovers,
               isIdempotentOrAtMostOnce);
-        }
       }
     }
-    
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9f0a6a958b..a16f0b703b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -457,6 +457,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5436. Move HsFtpFileSystem and HFtpFileSystem into org.apache.hdfs.web
     (Haohui Mai via Arpit Agarwal)
 
+    HDFS-5371. Let client retry the same NN when
+    "dfs.client.test.drop.namenode.response.number" is enabled. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
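
Not part of the patch: a minimal, self-contained sketch of the retry behavior the RetryPolicies.java hunk introduces, assuming only the RetryPolicies/RetryPolicy/RetriableException APIs touched above; the class name, message strings, fallback policy, and failover limit below are arbitrary illustration choices, not anything from the patch. With the RetriableException check placed ahead of the generic IOException branch, a RetriableException -- bare, or wrapped in a RemoteException -- should now map to a plain RETRY against the same NN instead of a failover or the fallback policy.

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;

public class RetriableRetrySketch {
  public static void main(String[] args) throws Exception {
    // A failover-on-network-exception policy with a simple fallback;
    // the failover limit (3) is arbitrary for this sketch.
    RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, 3);

    // A bare RetriableException now hits the new branch and yields RETRY
    // (retry the same NN) rather than FAILOVER_AND_RETRY.
    RetryAction bare = policy.shouldRetry(
        new RetriableException("NN not ready yet"), 0, 0, true);
    System.out.println(bare.action);      // expected: RETRY

    // A RetriableException wrapped in a RemoteException is unwrapped by
    // getWrappedRetriableException() and treated the same way.
    RetryAction wrapped = policy.shouldRetry(
        new RemoteException(RetriableException.class.getName(), "busy"),
        0, 0, true);
    System.out.println(wrapped.action);   // expected: RETRY
  }
}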