From c92f6f360515cc21ecb9b9f49b3e59537ef0cb05 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Mon, 20 Apr 2015 15:42:42 -0700
Subject: [PATCH] HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Contributed by Xiaoyu Yao)

---
 .../src/main/java/org/apache/hadoop/fs/Trash.java  |  5 +++++
 .../org/apache/hadoop/fs/TrashPolicyDefault.java   |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java     |  4 +++-
 .../hadoop/hdfs/TestDistributedFileSystem.java     | 15 +++++++++++++++
 5 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 2d5f540e2d..aae5cf749b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,9 @@
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
+  private static final org.apache.commons.logging.Log LOG =
+      LogFactory.getLog(Trash.class);
+
   private TrashPolicy trashPolicy; // configured trash policy instance
 
   /**
@@ -84,6 +88,7 @@ public static boolean moveToAppropriateTrash(FileSystem fs, Path p,
     } catch (Exception e) {
       // If we can not determine that trash is enabled server side then
       // bail rather than potentially deleting a file when trash is enabled.
+      LOG.warn("Failed to get server trash configuration", e);
       throw new IOException("Failed to get server trash configuration", e);
     }
     Trash trash = new Trash(fullyResolvedFs, conf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index cfb51e24ec..d6a9b4b622 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -134,11 +134,11 @@ public boolean moveToTrash(Path path) throws IOException {
     for (int i = 0; i < 2; i++) {
       try {
         if (!fs.mkdirs(baseTrashPath, PERMISSION)) {      // create current
-          LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
+          LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
           return false;
         }
       } catch (IOException e) {
-        LOG.warn("Can't create trash directory: "+baseTrashPath);
+        LOG.warn("Can't create trash directory: " + baseTrashPath, e);
         cause = e;
         break;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8dec32e4b2..2d20812897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -561,6 +561,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8153. Error Message points to wrong parent directory in case of
     path component name length error (Anu Engineer via jitendra)
 
+    HDFS-8179. DFSClient#getServerDefaults returns null within 1
+    hour of system start. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index cc5727f554..6a73a9345a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -651,10 +651,12 @@ public long getBlockSize(String f) throws IOException {
    */
   public FsServerDefaults getServerDefaults() throws IOException {
     long now = Time.monotonicNow();
-    if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
+    if ((serverDefaults == null) ||
+        (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD)) {
       serverDefaults = namenode.getServerDefaults();
       serverDefaultsLastUpdate = now;
     }
+    assert serverDefaults != null;
     return serverDefaults;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 5be492ff4c..9a102065cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -997,4 +998,18 @@ public void testDFSClientPeerTimeout() throws IOException {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout=60000)
+  public void testGetServerDefaults() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      FsServerDefaults fsServerDefaults = dfs.getServerDefaults();
+      Assert.assertNotNull(fsServerDefaults);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
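
Note (not part of the patch): the DFSClient hunk above changes the cached-defaults refresh to also fire when the cached value is still null, not only when the validity period has elapsed. A minimal standalone sketch of that caching pattern, with hypothetical names (CachedValue, fetchFromServer) and not the actual Hadoop classes, might look like:

  import java.util.concurrent.TimeUnit;
  import java.util.function.Supplier;

  class CachedValue<T> {
    private static final long VALIDITY_PERIOD_MS = TimeUnit.HOURS.toMillis(1);
    private T cached;            // stays null until the first successful fetch
    private long lastUpdateMs;   // defaults to 0 before any refresh

    synchronized T get(Supplier<T> fetchFromServer) {
      long now = System.nanoTime() / 1_000_000L;  // monotonic milliseconds
      // Refresh when nothing is cached yet OR the cached value has expired.
      // Checking only the age (the pre-patch logic) misbehaves while the
      // monotonic clock reads less than one validity period: now - 0 never
      // exceeds the period, so the initial null is handed back to callers.
      if (cached == null || now - lastUpdateMs > VALIDITY_PERIOD_MS) {
        cached = fetchFromServer.get();
        lastUpdateMs = now;
      }
      return cached;
    }
  }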