From a75673cbd88215afd98e9d6ac31a9d93062048eb Mon Sep 17 00:00:00 2001
From: Eli Collins <eli@apache.org>
Date: Tue, 16 Oct 2012 02:12:59 +0000
Subject: [PATCH] HDFS-4058. DirectoryScanner may fail with IOOB if the
 directory scanning threads return out of volume order. Contributed by Eli
 Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1398612 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt            |  3 +++
 .../hdfs/server/datanode/DirectoryScanner.java         | 18 +++++++++---------
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3a4843c25c..a59fb4da2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -446,6 +446,9 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3678. Edit log files are never being purged from 2NN. (atm)
 
+    HDFS-4058. DirectoryScanner may fail with IOOB if the directory
+    scanning threads return out of volume order. (eli)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 004af654e6..5d870d771e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -431,16 +431,16 @@ private static boolean isValid(final FsDatasetSpi<?> dataset,
   private Map<String, ScanInfo[]> getDiskReport() {
     // First get list of data directories
     final List<? extends FsVolumeSpi> volumes = dataset.getVolumes();
-    ArrayList<ScanInfoPerBlockPool> dirReports =
-        new ArrayList<ScanInfoPerBlockPool>(volumes.size());
-    
+
+    // Use an array since the threads may return out of order and
+    // compilersInProgress#keySet may return out of order as well.
+    ScanInfoPerBlockPool[] dirReports = new ScanInfoPerBlockPool[volumes.size()];
+
     Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
       new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
+
     for (int i = 0; i < volumes.size(); i++) {
-      if (!isValid(dataset, volumes.get(i))) {
-        // volume is invalid
-        dirReports.add(i, null);
-      } else {
+      if (isValid(dataset, volumes.get(i))) {
         ReportCompiler reportCompiler =
           new ReportCompiler(volumes.get(i));
         Future<ScanInfoPerBlockPool> result =
@@ -452,7 +452,7 @@ private Map<String, ScanInfo[]> getDiskReport() {
     for (Entry<Integer, Future<ScanInfoPerBlockPool>> report :
         compilersInProgress.entrySet()) {
       try {
-        dirReports.add(report.getKey(), report.getValue().get());
+        dirReports[report.getKey()] = report.getValue().get();
       } catch (Exception ex) {
         LOG.error("Error compiling report", ex);
         // Propagate ex to DataBlockScanner to deal with
@@ -465,7 +465,7 @@ private Map<String, ScanInfo[]> getDiskReport() {
     for (int i = 0; i < volumes.size(); i++) {
       if (isValid(dataset, volumes.get(i))) {
         // volume is still valid
-        list.addAll(dirReports.get(i));
+        list.addAll(dirReports[i]);
       }
     }