From d16c4eee186492608ffeb1c2e83f437000cc64f6 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Fri, 4 Sep 2015 10:41:09 -0700
Subject: [PATCH] HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck
 class to hadoop-hdfs-client module. Contributed by Mingliang Liu.

---
 .../protocol/datatransfer/PipelineAck.java    | 31 ---------------
 .../hadoop/hdfs/util/LongBitFormat.java       |  0
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../hdfs/server/datanode/BlockReceiver.java   |  2 +-
 .../hadoop/hdfs/server/datanode/DataNode.java | 38 +++++++++++++++++++
 5 files changed, 42 insertions(+), 32 deletions(-)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java (85%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java (100%)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
similarity index 85%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
index 44f38c6673..38366065d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
@@ -23,15 +23,10 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 
 import com.google.common.collect.Lists;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFAULT;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
@@ -46,9 +41,6 @@ public class PipelineAck {
   public final static long UNKOWN_SEQNO = -2;
   final static int OOB_START = Status.OOB_RESTART_VALUE; // the first OOB type
   final static int OOB_END = Status.OOB_RESERVED3_VALUE; // the last OOB type
-  final static int NUM_OOB_TYPES = OOB_END - OOB_START + 1;
-  // place holder for timeout value of each OOB type
-  final static long[] OOB_TIMEOUT;
 
   public enum ECN {
     DISABLED(0),
@@ -99,16 +91,6 @@ public class PipelineAck {
     }
   }
 
-  static {
-    OOB_TIMEOUT = new long[NUM_OOB_TYPES];
-    HdfsConfiguration conf = new HdfsConfiguration();
-    String[] ele = conf.get(DFS_DATANODE_OOB_TIMEOUT_KEY,
-        DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(",");
-    for (int i = 0; i < NUM_OOB_TYPES; i++) {
-      OOB_TIMEOUT[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
-    }
-  }
-
   /** default constructor **/
   public PipelineAck() {
   }
@@ -216,19 +198,6 @@ public class PipelineAck {
     return null;
   }
 
-  /**
-   * Get the timeout to be used for transmitting the OOB type
-   * @return the timeout in milliseconds
-   */
-  public static long getOOBTimeout(Status status) throws IOException {
-    int index = status.getNumber() - OOB_START;
-    if (index >= 0 && index < NUM_OOB_TYPES) {
-      return OOB_TIMEOUT[index];
-    }
-    // Not an OOB.
-    throw new IOException("Not an OOB status: " + status);
-  }
-
   /** Get the Restart OOB ack status */
   public static Status getRestartOOBStatus() {
     return Status.OOB_RESTART;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9b89aa226..e67c9d5bc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -888,6 +888,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9021. Use a yellow elephant rather than a blue one in diagram. (wang)
 
+    HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to
+    hadoop-hdfs-client module. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 1cb308f731..bc5396f94d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -1153,7 +1153,7 @@ class BlockReceiver implements Closeable {
 
     synchronized(this) {
       if (sending) {
-        wait(PipelineAck.getOOBTimeout(ackStatus));
+        wait(datanode.getOOBTimeout(ackStatus));
         // Didn't get my turn in time. Give up.
         if (sending) {
           throw new IOException("Could not send OOB reponse in time: "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 42cbd96f93..e0adc6dee4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -40,6 +40,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
@@ -359,6 +361,8 @@ public class DataNode extends ReconfigurableBase
       .availableProcessors();
   private static final double CONGESTION_RATIO = 1.5;
 
+  private long[] oobTimeouts; /** timeout value of each OOB type */
+
   /**
    * Creates a dummy DataNode for testing purpose.
    */
@@ -373,6 +377,7 @@
     this.connectToDnViaHostname = false;
     this.blockScanner = new BlockScanner(this, conf);
     this.pipelineSupportECN = false;
+    initOOBTimeout();
   }
 
   /**
@@ -446,6 +451,8 @@
         return ret;
       }
     });
+
+    initOOBTimeout();
   }
 
   @Override // ReconfigurableBase
@@ -3226,4 +3233,35 @@ public class DataNode extends ReconfigurableBase
     checkSuperuserPrivilege();
     spanReceiverHost.removeSpanReceiver(id);
   }
+
+  /**
+   * Get timeout value of each OOB type from configuration
+   */
+  private void initOOBTimeout() {
+    final int oobStart = Status.OOB_RESTART_VALUE; // the first OOB type
+    final int oobEnd = Status.OOB_RESERVED3_VALUE; // the last OOB type
+    final int numOobTypes = oobEnd - oobStart + 1;
+    oobTimeouts = new long[numOobTypes];
+
+    final String[] ele = conf.get(DFS_DATANODE_OOB_TIMEOUT_KEY,
+        DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(",");
+    for (int i = 0; i < numOobTypes; i++) {
+      oobTimeouts[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
+    }
+  }
+
+  /**
+   * Get the timeout to be used for transmitting the OOB type
+   * @return the timeout in milliseconds
+   */
+  public long getOOBTimeout(Status status)
+      throws IOException {
+    if (status.getNumber() < Status.OOB_RESTART_VALUE ||
+        status.getNumber() > Status.OOB_RESERVED3_VALUE) {
+      // Not an OOB.
+      throw new IOException("Not an OOB status: " + status);
+    }
+
+    return oobTimeouts[status.getNumber() - Status.OOB_RESTART_VALUE];
+  }
 }
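
Reviewer note (not part of the patch): the functional core of this change is that the OOB timeout table is no longer built in a static initializer of PipelineAck, where it forced the client-side class to instantiate HdfsConfiguration; it is now per-DataNode state filled in by initOOBTimeout() and queried through the new instance method getOOBTimeout(Status). The standalone sketch below isolates just the parsing rule so it can be read on its own. The class name OobTimeoutSketch and the helper parseOobTimeouts are hypothetical, and NUM_OOB_TYPES = 4 assumes Status defines the four OOB values OOB_RESTART through OOB_RESERVED3 (i.e. OOB_END - OOB_START + 1 in the patch).

import java.util.Arrays;

// Sketch of the timeout-table parsing this patch moves from PipelineAck's
// static initializer into DataNode#initOOBTimeout. Names are hypothetical.
public class OobTimeoutSketch {
  // Assumption: one slot per OOB status value, OOB_RESTART..OOB_RESERVED3.
  static final int NUM_OOB_TYPES = 4;

  // Split the comma-separated config value (the dfs.datanode.oob.timeout-ms
  // key); entries beyond the configured list default to 0 ms, as in the patch.
  static long[] parseOobTimeouts(String configValue) {
    String[] ele = configValue.split(",");
    long[] timeouts = new long[NUM_OOB_TYPES];
    for (int i = 0; i < NUM_OOB_TYPES; i++) {
      timeouts[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
    }
    return timeouts;
  }

  public static void main(String[] args) {
    // "1500,0,0,0" mirrors the shape of DFS_DATANODE_OOB_TIMEOUT_DEFAULT:
    // one millisecond value per OOB type.
    System.out.println(Arrays.toString(parseOobTimeouts("1500,0,0,0")));
    // A short list is padded with zeros rather than rejected.
    System.out.println(Arrays.toString(parseOobTimeouts("1500")));
  }
}

Parsing per instance rather than in a static block also means each DataNode picks up its own Configuration, instead of every JVM that merely loads PipelineAck paying for (and depending on) a server-side HdfsConfiguration read.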