From 0d6aa5d60948a7966da0ca1c3344a37c1d32f2e9 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Mon, 4 May 2015 15:00:29 -0700
Subject: [PATCH] HDFS-8237. Move all protocol classes used by ClientProtocol
 to hdfs-client. Contributed by Haohui Mai.

---
 .../dev-support/findbugsExcludeFile.xml       |  3 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java | 85 +++++++++++++++++++
 .../hdfs/client/HdfsClientConfigKeys.java     |  1 +
 .../hdfs/protocol/CacheDirectiveEntry.java    |  0
 .../hdfs/protocol/CacheDirectiveInfo.java     |  6 +-
 .../hdfs/protocol/CacheDirectiveStats.java    |  0
 .../hadoop/hdfs/protocol/CachePoolEntry.java  |  0
 .../hadoop/hdfs/protocol/CachePoolInfo.java   |  0
 .../hadoop/hdfs/protocol/CachePoolStats.java  |  0
 .../hdfs/protocol/SnapshotDiffReport.java     |  8 +-
 .../SnapshottableDirectoryStatus.java         |  8 +-
 .../delegation/DelegationTokenSelector.java   |  4 +-
 .../namenode/NotReplicatedYetException.java   |  0
 .../hdfs/server/protocol/DatanodeStorage.java |  0
 .../protocol/DatanodeStorageReport.java       |  0
 .../hdfs/server/protocol/StorageReport.java   |  0
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java  | 75 +---------------
 .../hadoop/hdfs/protocolPB/PBHelper.java      | 10 +--
 .../hadoop/hdfs/server/namenode/INode.java    |  3 +-
 .../namenode/snapshot/SnapshotManager.java    |  3 +-
 21 files changed, 116 insertions(+), 93 deletions(-)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java (98%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java (97%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java (96%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java (95%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java (100%)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java (100%)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 7aade70ceb..be2911f3e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -11,6 +11,9 @@
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 97d340822d..eda135e114 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -19,6 +19,7 @@
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import com.google.common.primitives.SignedBytes;
 import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -36,15 +37,19 @@
 
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
+import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 
 public class DFSUtilClient {
+  public static final byte[] EMPTY_BYTES = {};
   private static final Logger LOG = LoggerFactory.getLogger(
       DFSUtilClient.class);
   /**
@@ -184,6 +189,48 @@ public static BlockLocation[] locatedBlocks2Locations(
     return blkLocations;
   }
 
+  /** Compare two byte arrays by lexicographical order. */
+  public static int compareBytes(byte[] left, byte[] right) {
+    if (left == null) {
+      left = EMPTY_BYTES;
+    }
+    if (right == null) {
+      right = EMPTY_BYTES;
+    }
+    return SignedBytes.lexicographicalComparator().compare(left, right);
+  }
+
+  /**
+   * Given a list of path components returns a byte array
+   */
+  public static byte[] byteArray2bytes(byte[][] pathComponents) {
+    if (pathComponents.length == 0) {
+      return EMPTY_BYTES;
+    } else if (pathComponents.length == 1
+        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
+      return new byte[]{(byte) Path.SEPARATOR_CHAR};
+    }
+    int length = 0;
+    for (int i = 0; i < pathComponents.length; i++) {
+      length += pathComponents[i].length;
+      if (i < pathComponents.length - 1) {
+        length++; // for SEPARATOR
+      }
+    }
+    byte[] path = new byte[length];
+    int index = 0;
+    for (int i = 0; i < pathComponents.length; i++) {
+      System.arraycopy(pathComponents[i], 0, path, index,
+          pathComponents[i].length);
+      index += pathComponents[i].length;
+      if (i < pathComponents.length - 1) {
+        path[index] = (byte) Path.SEPARATOR_CHAR;
+        index++;
+      }
+    }
+    return path;
+  }
+
   /**
    * Decode a specific range of bytes of the given byte array to a string
    * using UTF8.
@@ -343,4 +390,42 @@ public static boolean isValidName(String src) {
     }
     return true;
   }
+
+  /**
+   * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
+   */
+  public static String durationToString(long durationMs) {
+    boolean negative = false;
+    if (durationMs < 0) {
+      negative = true;
+      durationMs = -durationMs;
+    }
+    // Chop off the milliseconds
+    long durationSec = durationMs / 1000;
+    final int secondsPerMinute = 60;
+    final int secondsPerHour = 60*60;
+    final int secondsPerDay = 60*60*24;
+    final long days = durationSec / secondsPerDay;
+    durationSec -= days * secondsPerDay;
+    final long hours = durationSec / secondsPerHour;
+    durationSec -= hours * secondsPerHour;
+    final long minutes = durationSec / secondsPerMinute;
+    durationSec -= minutes * secondsPerMinute;
+    final long seconds = durationSec;
+    final long milliseconds = durationMs % 1000;
+    String format = "%03d:%02d:%02d:%02d.%03d";
+    if (negative) {
+      format = "-" + format;
+    }
+    return String.format(format, days, hours, minutes, seconds, milliseconds);
+  }
+
+  /**
+   * Converts a Date into an ISO-8601 formatted datetime string.
+   */
+  public static String dateToIso8601String(Date date) {
+    SimpleDateFormat df =
+        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
+    return df.format(date);
+  }
 }
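Note: for reference, the behavior of the helpers moved into DFSUtilClient
above can be sketched as follows. This is an illustrative usage sketch, not
part of the patch; the sample inputs and expected outputs are assumptions
derived from the method bodies in the hunk above.

    // Joins path components with '/' (Path.SEPARATOR_CHAR).
    byte[][] components = { "foo".getBytes(), "bar".getBytes() };
    byte[] joined = DFSUtilClient.byteArray2bytes(components);
    // joined == the bytes of "foo/bar"

    // 90061001 ms = 1 day, 1 hour, 1 minute, 1 second, 1 ms.
    String d = DFSUtilClient.durationToString(90061001L);
    // d == "001:01:01:01.001"  (DDD:HH:MM:SS.mmm)

    // ISO-8601 with a numeric zone offset; the offset printed depends
    // on the default time zone of the JVM.
    String iso = DFSUtilClient.dateToIso8601String(new java.util.Date(0L));
    // e.g. "1970-01-01T00:00:00+0000"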
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 86c8a87585..26283aade8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -38,6 +38,7 @@ public interface HdfsClientConfigKeys {
   int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
   String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
+  int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
 
   /** dfs.client.retry configuration properties */
   interface Retry {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
index f6b3c34f4a..2305c59a71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
@@ -24,9 +24,9 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 
 /**
  * Describes a path-based cache directive.
@@ -244,9 +244,9 @@ public long getAbsoluteMillis() {
     @Override
     public String toString() {
       if (isRelative) {
-        return DFSUtil.durationToString(ms);
+        return DFSUtilClient.durationToString(ms);
       }
-      return DFSUtil.dateToIso8601String(new Date(ms));
+      return DFSUtilClient.dateToIso8601String(new Date(ms));
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
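Note: the CacheDirectiveInfo.Expiration.toString() change above only swaps
DFSUtil for DFSUtilClient; the rendering itself is unchanged. A rough sketch
of that behavior (newRelative/newAbsolute are the existing Expiration factory
methods; the printed values are illustrative):

    // A relative expiration prints as a duration...
    CacheDirectiveInfo.Expiration rel =
        CacheDirectiveInfo.Expiration.newRelative(3600000L); // 1 hour
    System.out.println(rel); // "000:01:00:00.000"

    // ...while an absolute one prints as an ISO-8601 datetime.
    CacheDirectiveInfo.Expiration abs =
        CacheDirectiveInfo.Expiration.newAbsolute(new java.util.Date());
    System.out.println(abs); // e.g. "2015-05-04T15:00:29-0700"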
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
index b0db838dbd..b58ed3618b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
@@ -22,9 +22,9 @@
 import java.util.List;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.base.Objects;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 
 /**
  * This class represents to end users the difference between two snapshots of
@@ -102,8 +102,8 @@ public DiffReportEntry(DiffType type, byte[] sourcePath, byte[] targetPath) {
     public DiffReportEntry(DiffType type, byte[][] sourcePathComponents,
         byte[][] targetPathComponents) {
       this.type = type;
-      this.sourcePath = DFSUtil.byteArray2bytes(sourcePathComponents);
-      this.targetPath = targetPathComponents == null ? null : DFSUtil
+      this.sourcePath = DFSUtilClient.byteArray2bytes(sourcePathComponents);
+      this.targetPath = targetPathComponents == null ? null : DFSUtilClient
           .byteArray2bytes(targetPathComponents);
     }
 
@@ -121,7 +121,7 @@ public DiffType getType() {
     }
 
     static String getPathString(byte[] path) {
-      String pathStr = DFSUtil.bytes2String(path);
+      String pathStr = DFSUtilClient.bytes2String(path);
       if (pathStr.isEmpty()) {
         return Path.CUR_DIR;
       } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
similarity index 96%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 3067696eca..ac19d44cd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -24,7 +24,7 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 
 /**
  * Metadata about a snapshottable directory
@@ -36,9 +36,9 @@ public class SnapshottableDirectoryStatus {
     @Override
     public int compare(SnapshottableDirectoryStatus left,
                        SnapshottableDirectoryStatus right) {
-      int d = DFSUtil.compareBytes(left.parentFullPath, right.parentFullPath);
+      int d = DFSUtilClient.compareBytes(left.parentFullPath, right.parentFullPath);
       return d != 0? d
-          : DFSUtil.compareBytes(left.dirStatus.getLocalNameInBytes(),
+          : DFSUtilClient.compareBytes(left.dirStatus.getLocalNameInBytes(),
               right.dirStatus.getLocalNameInBytes());
     }
   };
@@ -101,7 +101,7 @@ public HdfsFileStatus getDirStatus() {
   public Path getFullPath() {
     String parentFullPathStr =
         (parentFullPath == null || parentFullPath.length == 0) ?
-            null : DFSUtil.bytes2String(parentFullPath);
+            null : DFSUtilClient.bytes2String(parentFullPath);
     if (parentFullPathStr == null
         && dirStatus.getLocalNameInBytes().length == 0) {
       // root
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
index e9b24ca012..9cf44d7d53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
@@ -22,7 +22,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -57,7 +57,7 @@ public Token<DelegationTokenIdentifier> selectToken(
     Text serviceName = SecurityUtil.buildTokenService(nnUri);
     final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
 
-    int nnRpcPort = NameNode.DEFAULT_PORT;
+    int nnRpcPort = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
     if (nnServiceName != null) {
       nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
     }
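Note: the DelegationTokenSelector change above replaces the server-side
constant NameNode.DEFAULT_PORT with the new client-side
HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT (both 8020), which is what
lets the class move into hadoop-hdfs-client without dragging the NameNode
class along. The default only matters when the configured service address
omits a port; a sketch of that fallback (the host name is made up):

    int defaultPort = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
    // No port given, so the default applies:
    InetSocketAddress a =
        NetUtils.createSocketAddr("nn.example.com", defaultPort);      // 8020
    // An explicit port wins over the default:
    InetSocketAddress b =
        NetUtils.createSocketAddr("nn.example.com:9000", defaultPort); // 9000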
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 21d73bad87..365b005386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -504,6 +504,9 @@ Release 2.8.0 - UNRELEASED
       "dfs.client.read.shortcircuit.streams.cache.size"
       (Brahma Reddy Battula via Colin P. McCabe)
 
+    HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 37f8c72f6f..42ff7fa5db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -43,14 +43,12 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.SecureRandom;
-import java.text.SimpleDateFormat;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
 import java.util.Date;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
@@ -97,26 +95,12 @@
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.primitives.SignedBytes;
 import com.google.protobuf.BlockingService;
 
 @InterfaceAudience.Private
 public class DFSUtil {
   public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
 
-  public static final byte[] EMPTY_BYTES = {};
-
-  /** Compare two byte arrays by lexicographical order. */
-  public static int compareBytes(byte[] left, byte[] right) {
-    if (left == null) {
-      left = EMPTY_BYTES;
-    }
-    if (right == null) {
-      right = EMPTY_BYTES;
-    }
-    return SignedBytes.lexicographicalComparator().compare(left, right);
-  }
-
   private DFSUtil() { /* Hidden constructor */ }
   private static final ThreadLocal<SecureRandom> RANDOM = new ThreadLocal<SecureRandom>() {
     @Override
@@ -345,37 +329,6 @@ public static String strings2PathString(String[] components) {
     return Joiner.on(Path.SEPARATOR).join(components);
   }
 
-  /**
-   * Given a list of path components returns a byte array
-   */
-  public static byte[] byteArray2bytes(byte[][] pathComponents) {
-    if (pathComponents.length == 0) {
-      return EMPTY_BYTES;
-    } else if (pathComponents.length == 1
-        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
-      return new byte[]{(byte) Path.SEPARATOR_CHAR};
-    }
-    int length = 0;
-    for (int i = 0; i < pathComponents.length; i++) {
-      length += pathComponents[i].length;
-      if (i < pathComponents.length - 1) {
-        length++; // for SEPARATOR
-      }
-    }
-    byte[] path = new byte[length];
-    int index = 0;
-    for (int i = 0; i < pathComponents.length; i++) {
-      System.arraycopy(pathComponents[i], 0, path, index,
-          pathComponents[i].length);
-      index += pathComponents[i].length;
-      if (i < pathComponents.length - 1) {
-        path[index] = (byte) Path.SEPARATOR_CHAR;
-        index++;
-      }
-    }
-    return path;
-  }
-
   /** Convert an object representing a path to a string. */
   public static String path2String(final Object path) {
     return path == null? null
@@ -1377,38 +1330,14 @@ static String getPassword(Configuration conf, String alias) {
    * Converts a Date into an ISO-8601 formatted datetime string.
    */
   public static String dateToIso8601String(Date date) {
-    SimpleDateFormat df =
-        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
-    return df.format(date);
+    return DFSUtilClient.dateToIso8601String(date);
   }
 
   /**
    * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
    */
   public static String durationToString(long durationMs) {
-    boolean negative = false;
-    if (durationMs < 0) {
-      negative = true;
-      durationMs = -durationMs;
-    }
-    // Chop off the milliseconds
-    long durationSec = durationMs / 1000;
-    final int secondsPerMinute = 60;
-    final int secondsPerHour = 60*60;
-    final int secondsPerDay = 60*60*24;
-    final long days = durationSec / secondsPerDay;
-    durationSec -= days * secondsPerDay;
-    final long hours = durationSec / secondsPerHour;
-    durationSec -= hours * secondsPerHour;
-    final long minutes = durationSec / secondsPerMinute;
-    durationSec -= minutes * secondsPerMinute;
-    final long seconds = durationSec;
-    final long milliseconds = durationMs % 1000;
-    String format = "%03d:%02d:%02d:%02d.%03d";
-    if (negative) {
-      format = "-" + format;
-    }
-    return String.format(format, days, hours, minutes, seconds, milliseconds);
+    return DFSUtilClient.durationToString(durationMs);
   }
 
   /**
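Note: the DFSUtil hunks above keep the old public methods as thin forwarders
into DFSUtilClient, so existing server-side callers compile and behave as
before. A minimal equivalence check, purely illustrative and not part of the
patch:

    // Old and new entry points should be interchangeable.
    assert DFSUtil.durationToString(42000L)
        .equals(DFSUtilClient.durationToString(42000L));
    assert DFSUtil.dateToIso8601String(new Date(0L))
        .equals(DFSUtilClient.dateToIso8601String(new Date(0L)));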
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index ba6670c0d9..c9a9c337a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -47,9 +47,9 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -469,7 +469,7 @@ public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
   public static BlockKeyProto convert(BlockKey key) {
     byte[] encodedKey = key.getEncodedKey();
     ByteString keyBytes = ByteString.copyFrom(encodedKey == null ?
-        DFSUtil.EMPTY_BYTES : encodedKey);
+        DFSUtilClient.EMPTY_BYTES : encodedKey);
     return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
         .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
   }
@@ -1514,7 +1514,7 @@ public static SnapshottableDirectoryStatusProto convert(
     int snapshotQuota = status.getSnapshotQuota();
     byte[] parentFullPath = status.getParentFullPath();
     ByteString parentFullPathBytes = ByteString.copyFrom(
-        parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
+        parentFullPath == null ? DFSUtilClient.EMPTY_BYTES : parentFullPath);
     HdfsFileStatusProto fs = convert(status.getDirStatus());
     SnapshottableDirectoryStatusProto.Builder builder =
         SnapshottableDirectoryStatusProto
@@ -2024,7 +2024,7 @@ public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
       return null;
     }
     ByteString sourcePath = ByteString
-        .copyFrom(entry.getSourcePath() == null ? DFSUtil.EMPTY_BYTES : entry
+        .copyFrom(entry.getSourcePath() == null ? DFSUtilClient.EMPTY_BYTES : entry
            .getSourcePath());
     String modification = entry.getType().getLabel();
     SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
@@ -2032,7 +2032,7 @@ public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
         .setModificationLabel(modification);
     if (entry.getType() == DiffType.RENAME) {
       ByteString targetPath = ByteString
-          .copyFrom(entry.getTargetPath() == null ? DFSUtil.EMPTY_BYTES : entry
+          .copyFrom(entry.getTargetPath() == null ? DFSUtilClient.EMPTY_BYTES : entry
              .getTargetPath());
       builder.setTargetPath(targetPath);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 409967eecb..f8efd76e0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -765,7 +766,7 @@ public static String[] getPathNames(String path) {
 
   @Override
   public final int compareTo(byte[] bytes) {
-    return DFSUtil.compareBytes(getLocalNameBytes(), bytes);
+    return DFSUtilClient.compareBytes(getLocalNameBytes(), bytes);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index c3b7523e01..802d64ab6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -30,6 +30,7 @@
 import javax.management.ObjectName;
 
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
@@ -342,7 +343,7 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
           dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
           dir.getDirectorySnapshottableFeature().getNumSnapshots(),
           dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
-          dir.getParent() == null ? DFSUtil.EMPTY_BYTES :
+          dir.getParent() == null ? DFSUtilClient.EMPTY_BYTES :
               DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
       statusList.add(status);
     }
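Note: INode.compareTo above (and the SnapshottableDirectoryStatus comparator
earlier in the patch) now routes through DFSUtilClient.compareBytes, which
treats null as the empty array and orders byte arrays lexicographically on
signed byte values. A small illustration with made-up inputs:

    DFSUtilClient.compareBytes(null, new byte[0]);               // == 0
    DFSUtilClient.compareBytes("a".getBytes(), "b".getBytes());  // < 0
    DFSUtilClient.compareBytes("ab".getBytes(), "a".getBytes()); // > 0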