diff --git a/NOTICE.txt b/NOTICE.txt index 95a670d9ee..a53f13c700 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -196,6 +196,14 @@ by Google Inc, which can be obtained at: * HOMEPAGE: * http://code.google.com/p/snappy/ +This product contains a modified portion of UnsignedBytes LexicographicalComparator +from Guava v21 project by Google Inc, which can be obtained at: + + * LICENSE: + * license/COPYING (Apache License 2.0) + * HOMEPAGE: + * https://github.com/google/guava + This product optionally depends on 'JBoss Marshalling', an alternative Java serialization API, which can be obtained at: diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching index ad8abe294c..be330d5aaa 100755 --- a/dev-support/bin/ozone-dist-layout-stitching +++ b/dev-support/bin/ozone-dist-layout-stitching @@ -145,6 +145,8 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$ run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" . run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" . run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" . +mkdir -p "./share/hadoop/ozonefs" +cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar" # Optional documentation, could be missing cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/ @@ -153,5 +155,5 @@ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdd mkdir -p ./share/hadoop/mapreduce mkdir -p ./share/hadoop/yarn echo -echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}" +echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone" echo diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index b9363de569..6fa24b49e5 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -166,10 +166,6 @@ commons-io commons-io - - commons-lang - commons-lang - commons-logging commons-logging @@ -495,10 +491,6 @@ commons-codec commons-codec - - commons-lang - commons-lang - commons-logging commons-logging diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1a16dc48fb..67a5a54839 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -156,11 +156,6 @@ junit test - - commons-lang - commons-lang - compile - commons-beanutils commons-beanutils diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 19bd5dab22..b1125e588c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -3189,25 +3189,25 @@ public void addTags(Properties prop) { if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) { String systemTags = prop.getProperty(CommonConfigurationKeys .HADOOP_TAGS_SYSTEM); - Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag)); + 
TAGS.addAll(Arrays.asList(systemTags.split(","))); } // Get all custom tags if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) { String customTags = prop.getProperty(CommonConfigurationKeys .HADOOP_TAGS_CUSTOM); - Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(customTags.split(","))); } if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) { String systemTags = prop.getProperty(CommonConfigurationKeys .HADOOP_SYSTEM_TAGS); - Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(systemTags.split(","))); } // Get all custom tags if (prop.containsKey(CommonConfigurationKeys.HADOOP_CUSTOM_TAGS)) { String customTags = prop.getProperty(CommonConfigurationKeys .HADOOP_CUSTOM_TAGS); - Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag)); + TAGS.addAll(Arrays.asList(customTags.split(","))); } } catch (Exception ex) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java index 5a616f72b9..c5bdf4e021 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java @@ -18,7 +18,7 @@ package org.apache.hadoop.conf; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import java.util.Collection; import java.util.Enumeration; @@ -72,10 +72,10 @@ private Reconfigurable getReconfigurable(HttpServletRequest req) { private void printHeader(PrintWriter out, String nodeName) { out.print(""); out.printf("%s Reconfiguration Utility%n", - StringEscapeUtils.escapeHtml(nodeName)); + StringEscapeUtils.escapeHtml4(nodeName)); out.print("\n"); out.printf("
%s Reconfiguration Utility
%n", - StringEscapeUtils.escapeHtml(nodeName)); + StringEscapeUtils.escapeHtml4(nodeName)); } private void printFooter(PrintWriter out) { @@ -103,20 +103,20 @@ private void printConf(PrintWriter out, Reconfigurable reconf) { out.print(""); if (!reconf.isPropertyReconfigurable(c.prop)) { out.print("" + - StringEscapeUtils.escapeHtml(c.prop) + ""); + StringEscapeUtils.escapeHtml4(c.prop) + ""); changeOK = false; } else { - out.print(StringEscapeUtils.escapeHtml(c.prop)); + out.print(StringEscapeUtils.escapeHtml4(c.prop)); out.print(""); + StringEscapeUtils.escapeHtml4(c.prop) + "\" value=\"" + + StringEscapeUtils.escapeHtml4(c.newVal) + "\"/>"); } out.print("" + (c.oldVal == null ? "default" : - StringEscapeUtils.escapeHtml(c.oldVal)) + + StringEscapeUtils.escapeHtml4(c.oldVal)) + "" + (c.newVal == null ? "default" : - StringEscapeUtils.escapeHtml(c.newVal)) + + StringEscapeUtils.escapeHtml4(c.newVal)) + ""); out.print("\n"); } @@ -147,9 +147,9 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, synchronized(oldConf) { while (params.hasMoreElements()) { String rawParam = params.nextElement(); - String param = StringEscapeUtils.unescapeHtml(rawParam); + String param = StringEscapeUtils.unescapeHtml4(rawParam); String value = - StringEscapeUtils.unescapeHtml(req.getParameter(rawParam)); + StringEscapeUtils.unescapeHtml4(req.getParameter(rawParam)); if (value != null) { if (value.equals(newConf.getRaw(param)) || value.equals("default") || value.equals("null") || value.isEmpty()) { @@ -157,8 +157,8 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, value.isEmpty()) && oldConf.getRaw(param) != null) { out.println("
Changed \"" + - StringEscapeUtils.escapeHtml(param) + "\" from \"" + - StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) + + StringEscapeUtils.escapeHtml4(param) + "\" from \"" + + StringEscapeUtils.escapeHtml4(oldConf.getRaw(param)) + "\" to default
"); reconf.reconfigureProperty(param, null); } else if (!value.equals("default") && !value.equals("null") && @@ -168,16 +168,16 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, // change from default or value to different value if (oldConf.getRaw(param) == null) { out.println("
Changed \"" + - StringEscapeUtils.escapeHtml(param) + + StringEscapeUtils.escapeHtml4(param) + "\" from default to \"" + - StringEscapeUtils.escapeHtml(value) + "\"
"); + StringEscapeUtils.escapeHtml4(value) + "\"
"); } else { out.println("
Changed \"" + - StringEscapeUtils.escapeHtml(param) + "\" from \"" + - StringEscapeUtils.escapeHtml(oldConf. + StringEscapeUtils.escapeHtml4(param) + "\" from \"" + + StringEscapeUtils.escapeHtml4(oldConf. getRaw(param)) + "\" to \"" + - StringEscapeUtils.escapeHtml(value) + "\"
"); + StringEscapeUtils.escapeHtml4(value) + "\"
"); } reconf.reconfigureProperty(param, value); } else { @@ -185,10 +185,10 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, } } else { // parameter value != newConf value - out.println("
\"" + StringEscapeUtils.escapeHtml(param) + + out.println("
\"" + StringEscapeUtils.escapeHtml4(param) + "\" not changed because value has changed from \"" + - StringEscapeUtils.escapeHtml(value) + "\" to \"" + - StringEscapeUtils.escapeHtml(newConf.getRaw(param)) + + StringEscapeUtils.escapeHtml4(value) + "\" to \"" + + StringEscapeUtils.escapeHtml4(newConf.getRaw(param)) + "\" since approval
"); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java index 050540b4cb..286312ce5e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java @@ -33,8 +33,8 @@ import com.google.gson.stream.JsonReader; import com.google.gson.stream.JsonWriter; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index 9fdf242fd5..fa84c47d26 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -27,7 +27,7 @@ import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider.Metadata; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index 08787a51bd..7b4607507b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -32,7 +32,9 @@ import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -40,6 +42,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL; import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.KMSUtil; import org.apache.http.client.utils.URIBuilder; import org.slf4j.Logger; @@ -77,7 +80,6 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -130,9 +132,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension, private 
final ValueQueue encKeyVersionQueue; - private static final ObjectWriter WRITER = - new ObjectMapper().writerWithDefaultPrettyPrinter(); - private final Text dtService; // Allow fallback to default kms server port 9600 for certain tests that do @@ -235,7 +234,7 @@ public KMSEncryptedKeyVersion(String keyName, String keyVersionName, private static void writeJson(Object obj, OutputStream os) throws IOException { Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8); - WRITER.writeValue(writer, obj); + JsonSerialization.writer().writeValue(writer, obj); } /** @@ -543,7 +542,9 @@ private T call(HttpURLConnection conn, Object jsonOutput, String requestMethod = conn.getRequestMethod(); URL url = conn.getURL(); conn = createConnection(url, requestMethod); - conn.setRequestProperty(CONTENT_TYPE, contentType); + if (contentType != null && !contentType.isEmpty()) { + conn.setRequestProperty(CONTENT_TYPE, contentType); + } return call(conn, jsonOutput, expectedResponse, klass, authRetryCount - 1); } @@ -1087,8 +1088,7 @@ private UserGroupInformation getActualUgi() throws IOException { actualUgi = currentUgi.getRealUser(); } if (UserGroupInformation.isSecurityEnabled() && - !containsKmsDt(actualUgi) && - !actualUgi.hasKerberosCredentials()) { + !containsKmsDt(actualUgi) && !actualUgi.shouldRelogin()) { // Use login user is only necessary when Kerberos is enabled // but the actual user does not have either // Kerberos credential or KMS delegation token for KMS operations diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java new file mode 100644 index 0000000000..e1336b8085 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * Byte array backed part handle. 
+ */ +public final class BBPartHandle implements PartHandle { + + private static final long serialVersionUID = 0x23ce3eb1; + + private final byte[] bytes; + + private BBPartHandle(ByteBuffer byteBuffer){ + this.bytes = byteBuffer.array(); + } + + public static PartHandle from(ByteBuffer byteBuffer) { + return new BBPartHandle(byteBuffer); + } + + @Override + public ByteBuffer bytes() { + return ByteBuffer.wrap(bytes); + } + + @Override + public int hashCode() { + return Arrays.hashCode(bytes); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof PartHandle)) { + return false; + + } + PartHandle o = (PartHandle) other; + return bytes().equals(o.bytes()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java new file mode 100644 index 0000000000..6430c145e2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * Byte array backed upload handle. 
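Both byte-buffer-backed handles exist so that opaque identifiers can be handed back to callers and later turned back into whatever the uploader encoded in them. As an illustrative sketch only (not part of the patch; the path value is made up), this is roughly how a handle round-trips through its ByteBuffer:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.BBUploadHandle;
import org.apache.hadoop.fs.UploadHandle;

public class HandleRoundTrip {
  public static void main(String[] args) {
    // Encode an (example) collector path into an opaque handle...
    String collectorPath = "/tmp/upload_multipart/";
    UploadHandle handle = BBUploadHandle.from(
        ByteBuffer.wrap(collectorPath.getBytes(StandardCharsets.UTF_8)));

    // ...and decode it again later, as FileSystemMultipartUploader (added
    // further down in this patch) does in putPart()/complete()/abort().
    byte[] raw = handle.toByteArray();
    System.out.println(new String(raw, StandardCharsets.UTF_8));
  }
}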
+ */ +public final class BBUploadHandle implements UploadHandle { + + private static final long serialVersionUID = 0x69d5509b; + + private final byte[] bytes; + + private BBUploadHandle(ByteBuffer byteBuffer){ + this.bytes = byteBuffer.array(); + } + + public static UploadHandle from(ByteBuffer byteBuffer) { + return new BBUploadHandle(byteBuffer); + } + + @Override + public int hashCode() { + return Arrays.hashCode(bytes); + } + + @Override + public ByteBuffer bytes() { + return ByteBuffer.wrap(bytes); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof UploadHandle)) { + return false; + } + UploadHandle o = (UploadHandle) other; + return bytes().equals(o.bytes()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 9e0ba20c28..c7f32f92a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -542,7 +542,7 @@ public class CommonConfigurationKeysPublic { * * core-default.xml */ - public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS = + public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY = "hadoop.security.groups.shell.command.timeout"; /** * @see @@ -550,7 +550,7 @@ public class CommonConfigurationKeysPublic { * core-default.xml */ public static final long - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT = + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT = 0L; /** * @see diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java index 86c284a9e8..d43129388b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java @@ -115,6 +115,27 @@ public abstract class FSDataOutputStreamBuilder */ protected abstract B getThisBuilder(); + /** + * Construct from a {@link FileContext}. + * + * @param fc FileContext + * @param p path. + * @throws IOException + */ + FSDataOutputStreamBuilder(@Nonnull FileContext fc, + @Nonnull Path p) throws IOException { + Preconditions.checkNotNull(fc); + Preconditions.checkNotNull(p); + this.fs = null; + this.path = p; + + AbstractFileSystem afs = fc.getFSofPath(p); + FsServerDefaults defaults = afs.getServerDefaults(p); + bufferSize = defaults.getFileBufferSize(); + replication = defaults.getReplication(); + blockSize = defaults.getBlockSize(); + } + /** * Constructor. 
*/ @@ -131,6 +152,7 @@ protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem, } protected FileSystem getFS() { + Preconditions.checkNotNull(fs); return fs; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 6ea69d01b1..5215c3cdee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -24,6 +24,7 @@ import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; import java.util.HashSet; @@ -35,6 +36,8 @@ import java.util.TreeSet; import java.util.Map.Entry; +import javax.annotation.Nonnull; + import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -694,6 +697,69 @@ public FSDataOutputStream next(final AbstractFileSystem fs, final Path p) }.resolve(this, absF); } + /** + * {@link FSDataOutputStreamBuilder} for {@liink FileContext}. + */ + private static final class FCDataOutputStreamBuilder extends + FSDataOutputStreamBuilder< + FSDataOutputStream, FCDataOutputStreamBuilder> { + private final FileContext fc; + + private FCDataOutputStreamBuilder( + @Nonnull FileContext fc, @Nonnull Path p) throws IOException { + super(fc, p); + this.fc = fc; + Preconditions.checkNotNull(fc); + } + + @Override + protected FCDataOutputStreamBuilder getThisBuilder() { + return this; + } + + @Override + public FSDataOutputStream build() throws IOException { + final EnumSet flags = getFlags(); + List createOpts = new ArrayList<>(Arrays.asList( + CreateOpts.blockSize(getBlockSize()), + CreateOpts.bufferSize(getBufferSize()), + CreateOpts.repFac(getReplication()), + CreateOpts.perms(getPermission()) + )); + if (getChecksumOpt() != null) { + createOpts.add(CreateOpts.checksumParam(getChecksumOpt())); + } + if (getProgress() != null) { + createOpts.add(CreateOpts.progress(getProgress())); + } + if (isRecursive()) { + createOpts.add(CreateOpts.createParent()); + } + return fc.create(getPath(), flags, + createOpts.toArray(new CreateOpts[0])); + } + } + + /** + * Create a {@link FSDataOutputStreamBuilder} for creating or overwriting + * a file on indicated path. + * + * @param f the file path to create builder for. + * @return {@link FSDataOutputStreamBuilder} to build a + * {@link FSDataOutputStream}. + * + * Upon {@link FSDataOutputStreamBuilder#build()} being invoked, + * builder parameters will be verified by {@link FileContext} and + * {@link AbstractFileSystem#create}. And filesystem states will be modified. + * + * Client should expect {@link FSDataOutputStreamBuilder#build()} throw the + * same exceptions as create(Path, EnumSet, CreateOpts...). + */ + public FSDataOutputStreamBuilder create(final Path f) + throws IOException { + return new FCDataOutputStreamBuilder(this, f).create(); + } + /** * Make(create) a directory and all the non-existent parents. 
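Returning to the new FileContext create(Path) builder entry point documented above: a minimal usage sketch, assuming a FileContext resolved from the default configuration and a hypothetical path (illustrative only, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextBuilderDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path p = new Path("/tmp/builder-demo.txt");   // hypothetical path

    // Buffer size, replication and block size default to the server
    // defaults of the AbstractFileSystem resolved for the path; the flags
    // and options below are translated into CreateOpts by the builder's
    // build() before handing off to AbstractFileSystem#create.
    try (FSDataOutputStream out = fc.create(p)
        .overwrite(true)     // CreateFlag.CREATE + CreateFlag.OVERWRITE
        .recursive()         // create missing parent directories
        .build()) {
      out.writeUTF("hello");
    }
  }
}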
* diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java new file mode 100644 index 0000000000..b57ff3dc3a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import com.google.common.base.Charsets; +import org.apache.commons.compress.utils.IOUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +/** + * A MultipartUploader that uses the basic FileSystem commands. + * This is done in three stages: + * Init - create a temp _multipart directory. + * PutPart - copying the individual parts of the file to the temp directory. + * Complete - use {@link FileSystem#concat} to merge the files; and then delete + * the temp directory. 
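A sketch of how a client could drive the uploader that follows, end to end (illustrative only and not part of the patch: the local FileSystem, paths and part contents are assumptions, and abort()/error handling is omitted):

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemMultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UploadHandle;

public class LocalMultipartUploadDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Use the raw local filesystem, which gains concat() and PathHandle
    // support elsewhere in this patch.
    FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();
    Path dest = new Path("/tmp/mpu-demo/upload.bin");   // hypothetical target

    FileSystemMultipartUploader uploader = new FileSystemMultipartUploader(fs);

    // Init: creates the temporary _multipart collector directory.
    UploadHandle id = uploader.initialize(dest);

    // PutPart: each part is copied to <partNumber>.part under the collector.
    byte[] part1 = {1, 2, 3};
    byte[] part2 = {4, 5, 6};
    List<Pair<Integer, PartHandle>> parts = new ArrayList<>();
    parts.add(Pair.of(1, uploader.putPart(dest,
        new ByteArrayInputStream(part1), 1, id, part1.length)));
    parts.add(Pair.of(2, uploader.putPart(dest,
        new ByteArrayInputStream(part2), 2, id, part2.length)));

    // Complete: concat()s the parts, renames the result to the target path
    // and deletes the collector directory.
    uploader.complete(dest, parts, id);
    System.out.println("bytes: " + fs.getFileStatus(dest).getLen());
  }
}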
+ */ +public class FileSystemMultipartUploader extends MultipartUploader { + + private final FileSystem fs; + + public FileSystemMultipartUploader(FileSystem fs) { + this.fs = fs; + } + + @Override + public UploadHandle initialize(Path filePath) throws IOException { + Path collectorPath = createCollectorPath(filePath); + fs.mkdirs(collectorPath, FsPermission.getDirDefault()); + + ByteBuffer byteBuffer = ByteBuffer.wrap( + collectorPath.toString().getBytes(Charsets.UTF_8)); + return BBUploadHandle.from(byteBuffer); + } + + @Override + public PartHandle putPart(Path filePath, InputStream inputStream, + int partNumber, UploadHandle uploadId, long lengthInBytes) + throws IOException { + + byte[] uploadIdByteArray = uploadId.toByteArray(); + Path collectorPath = new Path(new String(uploadIdByteArray, 0, + uploadIdByteArray.length, Charsets.UTF_8)); + Path partPath = + Path.mergePaths(collectorPath, Path.mergePaths(new Path(Path.SEPARATOR), + new Path(Integer.toString(partNumber) + ".part"))); + FSDataOutputStreamBuilder outputStream = fs.createFile(partPath); + FSDataOutputStream fsDataOutputStream = outputStream.build(); + IOUtils.copy(inputStream, fsDataOutputStream, 4096); + fsDataOutputStream.close(); + return BBPartHandle.from(ByteBuffer.wrap( + partPath.toString().getBytes(Charsets.UTF_8))); + } + + private Path createCollectorPath(Path filePath) { + return Path.mergePaths(filePath.getParent(), + Path.mergePaths(new Path(filePath.getName().split("\\.")[0]), + Path.mergePaths(new Path("_multipart"), + new Path(Path.SEPARATOR)))); + } + + @Override + @SuppressWarnings("deprecation") // rename w/ OVERWRITE + public PathHandle complete(Path filePath, + List> handles, UploadHandle multipartUploadId) + throws IOException { + handles.sort(Comparator.comparing(Pair::getKey)); + List partHandles = handles + .stream() + .map(pair -> { + byte[] byteArray = pair.getValue().toByteArray(); + return new Path(new String(byteArray, 0, byteArray.length, + Charsets.UTF_8)); + }) + .collect(Collectors.toList()); + + Path collectorPath = createCollectorPath(filePath); + Path filePathInsideCollector = Path.mergePaths(collectorPath, + new Path(Path.SEPARATOR + filePath.getName())); + fs.create(filePathInsideCollector).close(); + fs.concat(filePathInsideCollector, + partHandles.toArray(new Path[handles.size()])); + fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE); + fs.delete(collectorPath, true); + FileStatus status = fs.getFileStatus(filePath); + return fs.getPathHandle(status); + } + + @Override + public void abort(Path filePath, UploadHandle uploadId) throws IOException { + byte[] uploadIdByteArray = uploadId.toByteArray(); + Path collectorPath = new Path(new String(uploadIdByteArray, 0, + uploadIdByteArray.length, Charsets.UTF_8)); + fs.delete(collectorPath, true); + } + + /** + * Factory for creating MultipartUploaderFactory objects for file:// + * filesystems. 
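The nested Factory is wired in through the ServiceLoader-driven MultipartUploaderFactory introduced later in this patch. A hedged sketch of the lookup, assuming the factory is registered as a service provider for MultipartUploaderFactory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.MultipartUploaderFactory;
import org.apache.hadoop.fs.UnsupportedMultipartUploaderException;

public class UploaderLookupDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem local = FileSystem.getLocal(conf);   // scheme "file"

    // get() asks each registered factory in turn; this Factory only answers
    // for the "file" scheme, other factories return null and are skipped.
    MultipartUploader uploader = MultipartUploaderFactory.get(local, conf);
    if (uploader == null) {
      throw new UnsupportedMultipartUploaderException(
          "No multipart uploader for scheme " + local.getScheme());
    }
  }
}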
+ */ + public static class Factory extends MultipartUploaderFactory { + protected MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) { + if (fs.getScheme().equals("file")) { + return new FileSystemMultipartUploader(fs); + } + return null; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java index 94d3389408..5be6e5f829 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java @@ -23,7 +23,6 @@ import java.util.Arrays; import java.util.LinkedList; -import org.apache.commons.lang.WordUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -275,7 +274,7 @@ private void printInstanceHelp(PrintStream out, Command instance) { listing = null; } - for (String descLine : WordUtils.wrap( + for (String descLine : StringUtils.wrap( line, MAX_LINE_WIDTH, "\n", true).split("\n")) { out.println(prefix + descLine); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java new file mode 100644 index 0000000000..a6b37b32bb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import com.google.protobuf.ByteString; +import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; +import java.util.Optional; + +/** + * Opaque handle to an entity in a FileSystem. + */ +public class LocalFileSystemPathHandle implements PathHandle { + + private final String path; + private final Long mtime; + + public LocalFileSystemPathHandle(String path, Optional mtime) { + this.path = path; + this.mtime = mtime.orElse(null); + } + + public LocalFileSystemPathHandle(ByteBuffer bytes) throws IOException { + if (null == bytes) { + throw new IOException("Missing PathHandle"); + } + LocalFileSystemPathHandleProto p = + LocalFileSystemPathHandleProto.parseFrom(ByteString.copyFrom(bytes)); + path = p.hasPath() ? p.getPath() : null; + mtime = p.hasMtime() ? 
p.getMtime() : null; + } + + public String getPath() { + return path; + } + + public void verify(FileStatus stat) throws InvalidPathHandleException { + if (null == stat) { + throw new InvalidPathHandleException("Could not resolve handle"); + } + if (mtime != null && mtime != stat.getModificationTime()) { + throw new InvalidPathHandleException("Content changed"); + } + } + + @Override + public ByteBuffer bytes() { + LocalFileSystemPathHandleProto.Builder b = + LocalFileSystemPathHandleProto.newBuilder(); + b.setPath(path); + if (mtime != null) { + b.setMtime(mtime); + } + return b.build().toByteString().asReadOnlyByteBuffer(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + LocalFileSystemPathHandle that = (LocalFileSystemPathHandle) o; + return Objects.equals(path, that.path) && + Objects.equals(mtime, that.mtime); + } + + @Override + public int hashCode() { + return Objects.hash(path, mtime); + } + + @Override + public String toString() { + return "LocalFileSystemPathHandle{" + + "path='" + path + '\'' + + ", mtime=" + mtime + + '}'; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java new file mode 100644 index 0000000000..24a92169a2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import org.apache.commons.lang3.tuple.Pair; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * MultipartUploader is an interface for copying files multipart and across + * multiple nodes. Users should: + * 1. Initialize an upload + * 2. Upload parts in any order + * 3. Complete the upload in order to have it materialize in the destination FS. + * + * Implementers should make sure that the complete function should make sure + * that 'complete' will reorder parts if the destination FS doesn't already + * do it for them. + */ +public abstract class MultipartUploader { + public static final Logger LOG = + LoggerFactory.getLogger(MultipartUploader.class); + + /** + * Initialize a multipart upload. + * @param filePath Target path for upload. + * @return unique identifier associating part uploads. + * @throws IOException + */ + public abstract UploadHandle initialize(Path filePath) throws IOException; + + /** + * Put part as part of a multipart upload. It should be possible to have + * parts uploaded in any order (or in parallel). 
+ * @param filePath Target path for upload (same as {@link #initialize(Path)}). + * @param inputStream Data for this part. + * @param partNumber Index of the part relative to others. + * @param uploadId Identifier from {@link #initialize(Path)}. + * @param lengthInBytes Target length to read from the stream. + * @return unique PartHandle identifier for the uploaded part. + * @throws IOException + */ + public abstract PartHandle putPart(Path filePath, InputStream inputStream, + int partNumber, UploadHandle uploadId, long lengthInBytes) + throws IOException; + + /** + * Complete a multipart upload. + * @param filePath Target path for upload (same as {@link #initialize(Path)}. + * @param handles Identifiers with associated part numbers from + * {@link #putPart(Path, InputStream, int, UploadHandle, long)}. + * Depending on the backend, the list order may be significant. + * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * @return unique PathHandle identifier for the uploaded file. + * @throws IOException + */ + public abstract PathHandle complete(Path filePath, + List> handles, UploadHandle multipartUploadId) + throws IOException; + + /** + * Aborts a multipart upload. + * @param filePath Target path for upload (same as {@link #initialize(Path)}. + * @param multipartuploadId Identifier from {@link #initialize(Path)}. + * @throws IOException + */ + public abstract void abort(Path filePath, UploadHandle multipartuploadId) + throws IOException; + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java new file mode 100644 index 0000000000..b0fa798ee2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.conf.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Iterator; +import java.util.ServiceLoader; + +/** + * {@link ServiceLoader}-driven uploader API for storage services supporting + * multipart uploads. + */ +public abstract class MultipartUploaderFactory { + public static final Logger LOG = + LoggerFactory.getLogger(MultipartUploaderFactory.class); + + /** + * Multipart Uploaders listed as services. + */ + private static ServiceLoader serviceLoader = + ServiceLoader.load(MultipartUploaderFactory.class, + MultipartUploaderFactory.class.getClassLoader()); + + // Iterate through the serviceLoader to avoid lazy loading. + // Lazy loading would require synchronization in concurrent use cases. 
+ static { + Iterator iterServices = serviceLoader.iterator(); + while (iterServices.hasNext()) { + iterServices.next(); + } + } + + public static MultipartUploader get(FileSystem fs, Configuration conf) + throws IOException { + MultipartUploader mpu = null; + for (MultipartUploaderFactory factory : serviceLoader) { + mpu = factory.createMultipartUploader(fs, conf); + if (mpu != null) { + break; + } + } + return mpu; + } + + protected abstract MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 126e754731..5e932864c8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -55,6 +55,9 @@ public static ChecksumParam checksumParam( ChecksumOpt csumOpt) { return new ChecksumParam(csumOpt); } + public static Progress progress(Progressable prog) { + return new Progress(prog); + } public static Perms perms(FsPermission perm) { return new Perms(perm); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java new file mode 100644 index 0000000000..df70b746cc --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.Serializable; +import java.nio.ByteBuffer; + +/** + * Opaque, serializable reference to an part id for multipart uploads. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface PartHandle extends Serializable { + /** + * @return Serialized from in bytes. 
+ */ + default byte[] toByteArray() { + ByteBuffer bb = bytes(); + byte[] ret = new byte[bb.remaining()]; + bb.get(ret); + return ret; + } + + ByteBuffer bytes(); + + @Override + boolean equals(Object other); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 252b3cca79..b6244d6a36 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -27,7 +27,7 @@ import java.util.regex.Pattern; import org.apache.avro.reflect.Stringable; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index c0f81997b8..bd003ae90a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -40,6 +40,7 @@ import java.nio.file.attribute.FileTime; import java.util.Arrays; import java.util.EnumSet; +import java.util.Optional; import java.util.StringTokenizer; import org.apache.hadoop.classification.InterfaceAudience; @@ -212,7 +213,19 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { return new FSDataInputStream(new BufferedFSInputStream( new LocalFSFileInputStream(f), bufferSize)); } - + + @Override + public FSDataInputStream open(PathHandle fd, int bufferSize) + throws IOException { + if (!(fd instanceof LocalFileSystemPathHandle)) { + fd = new LocalFileSystemPathHandle(fd.bytes()); + } + LocalFileSystemPathHandle id = (LocalFileSystemPathHandle) fd; + id.verify(getFileStatus(new Path(id.getPath()))); + return new FSDataInputStream(new BufferedFSInputStream( + new LocalFSFileInputStream(new Path(id.getPath())), bufferSize)); + } + /********************************************************* * For create()'s FSOutputStream. *********************************************************/ @@ -246,7 +259,7 @@ private LocalFSFileOutputStream(Path f, boolean append, } } } - + /* * Just forward to the fos */ @@ -350,6 +363,18 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, return out; } + @Override + public void concat(final Path trg, final Path [] psrcs) throws IOException { + final int bufferSize = 4096; + try(FSDataOutputStream out = create(trg)) { + for (Path src : psrcs) { + try(FSDataInputStream in = open(src)) { + IOUtils.copyBytes(in, out, bufferSize, false); + } + } + } + } + @Override public boolean rename(Path src, Path dst) throws IOException { // Attempt rename using Java API. @@ -863,6 +888,38 @@ public void setTimes(Path p, long mtime, long atime) throws IOException { } } + /** + * Hook to implement support for {@link PathHandle} operations. + * @param stat Referent in the target FileSystem + * @param opts Constraints that determine the validity of the + * {@link PathHandle} reference. + */ + protected PathHandle createPathHandle(FileStatus stat, + Options.HandleOpt... 
opts) { + if (stat.isDirectory() || stat.isSymlink()) { + throw new IllegalArgumentException("PathHandle only available for files"); + } + String authority = stat.getPath().toUri().getAuthority(); + if (authority != null && !authority.equals("file://")) { + throw new IllegalArgumentException("Wrong FileSystem: " + stat.getPath()); + } + Options.HandleOpt.Data data = + Options.HandleOpt.getOpt(Options.HandleOpt.Data.class, opts) + .orElse(Options.HandleOpt.changed(false)); + Options.HandleOpt.Location loc = + Options.HandleOpt.getOpt(Options.HandleOpt.Location.class, opts) + .orElse(Options.HandleOpt.moved(false)); + if (loc.allowChange()) { + throw new UnsupportedOperationException("Tracking file movement in " + + "basic FileSystem is not supported"); + } + final Path p = stat.getPath(); + final Optional mtime = !data.allowChange() + ? Optional.of(stat.getModificationTime()) + : Optional.empty(); + return new LocalFileSystemPathHandle(p.toString(), mtime); + } + @Override public boolean supportsSymlinks() { return true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java new file mode 100644 index 0000000000..5606a80dec --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * MultipartUploader for a given file system name/scheme is not supported. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class UnsupportedMultipartUploaderException extends IOException { + private static final long serialVersionUID = 1L; + + /** + * Constructs exception with the specified detail message. + * + * @param message exception message. + */ + public UnsupportedMultipartUploaderException(final String message) { + super(message); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java new file mode 100644 index 0000000000..143b4d1584 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.Serializable; +import java.nio.ByteBuffer; + +/** + * Opaque, serializable reference to an uploadId for multipart uploads. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface UploadHandle extends Serializable { + + /** + * @return Serialized from in bytes. + */ + default byte[] toByteArray() { + ByteBuffer bb = bytes(); + byte[] ret = new byte[bb.remaining()]; + bb.get(ret); + return ret; + } + + ByteBuffer bytes(); + + @Override + boolean equals(Object other); + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java index 8f6fc4d570..011e489df2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java @@ -23,7 +23,7 @@ import java.util.LinkedList; import java.util.List; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java index 9dd7771fd5..bbedf2a2dc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java @@ -18,7 +18,7 @@ package org.apache.hadoop.io; import com.google.common.collect.ComparisonChain; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import java.nio.ByteBuffer; import java.util.Map; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java index a2903f89b9..5af6602b87 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java @@ -26,7 +26,6 @@ import org.slf4j.LoggerFactory; import sun.misc.Unsafe; -import com.google.common.primitives.Longs; import com.google.common.primitives.UnsignedBytes; /** @@ -195,52 +194,43 @@ public int compareTo(byte[] buffer1, int offset1, int length1, length1 == length2) { return 0; } + final int stride = 8; int minLength = Math.min(length1, length2); - int minWords = minLength 
/ Longs.BYTES; + int strideLimit = minLength & ~(stride - 1); int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET; int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET; + int i; /* * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a * time is no slower than comparing 4 bytes at a time even on 32-bit. * On the other hand, it is substantially faster on 64-bit. */ - for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) { + for (i = 0; i < strideLimit; i += stride) { long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i); long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i); - long diff = lw ^ rw; - if (diff != 0) { + if (lw != rw) { if (!littleEndian) { return lessThanUnsigned(lw, rw) ? -1 : 1; } - // Use binary search - int n = 0; - int y; - int x = (int) diff; - if (x == 0) { - x = (int) (diff >>> 32); - n = 32; - } - - y = x << 16; - if (y == 0) { - n += 16; - } else { - x = y; - } - - y = x << 8; - if (y == 0) { - n += 8; - } - return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL)); + /* + * We want to compare only the first index where left[index] != + * right[index]. This corresponds to the least significant nonzero + * byte in lw ^ rw, since lw and rw are little-endian. + * Long.numberOfTrailingZeros(diff) tells us the least significant + * nonzero bit, and zeroing out the first three bits of L.nTZ gives + * us the shift to get that least significant nonzero byte. This + * comparison logic is based on UnsignedBytes from Guava v21 + */ + int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7; + return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF)); } } // The epilogue to cover the last (minLength % 8) elements. - for (int i = minWords * Longs.BYTES; i < minLength; i++) { + for (; i < minLength; i++) { int result = UnsignedBytes.compare( buffer1[offset1 + i], buffer2[offset2 + i]); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java index f008e24d2f..0f95058afc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java @@ -22,8 +22,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java index 3d6867aec4..ec317eee4d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java @@ -46,7 +46,7 @@ public final class ErasureCodeNative { loadLibrary(); } catch (Throwable t) { problem = "Loading ISA-L failed: " + t.getMessage(); - LOG.error("Loading ISA-L failed", t); + LOG.warn(problem); } LOADING_FAILURE_REASON = problem; } diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java index 412634462a..4d820c271a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.io.FileDescriptor; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java index bdfa471f53..b156d1fe64 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.ipc; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 0a00ca73d9..f12ecb6462 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -39,7 +39,7 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.AtomicDoubleArray; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.metrics2.MetricsCollector; @@ -429,7 +429,7 @@ private void decayCurrentCounts() { updateAverageResponseTime(true); } catch (Exception ex) { LOG.error("decayCurrentCounts exception: " + - ExceptionUtils.getFullStackTrace(ex)); + ExceptionUtils.getStackTrace(ex)); throw ex; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java index 6d9ea3e72e..3a8c83dea7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java @@ -32,7 +32,7 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; import org.apache.hadoop.metrics2.util.MBeans; @@ -286,7 +286,7 @@ public int size() { */ @Override public Iterator iterator() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java index ce6fbe1d82..1d62c0a29f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.codehaus.jackson.map.ObjectMapper; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java index 027450cb65..976f16bedd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java @@ -37,10 +37,8 @@ import org.apache.commons.configuration2.Configuration; import org.apache.commons.configuration2.PropertiesConfiguration; import org.apache.commons.configuration2.SubsetConfiguration; -import org.apache.commons.configuration2.builder.fluent.Configurations; -import org.apache.commons.configuration2.builder.fluent.Parameters; -import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler; import org.apache.commons.configuration2.ex.ConfigurationException; +import org.apache.commons.configuration2.io.FileHandler; import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsPlugin; import org.apache.hadoop.metrics2.filter.GlobFilter; @@ -112,12 +110,11 @@ static MetricsConfig create(String prefix, String... fileNames) { static MetricsConfig loadFirst(String prefix, String... 
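
The hunks above and below are part of the mechanical commons-lang to commons-lang3 migration. As a minimal sketch of the two lang3 API differences visible here (an explicit message for NotImplementedException and getStackTrace replacing getFullStackTrace), assuming commons-lang3 is on the classpath:

import org.apache.commons.lang3.NotImplementedException;
import org.apache.commons.lang3.exception.ExceptionUtils;

public class Lang3MigrationDemo {
  public static void main(String[] args) {
    try {
      // The patch supplies an explicit message rather than relying on the
      // commons-lang 2.x no-argument constructor.
      throw new NotImplementedException("Code is not implemented");
    } catch (NotImplementedException e) {
      // getFullStackTrace() is gone in lang3; getStackTrace(Throwable) is
      // the replacement used in DecayRpcScheduler above.
      System.out.println(ExceptionUtils.getStackTrace(e));
    }
  }
}
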
fileNames) { for (String fname : fileNames) { try { - Configuration cf = new Configurations().propertiesBuilder(fname) - .configure(new Parameters().properties() - .setFileName(fname) - .setListDelimiterHandler(new DefaultListDelimiterHandler(','))) - .getConfiguration() - .interpolatedConfiguration(); + PropertiesConfiguration pcf = new PropertiesConfiguration(); + FileHandler fh = new FileHandler(pcf); + fh.setFileName(fname); + fh.load(); + Configuration cf = pcf.interpolatedConfiguration(); LOG.info("Loaded properties from {}", fname); if (LOG.isDebugEnabled()) { LOG.debug("Properties: {}", toString(cf)); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java index 3d7a90e7ee..9b54adcb43 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java @@ -21,7 +21,7 @@ import java.lang.reflect.Method; import static com.google.common.base.Preconditions.*; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecordBuilder; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java index b2042e7a12..a3ca98d040 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java @@ -21,7 +21,7 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java index cc32975513..6b30618475 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java @@ -26,7 +26,7 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java index 053cb5535c..22c288a3b1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java @@ 
-32,7 +32,7 @@ import java.util.function.Function; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java index 92fe3d1496..5ef31785a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java index 0f6e9a9172..92ac9529be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java @@ -37,7 +37,7 @@ import java.util.regex.Pattern; import org.apache.commons.configuration2.SubsetConfiguration; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java index ead9a7430b..45759df6ad 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.Map; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java index ac118c0517..9693220438 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java @@ -28,7 +28,7 @@ import java.nio.channels.ReadableByteChannel; import java.nio.ByteBuffer; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.CloseableReferenceCount; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index c7af97f60a..e36399ff96 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -32,7 +32,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.util.NativeCodeLoader; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 94698d8446..31f4398055 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -18,7 +18,6 @@ package org.apache.hadoop.security; import java.io.IOException; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.StringTokenizer; @@ -26,7 +25,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -52,7 +51,8 @@ public class ShellBasedUnixGroupsMapping extends Configured protected static final Logger LOG = LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class); - private long timeout = 0L; + private long timeout = CommonConfigurationKeys. + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT; private static final List EMPTY_GROUPS = new LinkedList<>(); @Override @@ -61,10 +61,10 @@ public void setConf(Configuration conf) { if (conf != null) { timeout = conf.getTimeDuration( CommonConfigurationKeys. - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY, CommonConfigurationKeys. 
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT, - TimeUnit.SECONDS); + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 3872810748..29b9fea424 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -831,7 +831,9 @@ private long getRefreshTime(KerberosTicket tgt) { return start + (long) ((end - start) * TICKET_RENEW_WINDOW); } - private boolean shouldRelogin() { + @InterfaceAudience.Private + @InterfaceStability.Unstable + public boolean shouldRelogin() { return hasKerberosCredentials() && isHadoopLogin(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java index 608512155b..0a00d79104 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java @@ -27,7 +27,7 @@ import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.tools.CommandShell; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java index 34d9fe2b70..02c168f7b6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java @@ -34,7 +34,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import com.google.common.annotations.VisibleForTesting; import java.util.stream.Collectors; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java index d36ad9bf67..f154f2d816 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java @@ -26,7 +26,7 @@ import java.util.Date; import java.util.ServiceLoader; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java index 617773b34d..0ae2af35bf 100644 --- 
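
The timeout change above switches the group-shell command timeout from a seconds-only key to a duration-style key read in milliseconds. A small usage sketch of Configuration.getTimeDuration follows; the literal property name is an illustration only, since the patch reads it through CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class GroupShellTimeoutDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    String key = "hadoop.security.groups.shell.command.timeout";  // illustrative

    conf.set(key, "1s");
    // getTimeDuration parses the unit suffix and converts to the requested
    // unit, so "1s" becomes 1000 when asked for milliseconds.
    System.out.println(conf.getTimeDuration(key, 0L, TimeUnit.MILLISECONDS)); // 1000

    conf.set(key, "1m");
    System.out.println(conf.getTimeDuration(key, 0L, TimeUnit.MILLISECONDS)); // 60000

    conf.set(key, "1");  // no suffix: taken as the requested unit
    System.out.println(conf.getTimeDuration(key, 0L, TimeUnit.MILLISECONDS)); // 1
  }
}

These values match the expectations asserted in testShellTimeOutConf later in this patch.
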
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.security.token.delegation.web; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.net.NetUtils; @@ -31,6 +29,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,9 +55,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator { private static final String CONTENT_TYPE = "Content-Type"; private static final String APPLICATION_JSON_MIME = "application/json"; - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); - private static final String HTTP_GET = "GET"; private static final String HTTP_PUT = "PUT"; @@ -328,7 +324,7 @@ private Map doDelegationTokenOperation(URL url, if (contentType != null && contentType.contains(APPLICATION_JSON_MIME)) { try { - ret = READER.readValue(conn.getInputStream()); + ret = JsonSerialization.mapReader().readValue(conn.getInputStream()); } catch (Exception ex) { throw new AuthenticationException(String.format( "'%s' did not handle the '%s' delegation token operation: %s", diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java index bc2e2d49d7..85015fbe30 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java @@ -20,8 +20,7 @@ import java.util.ArrayList; import java.util.LinkedList; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.WordUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; /** @@ -103,7 +102,8 @@ String[] getRow(int idx) { // Line-wrap if it's too long String[] lines = new String[] {raw}; if (wrap) { - lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n"); + lines = org.apache.hadoop.util.StringUtils.wrap(lines[0], wrapWidth, + "\n", true).split("\n"); } for (int i=0; i jsonResponse = new LinkedHashMap(); jsonResponse.put(ERROR_JSON, json); Writer writer = response.getWriter(); - WRITER.writeValue(writer, jsonResponse); + JsonSerialization.writer().writeValue(writer, jsonResponse); writer.flush(); } @@ -150,7 +142,7 @@ public static void validateResponse(HttpURLConnection conn, InputStream es = null; try { es = conn.getErrorStream(); - Map json = READER.readValue(es); + Map json = JsonSerialization.mapReader().readValue(es); json = (Map) json.get(ERROR_JSON); String exClass = (String) json.get(ERROR_CLASSNAME_JSON); String exMsg = (String) json.get(ERROR_MESSAGE_JSON); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java index 86c4df666e..cbc8560a40 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java @@ -25,14 +25,18 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.Map; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.base.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,6 +69,26 @@ public class JsonSerialization { private final Class classType; private final ObjectMapper mapper; + private static final ObjectWriter WRITER = + new ObjectMapper().writerWithDefaultPrettyPrinter(); + + private static final ObjectReader MAP_READER = + new ObjectMapper().readerFor(Map.class); + + /** + * @return an ObjectWriter which pretty-prints its output + */ + public static ObjectWriter writer() { + return WRITER; + } + + /** + * @return an ObjectReader which returns simple Maps. + */ + public static ObjectReader mapReader() { + return MAP_READER; + } + /** * Create an instance bound to a specific type. * @param classType class to marshall diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 0b76f0df2a..46a0fccd41 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -1191,7 +1191,7 @@ public ShellCommandExecutor(String[] execString, File dir, /** * Returns the timeout value set for the executor's sub-commands. 
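
A usage sketch (not part of the patch) of the shared JsonSerialization.writer() and JsonSerialization.mapReader() helpers added above, which replace the per-class ObjectMapper and ObjectReader fields removed from DelegationTokenAuthenticator and HttpExceptionUtils; the payload contents are made up for illustration.

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.util.JsonSerialization;

public class JsonHelpersDemo {
  public static void main(String[] args) throws Exception {
    Map<String, Object> payload = new LinkedHashMap<>();
    payload.put("op", "GETDELEGATIONTOKEN");
    payload.put("renewer", "yarn");

    // Shared pretty-printing writer; Jackson ObjectWriter instances are
    // immutable, so a single static instance can be reused safely.
    String json = JsonSerialization.writer().writeValueAsString(payload);
    System.out.println(json);

    // Shared reader that turns arbitrary JSON objects back into Maps.
    Map<?, ?> parsed = JsonSerialization.mapReader().readValue(json);
    System.out.println(parsed.get("renewer"));
  }
}
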
- * @return The timeout value in seconds + * @return The timeout value in milliseconds */ @VisibleForTesting public long getTimeoutInterval() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 33a2010d6f..3db805fa4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -35,7 +35,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -987,7 +987,7 @@ public static String camelize(String s) { String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR, '_'); for (String word : words) - sb.append(org.apache.commons.lang.StringUtils.capitalize(word)); + sb.append(org.apache.commons.lang3.StringUtils.capitalize(word)); return sb.toString(); } @@ -1183,4 +1183,64 @@ public static boolean isAlpha(String str) { return true; } + /** + * Same as WordUtils#wrap in commons-lang 2.6. Unlike commons-lang3, leading + * spaces on the first line are NOT stripped. + * + * @param str the String to be word wrapped, may be null + * @param wrapLength the column to wrap the words at, less than 1 is treated + * as 1 + * @param newLineStr the string to insert for a new line, + * null uses the system property line separator + * @param wrapLongWords true if long words (such as URLs) should be wrapped + * @return a line with newlines inserted, null if null input + */ + public static String wrap(String str, int wrapLength, String newLineStr, + boolean wrapLongWords) { + if(str == null) { + return null; + } else { + if(newLineStr == null) { + newLineStr = System.lineSeparator(); + } + + if(wrapLength < 1) { + wrapLength = 1; + } + + int inputLineLength = str.length(); + int offset = 0; + StringBuffer wrappedLine = new StringBuffer(inputLineLength + 32); + + while(inputLineLength - offset > wrapLength) { + if(str.charAt(offset) == 32) { + ++offset; + } else { + int spaceToWrapAt = str.lastIndexOf(32, wrapLength + offset); + if(spaceToWrapAt >= offset) { + wrappedLine.append(str.substring(offset, spaceToWrapAt)); + wrappedLine.append(newLineStr); + offset = spaceToWrapAt + 1; + } else if(wrapLongWords) { + wrappedLine.append(str.substring(offset, wrapLength + offset)); + wrappedLine.append(newLineStr); + offset += wrapLength; + } else { + spaceToWrapAt = str.indexOf(32, wrapLength + offset); + if(spaceToWrapAt >= 0) { + wrappedLine.append(str.substring(offset, spaceToWrapAt)); + wrappedLine.append(newLineStr); + offset = spaceToWrapAt + 1; + } else { + wrappedLine.append(str.substring(offset)); + offset = inputLineLength; + } + } + } + } + + wrappedLine.append(str.substring(offset)); + return wrappedLine.toString(); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java index 7fd19907fd..2c2aca3a6b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java @@ 
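
The wrap() method added to org.apache.hadoop.util.StringUtils above backports the commons-lang 2.6 word-wrapping behaviour so that WordUtils can be dropped from TableListing. A short sketch of the expected output, using the classic WordUtils example string:

import org.apache.hadoop.util.StringUtils;

public class WrapDemo {
  public static void main(String[] args) {
    String text = "The quick brown fox jumps over the lazy dog";

    // Wrap at column 10, inserting "\n", wrapping over-long words if needed.
    System.out.println(StringUtils.wrap(text, 10, "\n", true));
    // Prints:
    // The quick
    // brown fox
    // jumps over
    // the lazy
    // dog
  }
}
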
-216,6 +216,21 @@ private void readProcMemInfoFile() { readProcMemInfoFile(false); } + /** + * + * Wrapper for Long.parseLong() that returns zero if the value is + * invalid. Under some circumstances, swapFree in /proc/meminfo can + * go negative, reported as a very large decimal value. + */ + private long safeParseLong(String strVal) { + long parsedVal; + try { + parsedVal = Long.parseLong(strVal); + } catch (NumberFormatException nfe) { + parsedVal = 0; + } + return parsedVal; + } /** * Read /proc/meminfo, parse and compute memory information. * @param readAgain if false, read only on the first time @@ -252,9 +267,9 @@ private void readProcMemInfoFile(boolean readAgain) { } else if (mat.group(1).equals(SWAPTOTAL_STRING)) { swapSize = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(MEMFREE_STRING)) { - ramSizeFree = Long.parseLong(mat.group(2)); + ramSizeFree = safeParseLong(mat.group(2)); } else if (mat.group(1).equals(SWAPFREE_STRING)) { - swapSizeFree = Long.parseLong(mat.group(2)); + swapSizeFree = safeParseLong(mat.group(2)); } else if (mat.group(1).equals(INACTIVE_STRING)) { inactiveSize = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(INACTIVEFILE_STRING)) { diff --git a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto index 5b8c45d0ad..c3b768ab67 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto @@ -68,3 +68,11 @@ message FileStatusProto { optional bytes ec_data = 17; optional uint32 flags = 18 [default = 0]; } + +/** + * Placeholder type for consistent basic FileSystem operations. + */ +message LocalFileSystemPathHandleProto { + optional uint64 mtime = 1; + optional string path = 2; +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory new file mode 100644 index 0000000000..f0054fedb8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +org.apache.hadoop.fs.FileSystemMultipartUploader$Factory diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index e865bf1d93..2361626c3f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -62,7 +62,7 @@ import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java index 7f27d7d51e..152159b3f3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java @@ -18,7 +18,7 @@ package org.apache.hadoop.conf; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java index 6c2e5b88bc..2ea45231a1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.Arrays; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.junit.Assume; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java new file mode 100644 index 0000000000..f132089a9e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.tuple.Pair; + +import org.junit.Test; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public abstract class AbstractSystemMultipartUploaderTest { + + abstract FileSystem getFS() throws IOException; + + abstract Path getBaseTestPath(); + + @Test + public void testMultipartUpload() throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + List> partHandles = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= 100; ++i) { + String contents = "ThisIsPart" + i + "\n"; + sb.append(contents); + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + partHandles.add(Pair.of(i, partHandle)); + } + PathHandle fd = mpu.complete(file, partHandles, uploadHandle); + byte[] fdData = IOUtils.toByteArray(fs.open(fd)); + byte[] fileData = IOUtils.toByteArray(fs.open(file)); + String readString = new String(fdData); + assertEquals(sb.toString(), readString); + assertArrayEquals(fdData, fileData); + } + + @Test + public void testMultipartUploadReverseOrder() throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + List> partHandles = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= 100; ++i) { + String contents = "ThisIsPart" + i + "\n"; + sb.append(contents); + } + for (int i = 100; i > 0; --i) { + String contents = "ThisIsPart" + i + "\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + partHandles.add(Pair.of(i, partHandle)); + } + PathHandle fd = mpu.complete(file, partHandles, uploadHandle); + byte[] fdData = IOUtils.toByteArray(fs.open(fd)); + byte[] fileData = IOUtils.toByteArray(fs.open(file)); + String readString = new String(fdData); + assertEquals(sb.toString(), readString); + assertArrayEquals(fdData, fileData); + } + + @Test + public void testMultipartUploadReverseOrderNoNContiguousPartNumbers() + throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + List> partHandles = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + for (int i = 2; i <= 200; i += 2) { + String contents = "ThisIsPart" + i + "\n"; + sb.append(contents); + } + for (int i = 200; i > 0; i -= 2) { + String contents = "ThisIsPart" + i + "\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + partHandles.add(Pair.of(i, partHandle)); + } + PathHandle fd = mpu.complete(file, partHandles, uploadHandle); + byte[] fdData = IOUtils.toByteArray(fs.open(fd)); + byte[] fileData = 
IOUtils.toByteArray(fs.open(file)); + String readString = new String(fdData); + assertEquals(sb.toString(), readString); + assertArrayEquals(fdData, fileData); + } + + @Test + public void testMultipartUploadAbort() throws Exception { + FileSystem fs = getFS(); + Path file = new Path(getBaseTestPath(), "some-file"); + MultipartUploader mpu = MultipartUploaderFactory.get(fs, null); + UploadHandle uploadHandle = mpu.initialize(file); + for (int i = 100; i >= 50; --i) { + String contents = "ThisIsPart" + i + "\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len); + } + mpu.abort(file, uploadHandle); + + String contents = "ThisIsPart49\n"; + int len = contents.getBytes().length; + InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + + try { + mpu.putPart(file, is, 49, uploadHandle, len); + fail("putPart should have thrown an exception"); + } catch (IOException ok) { + // ignore + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java index da071050eb..8cbca8e815 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 35ec4ff6b6..62ecd9f13a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -810,7 +810,49 @@ public void testCreateFlagAppendCreateOverwrite() throws IOException { fc.create(p, EnumSet.of(CREATE, APPEND, OVERWRITE)); Assert.fail("Excepted exception not thrown"); } - + + @Test + public void testBuilderCreateNonExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateNonExistingFile"); + FSDataOutputStream out = fc.create(p).build(); + writeData(fc, p, out, data, data.length); + } + + @Test + public void testBuilderCreateExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateExistingFile"); + createFile(p); + FSDataOutputStream out = fc.create(p).overwrite(true).build(); + writeData(fc, p, out, data, data.length); + } + + @Test + public void testBuilderCreateAppendNonExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateAppendNonExistingFile"); + FSDataOutputStream out = fc.create(p).append().build(); + writeData(fc, p, out, data, data.length); + } + + @Test + public void testBuilderCreateAppendExistingFile() throws IOException { + Path p = getTestRootPath(fc, "test/testBuilderCreateAppendExistingFile"); + createFile(p); + FSDataOutputStream out = fc.create(p).append().build(); + writeData(fc, p, out, data, 2 * data.length); + } + + @Test + public void testBuilderCreateRecursive() throws IOException { + 
Path p = getTestRootPath(fc, "test/parent/no/exist/file1"); + try (FSDataOutputStream out = fc.create(p).build()) { + fail("Should throw FileNotFoundException on non-exist directory"); + } catch (FileNotFoundException e) { + } + + FSDataOutputStream out = fc.create(p).recursive().build(); + writeData(fc, p, out, data, data.length); + } + private static void writeData(FileContext fc, Path p, FSDataOutputStream out, byte[] data, long expectedLen) throws IOException { out.write(data, 0, data.length); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java index 1f37f74e71..b5307a4e27 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java @@ -22,7 +22,6 @@ import java.io.FileNotFoundException; import java.util.EnumSet; -import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Options.CreateOpts.BlockSize; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java index 3def5d5388..6b9a34c3b3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java index 597eb93b58..fa682649a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.fs.StorageStatistics.LongStatistic; import org.junit.Before; @@ -67,15 +67,15 @@ public class TestFileSystemStorageStatistics { @Before public void setup() { - statistics.incrementBytesRead(RandomUtils.nextInt(100)); - statistics.incrementBytesWritten(RandomUtils.nextInt(100)); - statistics.incrementLargeReadOps(RandomUtils.nextInt(100)); - statistics.incrementWriteOps(RandomUtils.nextInt(100)); + statistics.incrementBytesRead(RandomUtils.nextInt(0, 100)); + statistics.incrementBytesWritten(RandomUtils.nextInt(0, 100)); + statistics.incrementLargeReadOps(RandomUtils.nextInt(0, 100)); + statistics.incrementWriteOps(RandomUtils.nextInt(0, 100)); - statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100)); - statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100)); - statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100)); - 
statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100)); + statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(0, 100)); + statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100)); + statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100)); + statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100)); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 0e337b4736..d5622af085 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -689,17 +689,18 @@ public void testFSOutputStreamBuilder() throws Exception { // and permission FSDataOutputStreamBuilder builder = fileSys.createFile(path); - builder.build(); - Assert.assertEquals("Should be default block size", - builder.getBlockSize(), fileSys.getDefaultBlockSize()); - Assert.assertEquals("Should be default replication factor", - builder.getReplication(), fileSys.getDefaultReplication()); - Assert.assertEquals("Should be default buffer size", - builder.getBufferSize(), - fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, - IO_FILE_BUFFER_SIZE_DEFAULT)); - Assert.assertEquals("Should be default permission", - builder.getPermission(), FsPermission.getFileDefault()); + try (FSDataOutputStream stream = builder.build()) { + Assert.assertEquals("Should be default block size", + builder.getBlockSize(), fileSys.getDefaultBlockSize()); + Assert.assertEquals("Should be default replication factor", + builder.getReplication(), fileSys.getDefaultReplication()); + Assert.assertEquals("Should be default buffer size", + builder.getBufferSize(), + fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, + IO_FILE_BUFFER_SIZE_DEFAULT)); + Assert.assertEquals("Should be default permission", + builder.getPermission(), FsPermission.getFileDefault()); + } // Test set 0 to replication, block size and buffer size builder = fileSys.createFile(path); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java new file mode 100644 index 0000000000..21d01b6cdb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
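
The RandomUtils changes in these tests reflect a lang3 signature difference rather than a behaviour change; a one-line sketch, assuming commons-lang3 3.3 or later:

import org.apache.commons.lang3.RandomUtils;

public class RandomUtilsDemo {
  public static void main(String[] args) {
    // commons-lang 2.x offered RandomUtils.nextInt(int n) for [0, n);
    // commons-lang3 takes an explicit range, hence nextInt(0, 100) above.
    int sample = RandomUtils.nextInt(0, 100);   // 0 <= sample < 100
    System.out.println(sample);
  }
}
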
+ */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.test.GenericTestUtils.getRandomizedTestDir; + +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.File; +import java.io.IOException; + +/** + * Test the FileSystemMultipartUploader on local file system. + */ +public class TestLocalFileSystemMultipartUploader + extends AbstractSystemMultipartUploaderTest { + + private static FileSystem fs; + private File tmp; + + @BeforeClass + public static void init() throws IOException { + fs = LocalFileSystem.getLocal(new Configuration()); + } + + @Before + public void setup() throws IOException { + tmp = getRandomizedTestDir(); + tmp.mkdirs(); + } + + @After + public void tearDown() throws IOException { + tmp.delete(); + } + + @Override + public FileSystem getFS() { + return fs; + } + + @Override + public Path getBaseTestPath() { + return new Path(tmp.getAbsolutePath()); + } + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java index fbe28c3c24..36cfa6ccda 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java @@ -123,6 +123,12 @@ public void testChanged() throws IOException { HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts) .orElseThrow(IllegalArgumentException::new); FileStatus stat = testFile(B1); + try { + // Temporary workaround while RawLocalFS supports only second precision + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new IOException(e); + } // modify the file by appending data appendFile(getFileSystem(), stat.getPath(), B2); byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java new file mode 100644 index 0000000000..3c088d278e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.contract.rawlocal; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.localfs.LocalFSContract; +import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract; + +public class TestRawlocalContractPathHandle + extends AbstractContractPathHandleTest { + + public TestRawlocalContractPathHandle(String testname, + Options.HandleOpt[] opts, boolean serialized) { + super(testname, opts, serialized); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new RawlocalFSContract(conf); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java index 8e60540126..e7f36fc850 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.fs.shell; -import org.apache.commons.lang.RandomStringUtils; -import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; @@ -56,11 +56,11 @@ public static int initialize(Path dir) throws Exception { fs.mkdirs(toDirPath); int numTotalFiles = 0; - int numDirs = RandomUtils.nextInt(5); + int numDirs = RandomUtils.nextInt(0, 5); for (int dirCount = 0; dirCount < numDirs; ++dirCount) { Path subDirPath = new Path(fromDirPath, "subdir" + dirCount); fs.mkdirs(subDirPath); - int numFiles = RandomUtils.nextInt(10); + int numFiles = RandomUtils.nextInt(0, 10); for (int fileCount = 0; fileCount < numFiles; ++fileCount) { numTotalFiles++; Path subFile = new Path(subDirPath, "file" + fileCount); @@ -115,7 +115,7 @@ public void testCopyFromLocalWithThreads() throws Exception { Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4)); int numFiles = TestCopyFromLocal.initialize(dir); int maxThreads = Runtime.getRuntime().availableProcessors() * 2; - int randThreads = RandomUtils.nextInt(maxThreads - 1) + 1; + int randThreads = RandomUtils.nextInt(0, maxThreads - 1) + 1; String numThreads = Integer.toString(randThreads); run(new TestMultiThreadedCopy(randThreads, randThreads == 1 ? 
0 : numFiles), "-t", numThreads, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java index fbe3fb8118..17be5874c5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java @@ -26,7 +26,7 @@ import org.junit.Assume; import org.junit.Before; import org.junit.Test; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index 41ae910cba..5fbd957312 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -20,7 +20,7 @@ import com.google.protobuf.BlockingService; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.metrics.RpcMetrics; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 0b463a5130..520042017d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ipc; import com.google.protobuf.ServiceException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index 30176f202c..62bd1b142e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -38,7 +38,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.KerberosAuthException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index 28b3cbe3fa..c0d204f86a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -43,7 +43,7 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket.DomainChannel; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java index d3c9538641..8c1339d38d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java @@ -173,6 +173,37 @@ public void testGetNumericGroupsResolvable() throws Exception { assertTrue(groups.contains("zzz")); } + public long getTimeoutInterval(String timeout) { + Configuration conf = new Configuration(); + String userName = "foobarnonexistinguser"; + conf.set( + CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY, + timeout); + TestDelayedGroupCommand mapping = ReflectionUtils + .newInstance(TestDelayedGroupCommand.class, conf); + ShellCommandExecutor executor = mapping.createGroupExecutor(userName); + return executor.getTimeoutInterval(); + } + + @Test + public void testShellTimeOutConf() { + + // Test a 1 second max-runtime timeout + assertEquals( + "Expected the group names executor to carry the configured timeout", + 1000L, getTimeoutInterval("1s")); + + // Test a 1 minute max-runtime timeout + assertEquals( + "Expected the group names executor to carry the configured timeout", + 60000L, getTimeoutInterval("1m")); + + // Test a 1 millisecond max-runtime timeout + assertEquals( + "Expected the group names executor to carry the configured timeout", + 1L, getTimeoutInterval("1")); + } + private class TestGroupResolvable extends ShellBasedUnixGroupsMapping { /** @@ -222,7 +253,7 @@ public void testGetGroupsResolvable() throws Exception { private static class TestDelayedGroupCommand extends ShellBasedUnixGroupsMapping { - private Long timeoutSecs = 2L; + private Long timeoutSecs = 1L; TestDelayedGroupCommand() { super(); @@ -249,12 +280,12 @@ public void testFiniteGroupResolutionTime() throws Exception { String userName = "foobarnonexistinguser"; String commandTimeoutMessage = "ran longer than the configured timeout limit"; - long testTimeout = 1L; + long testTimeout = 500L; // Test a 1 second max-runtime timeout conf.setLong( CommonConfigurationKeys. - HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY, testTimeout); TestDelayedGroupCommand mapping = @@ -306,7 +337,7 @@ public void testFiniteGroupResolutionTime() throws Exception { conf = new Configuration(); long defaultTimeout = CommonConfigurationKeys. 
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT; + HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT; mapping = ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java index 9357f48df3..0f8f1e45c9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java index 127b0b3827..d7c86316ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java @@ -18,7 +18,7 @@ package org.apache.hadoop.service.launcher; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceOperations; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 53eb2be3bb..3e9da1b45f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -40,7 +40,7 @@ import java.util.regex.Pattern; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.fs.FileUtil; @@ -344,7 +344,7 @@ public static void assertExceptionContains(String expectedText, throw new AssertionError(E_NULL_THROWABLE_STRING, t); } if (expectedText != null && !msg.contains(expectedText)) { - String prefix = org.apache.commons.lang.StringUtils.isEmpty(message) + String prefix = org.apache.commons.lang3.StringUtils.isEmpty(message) ? 
"" : (message + ": "); throw new AssertionError( String.format("%s Expected to find '%s' %s: %s", diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java index 2aa5e95b04..d53982363d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.util; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.slf4j.LoggerFactory; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java index a9fa4c64e9..b61cebc0a6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import org.apache.commons.lang.SystemUtils; +import org.apache.commons.lang3.SystemUtils; import org.junit.Assert; import org.junit.Assume; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java index a646a41271..0ae5d3ce8c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java @@ -161,6 +161,36 @@ int readDiskBlockInformation(String diskName, int defSector) { "DirectMap2M: 2027520 kB\n" + "DirectMap1G: 132120576 kB\n"; + static final String MEMINFO_FORMAT3 = + "MemTotal: %d kB\n" + + "MemFree: %s kB\n" + + "Buffers: 138244 kB\n" + + "Cached: 947780 kB\n" + + "SwapCached: 142880 kB\n" + + "Active: 3229888 kB\n" + + "Inactive: %d kB\n" + + "SwapTotal: %d kB\n" + + "SwapFree: %s kB\n" + + "Dirty: 122012 kB\n" + + "Writeback: 0 kB\n" + + "AnonPages: 2710792 kB\n" + + "Mapped: 24740 kB\n" + + "Slab: 132528 kB\n" + + "SReclaimable: 105096 kB\n" + + "SUnreclaim: 27432 kB\n" + + "PageTables: 11448 kB\n" + + "NFS_Unstable: 0 kB\n" + + "Bounce: 0 kB\n" + + "CommitLimit: 4125904 kB\n" + + "Committed_AS: 4143556 kB\n" + + "VmallocTotal: 34359738367 kB\n" + + "VmallocUsed: 1632 kB\n" + + "VmallocChunk: 34359736375 kB\n" + + "HugePages_Total: %d\n" + + "HugePages_Free: 0\n" + + "HugePages_Rsvd: 0\n" + + "Hugepagesize: 2048 kB"; + static final String CPUINFO_FORMAT = "processor : %s\n" + "vendor_id : AuthenticAMD\n" + @@ -384,6 +414,36 @@ public void parsingProcMemFile2() throws IOException { (nrHugePages * 2048) + swapTotal)); } + /** + * Test parsing /proc/meminfo + * @throws IOException + */ + @Test + public void parsingProcMemFileWithBadValues() throws IOException { + long memTotal = 4058864L; + long memFree = 0L; // bad value should return 0 + long inactive = 567732L; + long swapTotal = 2096472L; + long swapFree = 0L; // bad value should return 0 + int nrHugePages = 10; + String badFreeValue = "18446744073709551596"; + File tempFile = new File(FAKE_MEMFILE); + tempFile.deleteOnExit(); + 
FileWriter fWriter = new FileWriter(FAKE_MEMFILE); + fWriter.write(String.format(MEMINFO_FORMAT3, + memTotal, badFreeValue, inactive, swapTotal, badFreeValue, nrHugePages)); + + fWriter.close(); + assertEquals(plugin.getAvailablePhysicalMemorySize(), + 1024L * (memFree + inactive)); + assertEquals(plugin.getAvailableVirtualMemorySize(), + 1024L * (memFree + inactive + swapFree)); + assertEquals(plugin.getPhysicalMemorySize(), + 1024L * (memTotal - (nrHugePages * 2048))); + assertEquals(plugin.getVirtualMemorySize(), + 1024L * (memTotal - (nrHugePages * 2048) + swapTotal)); + } + @Test public void testCoreCounts() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml index a0d1d21a94..8cbd4a0abc 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml @@ -122,4 +122,9 @@ true + + fs.contract.supports-content-check + true + + diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh new file mode 100644 index 0000000000..d7c7427b70 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +trap "echo SIGTERM trapped!" SIGTERM +trap "echo SIGINT trapped!" 
SIGINT + +echo "$$" > "$1" + +while true; do + sleep 1.3 +done diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java index f8265729d8..b9b8d9cee6 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import com.fasterxml.jackson.databind.ObjectMapper; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.http.JettyUtils; +import org.apache.hadoop.util.JsonSerialization; import javax.ws.rs.Produces; import javax.ws.rs.WebApplicationException; @@ -67,8 +66,7 @@ public void writeTo(Object obj, Class aClass, Type type, OutputStream outputStream) throws IOException, WebApplicationException { Writer writer = new OutputStreamWriter(outputStream, Charset .forName("UTF-8")); - ObjectMapper jsonMapper = new ObjectMapper(); - jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj); + JsonSerialization.writer().writeValue(writer, obj); } } diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index dfbf8184f8..5de6759ce9 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -265,6 +265,11 @@ hadoop-ozone-docs provided
+ + org.apache.hadoop + hadoop-ozone-filesystem + provided + diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml index faf420c7f5..512c649e21 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml @@ -16,18 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ../../ozone:/opt/hadoop - ports: - - 9870:9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config index c693db0428..632f8701d2 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-config +++ b/hadoop-dist/src/main/compose/ozone/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True @@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml index fb7873bf88..3233c11641 100644 --- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml @@ -16,19 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ../../ozone:/opt/hadoop - - ./jmxpromo.jar:/opt/jmxpromo.jar - ports: - - 9870:9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config index e4f5485ac5..2be22a7792 100644 --- a/hadoop-dist/src/main/compose/ozoneperf/docker-config +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True @@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index b6b95eba06..438615fec5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -244,32 +244,6 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; - /** - * Don't start processing a pool if we have not had a minimum number of - * seconds from the last processing. - */ - public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL = - "ozone.scm.container.report.processing.interval"; - public static final String - OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s"; - - /** - * This determines the total number of pools to be processed in parallel. - */ - public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS = - "ozone.scm.max.nodepool.processing.threads"; - public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1; - /** - * These 2 settings control the number of threads in executor pool and time - * outs for thw container reports from all nodes. 
- */ - public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS = - "ozone.scm.max.container.report.threads"; - public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100; - public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT = - "ozone.scm.container.reports.wait.timeout"; - public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT = - "5m"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java index 2c38d45728..ee05c8768a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.container.common.helpers; import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.PropertyAccessor; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; @@ -30,6 +31,7 @@ import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.Arrays; import java.util.Comparator; import static java.lang.Math.max; @@ -63,6 +65,13 @@ public class ContainerInfo private String owner; private long containerID; private long deleteTransactionId; + /** + * Allows you to maintain private data on ContainerInfo. This is not + * serialized via protobuf, just allows us to maintain some private data. + */ + @JsonIgnore + private byte[] data; + ContainerInfo( long containerID, HddsProtos.LifeCycleState state, @@ -295,6 +304,29 @@ public String toJsonString() throws IOException { return WRITER.writeValueAsString(this); } + /** + * Returns private data that is set on this containerInfo. + * + * @return blob, the user can interpret it any way they like. + */ + public byte[] getData() { + if (this.data != null) { + return Arrays.copyOf(this.data, this.data.length); + } else { + return null; + } + } + + /** + * Set private data on ContainerInfo object. + * + * @param data -- private data. + */ + public void setData(byte[] data) { + if (data != null) { + this.data = Arrays.copyOf(data, data.length); + } + } /** * Builder class for ContainerInfo. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java index 87408385ec..c5794f4c03 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java @@ -27,14 +27,14 @@ import com.fasterxml.jackson.databind.ser.FilterProvider; import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Map; +import java.util.TreeMap; import java.util.List; /** @@ -46,7 +46,7 @@ public class Pipeline { static { ObjectMapper mapper = new ObjectMapper(); - String[] ignorableFieldNames = {"data"}; + String[] ignorableFieldNames = {"leaderID", "datanodes"}; FilterProvider filters = new SimpleFilterProvider() .addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter .serializeAllExcept(ignorableFieldNames)); @@ -57,38 +57,66 @@ public class Pipeline { WRITER = mapper.writer(filters); } - private PipelineChannel pipelineChannel; - /** - * Allows you to maintain private data on pipelines. This is not serialized - * via protobuf, just allows us to maintain some private data. - */ @JsonIgnore - private byte[] data; + private String leaderID; + @JsonIgnore + private Map datanodes; + private HddsProtos.LifeCycleState lifeCycleState; + private HddsProtos.ReplicationType type; + private HddsProtos.ReplicationFactor factor; + private String name; + // TODO: change to long based id + //private long id; + /** * Constructs a new pipeline data structure. * - * @param pipelineChannel - transport information for this container + * @param leaderID - Leader datanode id + * @param lifeCycleState - Pipeline State + * @param replicationType - Replication protocol + * @param replicationFactor - replication count on datanodes + * @param name - pipelineName */ - public Pipeline(PipelineChannel pipelineChannel) { - this.pipelineChannel = pipelineChannel; - data = null; + public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState, + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, String name) { + this.leaderID = leaderID; + this.lifeCycleState = lifeCycleState; + this.type = replicationType; + this.factor = replicationFactor; + this.name = name; + datanodes = new TreeMap<>(); } /** * Gets pipeline object from protobuf. * - * @param pipeline - ProtoBuf definition for the pipeline. + * @param pipelineProto - ProtoBuf definition for the pipeline. 
* @return Pipeline Object */ - public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) { - Preconditions.checkNotNull(pipeline); - PipelineChannel pipelineChannel = - PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel()); - return new Pipeline(pipelineChannel); + public static Pipeline getFromProtoBuf( + HddsProtos.Pipeline pipelineProto) { + Preconditions.checkNotNull(pipelineProto); + Pipeline pipeline = + new Pipeline(pipelineProto.getLeaderID(), + pipelineProto.getState(), + pipelineProto.getType(), + pipelineProto.getFactor(), + pipelineProto.getName()); + + for (HddsProtos.DatanodeDetailsProto dataID : + pipelineProto.getMembersList()) { + pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID)); + } + return pipeline; } + /** + * returns the replication count. + * @return Replication Factor + */ public HddsProtos.ReplicationFactor getFactor() { - return pipelineChannel.getFactor(); + return factor; } /** @@ -98,19 +126,34 @@ public HddsProtos.ReplicationFactor getFactor() { */ @JsonIgnore public DatanodeDetails getLeader() { - return pipelineChannel.getDatanodes().get(pipelineChannel.getLeaderID()); + return getDatanodes().get(leaderID); } + public void addMember(DatanodeDetails datanodeDetails) { + datanodes.put(datanodeDetails.getUuid().toString(), + datanodeDetails); + } + + public Map getDatanodes() { + return datanodes; + } /** * Returns the leader host. * * @return First Machine. */ public String getLeaderHost() { - return pipelineChannel.getDatanodes() - .get(pipelineChannel.getLeaderID()).getHostName(); + return getDatanodes() + .get(leaderID).getHostName(); } + /** + * + * @return lead + */ + public String getLeaderID() { + return leaderID; + } /** * Returns all machines that make up this pipeline. * @@ -118,7 +161,7 @@ public String getLeaderHost() { */ @JsonIgnore public List getMachines() { - return new ArrayList<>(pipelineChannel.getDatanodes().values()); + return new ArrayList<>(getDatanodes().values()); } /** @@ -128,7 +171,7 @@ public List getMachines() { */ public List getDatanodeHosts() { List dataHosts = new ArrayList<>(); - for (DatanodeDetails id : pipelineChannel.getDatanodes().values()) { + for (DatanodeDetails id :getDatanodes().values()) { dataHosts.add(id.getHostName()); } return dataHosts; @@ -143,46 +186,31 @@ public List getDatanodeHosts() { public HddsProtos.Pipeline getProtobufMessage() { HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder(); - builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage()); + for (DatanodeDetails datanode : datanodes.values()) { + builder.addMembers(datanode.getProtoBufMessage()); + } + builder.setLeaderID(leaderID); + + if (this.getLifeCycleState() != null) { + builder.setState(this.getLifeCycleState()); + } + if (this.getType() != null) { + builder.setType(this.getType()); + } + + if (this.getFactor() != null) { + builder.setFactor(this.getFactor()); + } return builder.build(); } - /** - * Returns private data that is set on this pipeline. - * - * @return blob, the user can interpret it any way they like. - */ - public byte[] getData() { - if (this.data != null) { - return Arrays.copyOf(this.data, this.data.length); - } else { - return null; - } - } - - @VisibleForTesting - public PipelineChannel getPipelineChannel() { - return pipelineChannel; - } - - /** - * Set private data on pipeline. - * - * @param data -- private data. 
- */ - public void setData(byte[] data) { - if (data != null) { - this.data = Arrays.copyOf(data, data.length); - } - } - /** * Gets the State of the pipeline. * * @return - LifeCycleStates. */ public HddsProtos.LifeCycleState getLifeCycleState() { - return pipelineChannel.getLifeCycleState(); + return lifeCycleState; } /** @@ -191,7 +219,7 @@ public HddsProtos.LifeCycleState getLifeCycleState() { * @return - Name of the pipeline */ public String getPipelineName() { - return pipelineChannel.getName(); + return name; } /** @@ -200,16 +228,16 @@ public String getPipelineName() { * @return type - Standalone, Ratis, Chained. */ public HddsProtos.ReplicationType getType() { - return pipelineChannel.getType(); + return type; } @Override public String toString() { final StringBuilder b = new StringBuilder(getClass().getSimpleName()) .append("["); - pipelineChannel.getDatanodes().keySet().stream() + getDatanodes().keySet().stream() .forEach(id -> b. - append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id)); + append(id.endsWith(getLeaderID()) ? "*" + id : id)); b.append(" name:").append(getPipelineName()); if (getType() != null) { b.append(" type:").append(getType().toString()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java deleted file mode 100644 index 655751d737..0000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; - -import java.util.Map; -import java.util.TreeMap; - -/** - * PipelineChannel information for a {@link Pipeline}. - */ -public class PipelineChannel { - @JsonIgnore - private String leaderID; - @JsonIgnore - private Map datanodes; - private LifeCycleState lifeCycleState; - private ReplicationType type; - private ReplicationFactor factor; - private String name; - // TODO: change to long based id - //private long id; - - public PipelineChannel(String leaderID, LifeCycleState lifeCycleState, - ReplicationType replicationType, ReplicationFactor replicationFactor, - String name) { - this.leaderID = leaderID; - this.lifeCycleState = lifeCycleState; - this.type = replicationType; - this.factor = replicationFactor; - this.name = name; - datanodes = new TreeMap<>(); - } - - public String getLeaderID() { - return leaderID; - } - - public Map getDatanodes() { - return datanodes; - } - - public LifeCycleState getLifeCycleState() { - return lifeCycleState; - } - - public ReplicationType getType() { - return type; - } - - public ReplicationFactor getFactor() { - return factor; - } - - public String getName() { - return name; - } - - public void addMember(DatanodeDetails datanodeDetails) { - datanodes.put(datanodeDetails.getUuid().toString(), - datanodeDetails); - } - - @JsonIgnore - public HddsProtos.PipelineChannel getProtobufMessage() { - HddsProtos.PipelineChannel.Builder builder = - HddsProtos.PipelineChannel.newBuilder(); - for (DatanodeDetails datanode : datanodes.values()) { - builder.addMembers(datanode.getProtoBufMessage()); - } - builder.setLeaderID(leaderID); - - if (this.getLifeCycleState() != null) { - builder.setState(this.getLifeCycleState()); - } - if (this.getType() != null) { - builder.setType(this.getType()); - } - - if (this.getFactor() != null) { - builder.setFactor(this.getFactor()); - } - return builder.build(); - } - - public static PipelineChannel getFromProtoBuf( - HddsProtos.PipelineChannel transportProtos) { - Preconditions.checkNotNull(transportProtos); - PipelineChannel pipelineChannel = - new PipelineChannel(transportProtos.getLeaderID(), - transportProtos.getState(), - transportProtos.getType(), - transportProtos.getFactor(), - transportProtos.getName()); - - for (HddsProtos.DatanodeDetailsProto dataID : - transportProtos.getMembersList()) { - pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID)); - } - return pipelineChannel; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 3b774a5a6a..27aa6ee8f7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -96,7 +96,6 @@ public final class OzoneConsts { public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX; public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; public static final String BLOCK_DB = "block.db"; - public static final String NODEPOOL_DB = "nodepool.db"; public static final String OPEN_CONTAINERS_DB = "openContainers.db"; public static final String DELETED_BLOCK_DB = "deletedBlock.db"; public static final String KSM_DB_NAME = "ksm.db"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java index 431da64094..5718008b41 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java @@ -126,7 +126,7 @@ public synchronized void run() { try { // Collect task results BackgroundTaskResult result = serviceTimeout > 0 - ? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS) + ? taskResultFuture.get(serviceTimeout, unit) : taskResultFuture.get(); if (LOG.isDebugEnabled()) { LOG.debug("task execution result size {}", result.getSize()); diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index a9a703eb00..816efa7c25 100644 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -40,7 +40,7 @@ message Port { required uint32 value = 2; } -message PipelineChannel { +message Pipeline { required string leaderID = 1; repeated DatanodeDetailsProto members = 2; optional LifeCycleState state = 3 [default = OPEN]; @@ -49,12 +49,6 @@ message PipelineChannel { optional string name = 6; } -// A pipeline is composed of PipelineChannel (Ratis/StandAlone) that back a -// container. -message Pipeline { - required PipelineChannel pipelineChannel = 2; -} - message KeyValue { required string key = 1; optional string value = 2; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a3e4776beb..adad51206c 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -583,25 +583,6 @@ allocation. - - ozone.scm.container.report.processing.interval - 60s - OZONE, PERFORMANCE - Time interval for scm to process container reports - for a node pool. Scm handles node pool reports in a cyclic clock - manner, it fetches pools periodically with this time interval. - - - - ozone.scm.container.reports.wait.timeout - 300s - OZONE, PERFORMANCE, MANAGEMENT - Maximum time to wait in seconds for processing all container - reports from - a node pool. It determines the timeout for a - node pool report. - - ozone.scm.container.size.gb 5 @@ -804,17 +785,6 @@ The keytab file for Kerberos authentication in SCM. - - ozone.scm.max.container.report.threads - 100 - OZONE, PERFORMANCE - - Maximum number of threads to process container reports in scm. - Each container report from a data node is processed by scm in a worker - thread, fetched from a thread pool. This property is used to control the - maximum size of the thread pool. - - ozone.scm.max.hb.count.to.process 5000 @@ -826,14 +796,6 @@ for more info. - - ozone.scm.max.nodepool.processing.threads - 1 - OZONE, MANAGEMENT, PERFORMANCE - - Number of node pools to process in parallel. 
- - ozone.scm.names @@ -855,15 +817,6 @@ see ozone.scm.heartbeat.thread.interval before changing this value. - - ozone.scm.max.nodepool.processing.threads - 1 - OZONE, SCM - - Controls the number of node pools that can be processed in parallel by - Container Supervisor. - - ozone.trace.enabled false diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index fa4187a254..ddeec873bc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -25,9 +25,11 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine .DatanodeStateMachine; +import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; @@ -229,9 +231,18 @@ public static HddsDatanodeService createHddsDatanodeService( public static void main(String[] args) { try { + if (DFSUtil.parseHelpArgument(args, "Starts HDDS Datanode", System.out, false)) { + System.exit(0); + } + Configuration conf = new OzoneConfiguration(); + GenericOptionsParser hParser = new GenericOptionsParser(conf, args); + if (!hParser.isParseSuccessful()) { + GenericOptionsParser.printGenericCommandUsage(System.err); + System.exit(1); + } StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG); HddsDatanodeService hddsDatanodeService = - createHddsDatanodeService(new OzoneConfiguration()); + createHddsDatanodeService(conf); hddsDatanodeService.start(null); hddsDatanodeService.join(); } catch (Throwable e) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index b6a9bb92b1..245d76f0db 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -32,6 +32,8 @@ .CommandDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler .DeleteBlocksCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler + .ReplicateContainerCommandHandler; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; @@ -95,6 +97,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, .addHandler(new CloseContainerCommandHandler()) .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(), conf)) + .addHandler(new ReplicateContainerCommandHandler()) .setConnectionManager(connectionManager) .setContainer(container) .setContext(context) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java index 50dea0a146..52cf2e0123 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java @@ -83,10 +83,10 @@ public class BlockDeletingService extends BackgroundService{ private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; public BlockDeletingService(ContainerManager containerManager, - long serviceInterval, long serviceTimeout, Configuration conf) { - super("BlockDeletingService", serviceInterval, - TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, - serviceTimeout); + long serviceInterval, long serviceTimeout, TimeUnit unit, + Configuration conf) { + super("BlockDeletingService", serviceInterval, unit, + BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.containerManager = containerManager; this.conf = conf; this.blockLimitPerTask = conf.getInt( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java new file mode 100644 index 0000000000..b4e83b7d40 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.ozone.container.common.statemachine + .SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.protocol.commands.SCMCommand; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Command handler to copy containers from sources. + */ +public class ReplicateContainerCommandHandler implements CommandHandler { + static final Logger LOG = + LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); + + private int invocationCount; + + private long totalTime; + + @Override + public void handle(SCMCommand command, OzoneContainer container, + StateContext context, SCMConnectionManager connectionManager) { + LOG.warn("Replicate command is not yet handled"); + + } + + @Override + public SCMCommandProto.Type getCommandType() { + return Type.replicateContainerCommand; + } + + @Override + public int getInvocationCount() { + return this.invocationCount; + } + + @Override + public long getAverageRunTime() { + if (invocationCount > 0) { + return totalTime / invocationCount; + } + return 0; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index 1ee6375a56..260a245ceb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -39,6 +39,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -196,6 +198,16 @@ private void processResponse(SCMHeartbeatResponseProto response, } this.context.addCommand(closeContainer); break; + case replicateContainerCommand: + ReplicateContainerCommand replicateContainerCommand = + ReplicateContainerCommand.getFromProtobuf( + commandResponseProto.getReplicateContainerCommandProto()); + if (LOG.isDebugEnabled()) { + LOG.debug("Received SCM container replicate request for container {}", + replicateContainerCommand.getContainerID()); + } + this.context.addCommand(replicateContainerCommand); + break; default: throw new IllegalArgumentException("Unknown response : " + commandResponseProto.getCommandType().name()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java index 6809d57042..5d6fc0aa3f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.common.utils; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.InconsistentStorageStateException; import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java new file mode 100644 index 0000000000..e0a235122e --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.util.List; +import java.util.PriorityQueue; +import java.util.Queue; + +/** + * Priority queue to handle under-replicated and over replicated containers + * in ozone. ReplicationManager will consume these messages and decide + * accordingly. + */ +public class ReplicationQueue { + + private final Queue queue; + + ReplicationQueue() { + queue = new PriorityQueue<>(); + } + + public synchronized boolean add(ReplicationRequest repObj) { + if (this.queue.contains(repObj)) { + // Remove the earlier message and insert this one + this.queue.remove(repObj); + } + return this.queue.add(repObj); + } + + public synchronized boolean remove(ReplicationRequest repObj) { + return queue.remove(repObj); + } + + /** + * Retrieves, but does not remove, the head of this queue, + * or returns {@code null} if this queue is empty. + * + * @return the head of this queue, or {@code null} if this queue is empty + */ + public synchronized ReplicationRequest peek() { + return queue.peek(); + } + + /** + * Retrieves and removes the head of this queue, + * or returns {@code null} if this queue is empty. 
+ * + * @return the head of this queue, or {@code null} if this queue is empty + */ + public synchronized ReplicationRequest poll() { + return queue.poll(); + } + + public synchronized boolean removeAll(List repObjs) { + return queue.removeAll(repObjs); + } + + public int size() { + return queue.size(); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java new file mode 100644 index 0000000000..a6ccce13e0 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.io.Serializable; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +/** + * Wrapper class for hdds replication queue. Implements its natural + * ordering for priority queue. + */ +public class ReplicationRequest implements Comparable, + Serializable { + private final long containerId; + private final short replicationCount; + private final short expecReplicationCount; + private final long timestamp; + + public ReplicationRequest(long containerId, short replicationCount, + long timestamp, short expecReplicationCount) { + this.containerId = containerId; + this.replicationCount = replicationCount; + this.timestamp = timestamp; + this.expecReplicationCount = expecReplicationCount; + } + + /** + * Compares this object with the specified object for order. Returns a + * negative integer, zero, or a positive integer as this object is less + * than, equal to, or greater than the specified object. + * @param o the object to be compared. + * @return a negative integer, zero, or a positive integer as this object + * is less than, equal to, or greater than the specified object. + * @throws NullPointerException if the specified object is null + * @throws ClassCastException if the specified object's type prevents it + * from being compared to this object. 
+ */ + @Override + public int compareTo(ReplicationRequest o) { + if (o == null) { + return 1; + } + if (this == o) { + return 0; + } + int retVal = Integer + .compare(getReplicationCount() - getExpecReplicationCount(), + o.getReplicationCount() - o.getExpecReplicationCount()); + if (retVal != 0) { + return retVal; + } + return Long.compare(getTimestamp(), o.getTimestamp()); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(91, 1011) + .append(getContainerId()) + .toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReplicationRequest that = (ReplicationRequest) o; + return new EqualsBuilder().append(getContainerId(), that.getContainerId()) + .isEquals(); + } + + public long getContainerId() { + return containerId; + } + + public short getReplicationCount() { + return replicationCount; + } + + public long getTimestamp() { + return timestamp; + } + + public short getExpecReplicationCount() { + return expecReplicationCount; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java new file mode 100644 index 0000000000..7f335e37c1 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.replication; + +/** + * Ozone Container replicaton related classes. + */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java new file mode 100644 index 0000000000..0c4964ac4c --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.protocol.commands; + +import java.util.UUID; + +import com.google.protobuf.GeneratedMessage; + +/** + * Command for the datanode with the destination address. + */ +public class CommandForDatanode { + + private final UUID datanodeId; + + private final SCMCommand command; + + public CommandForDatanode(UUID datanodeId, SCMCommand command) { + this.datanodeId = datanodeId; + this.command = command; + } + + public UUID getDatanodeId() { + return datanodeId; + } + + public SCMCommand getCommand() { + return command; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java new file mode 100644 index 0000000000..834318b145 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.protocol.commands; + +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto + .Builder; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.hdds.scm.container.ContainerID; + +import com.google.common.base.Preconditions; + +/** + * SCM command to request replication of a container. 
+ */ +public class ReplicateContainerCommand + extends SCMCommand { + + private final long containerID; + + private final List sourceDatanodes; + + public ReplicateContainerCommand(long containerID, + List sourceDatanodes) { + this.containerID = containerID; + this.sourceDatanodes = sourceDatanodes; + } + + @Override + public Type getType() { + return SCMCommandProto.Type.replicateContainerCommand; + } + + @Override + public byte[] getProtoBufMessage() { + return getProto().toByteArray(); + } + + public ReplicateContainerCommandProto getProto() { + Builder builder = ReplicateContainerCommandProto.newBuilder() + .setContainerID(containerID); + for (DatanodeDetails dd : sourceDatanodes) { + builder.addSources(dd.getProtoBufMessage()); + } + return builder.build(); + } + + public static ReplicateContainerCommand getFromProtobuf( + ReplicateContainerCommandProto protoMessage) { + Preconditions.checkNotNull(protoMessage); + + List datanodeDetails = + protoMessage.getSourcesList() + .stream() + .map(DatanodeDetails::getFromProtoBuf) + .collect(Collectors.toList()); + + return new ReplicateContainerCommand(protoMessage.getContainerID(), + datanodeDetails); + + } + + public long getContainerID() { + return containerID; + } + + public List getSourceDatanodes() { + return sourceDatanodes; + } +} diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index f6aba05636..54230c1e9f 100644 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -172,6 +172,7 @@ message SCMCommandProto { deleteBlocksCommand = 2; closeContainerCommand = 3; deleteContainerCommand = 4; + replicateContainerCommand = 5; } // TODO: once we start using protoc 3.x, refactor this message using "oneof" required Type commandType = 1; @@ -179,6 +180,7 @@ message SCMCommandProto { optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3; optional CloseContainerCommandProto closeContainerCommandProto = 4; optional DeleteContainerCommandProto deleteContainerCommandProto = 5; + optional ReplicateContainerCommandProto replicateContainerCommandProto = 6; } /** @@ -227,12 +229,20 @@ message CloseContainerCommandProto { } /** -This command asks the datanode to close a specific container. +This command asks the datanode to delete a specific container. */ message DeleteContainerCommandProto { required int64 containerID = 1; } +/** +This command asks the datanode to replicate a container from specific sources. +*/ +message ReplicateContainerCommandProto { + required int64 containerID = 1; + repeated DatanodeDetailsProto sources = 2; +} + /** * Protocol used from a datanode to StorageContainerManager. * diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java new file mode 100644 index 0000000000..6d74c683ee --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import java.util.Random; +import java.util.UUID; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test class for ReplicationQueue. + */ +public class TestReplicationQueue { + + private ReplicationQueue replicationQueue; + private Random random; + + @Before + public void setUp() { + replicationQueue = new ReplicationQueue(); + random = new Random(); + } + + @Test + public void testDuplicateAddOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationRequest obj1, obj2, obj3; + long time = Time.monotonicNow(); + obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3); + obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3); + obj3 = new ReplicationRequest(contId, (short) 1, time+2, (short) 3); + + replicationQueue.add(obj1); + replicationQueue.add(obj2); + replicationQueue.add(obj3); + Assert.assertEquals("Should add only 1 msg as second one is duplicate", + 1, replicationQueue.size()); + ReplicationRequest temp = replicationQueue.poll(); + Assert.assertEquals(temp, obj3); + } + + @Test + public void testPollOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationRequest msg1, msg2, msg3, msg4, msg5; + msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(), + (short) 3); + long time = Time.monotonicNow(); + msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3); + msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3); + msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3); + // Replication message for same container but different nodeId + msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3); + + replicationQueue.add(msg1); + replicationQueue.add(msg2); + replicationQueue.add(msg3); + replicationQueue.add(msg4); + replicationQueue.add(msg5); + Assert.assertEquals("Should have 3 objects", + 3, replicationQueue.size()); + + // Since Priority queue orders messages according to replication count, + // message with lowest replication should be first + ReplicationRequest temp; + temp = replicationQueue.poll(); + Assert.assertEquals("Should have 2 objects", + 2, replicationQueue.size()); + Assert.assertEquals(temp, msg3); + + temp = replicationQueue.poll(); + Assert.assertEquals("Should have 1 objects", + 1, replicationQueue.size()); + Assert.assertEquals(temp, msg5); + + // Message 2 should be ordered before message 5 as both have same replication + // number but message 2 has earlier timestamp. 
+ temp = replicationQueue.poll(); + Assert.assertEquals("Should have 0 objects", + replicationQueue.size(), 0); + Assert.assertEquals(temp, msg4); + } + + @Test + public void testRemoveOp() { + long contId = random.nextLong(); + String nodeId = UUID.randomUUID().toString(); + ReplicationRequest obj1, obj2, obj3; + obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(), + (short) 3); + obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(), + (short) 3); + obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(), + (short) 3); + + replicationQueue.add(obj1); + replicationQueue.add(obj2); + replicationQueue.add(obj3); + Assert.assertEquals("Should have 3 objects", + 3, replicationQueue.size()); + + replicationQueue.remove(obj3); + Assert.assertEquals("Should have 2 objects", + 2, replicationQueue.size()); + + replicationQueue.remove(obj2); + Assert.assertEquals("Should have 1 objects", + 1, replicationQueue.size()); + + replicationQueue.remove(obj1); + Assert.assertEquals("Should have 0 objects", + 0, replicationQueue.size()); + } + +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java new file mode 100644 index 0000000000..5b1fd0f43a --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * SCM Testing and Mocking Utils. + */ +package org.apache.hadoop.ozone.container.replication; +// Test classes for Replication functionality. 
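The tests above pin down the queue semantics: a second request for the same container does not grow the queue, and requests are polled in order of how badly a container is under-replicated. A minimal usage sketch under those assumptions follows; ReplicationQueue and ReplicationRequest are not part of this hunk, so their package and the meaning of the short constructor arguments (current and expected replica count) are inferred from the test code and should be treated as assumptions:

    import org.apache.hadoop.ozone.container.replication.ReplicationQueue;
    import org.apache.hadoop.ozone.container.replication.ReplicationRequest;
    import org.apache.hadoop.util.Time;

    public final class ReplicationQueueExample {
      public static void main(String[] args) {
        ReplicationQueue queue = new ReplicationQueue();
        long now = Time.monotonicNow();

        // Container 1 is down to a single replica, container 2 still has two of three.
        queue.add(new ReplicationRequest(1L, (short) 1, now, (short) 3));
        queue.add(new ReplicationRequest(2L, (short) 2, now, (short) 3));

        // A second request for the same container does not grow the queue
        // (see testDuplicateAddOp above).
        queue.add(new ReplicationRequest(2L, (short) 2, now + 1, (short) 3));

        // The most under-replicated container (lowest replica count) comes out first.
        ReplicationRequest mostUrgent = queue.poll();  // container 1's request
        ReplicationRequest next = queue.poll();        // container 2's request
      }
    }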
\ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java index 86888aa790..7c129457fd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java @@ -44,8 +44,8 @@ public class BlockDeletingServiceTestImpl public BlockDeletingServiceTestImpl(ContainerManager containerManager, int serviceInterval, Configuration conf) { - super(containerManager, serviceInterval, - SERVICE_TIMEOUT_IN_MILLISECONDS, conf); + super(containerManager, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, + TimeUnit.MILLISECONDS, conf); } @VisibleForTesting diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java new file mode 100644 index 0000000000..19fddde9b4 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.apache.hadoop.ozone.lease.Lease; +import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException; +import org.apache.hadoop.ozone.lease.LeaseExpiredException; +import org.apache.hadoop.ozone.lease.LeaseManager; +import org.apache.hadoop.ozone.lease.LeaseNotFoundException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Event watcher that (re)sends a message after a timeout. + *

+ * Event watcher will send the tracked payload/event after a timeout period + * unless a confirmation from the original event (completion event) is arrived. + * + * @param The type of the events which are tracked. + * @param The type of event which could cancel the + * tracking. + */ +@SuppressWarnings("CheckStyle") +public abstract class EventWatcher { + + private static final Logger LOG = LoggerFactory.getLogger(EventWatcher.class); + + private final Event startEvent; + + private final Event completionEvent; + + private final LeaseManager leaseManager; + + protected final Map trackedEventsByUUID = + new ConcurrentHashMap<>(); + + protected final Set trackedEvents = new HashSet<>(); + + public EventWatcher(Event startEvent, + Event completionEvent, + LeaseManager leaseManager) { + this.startEvent = startEvent; + this.completionEvent = completionEvent; + this.leaseManager = leaseManager; + + } + + public void start(EventQueue queue) { + + queue.addHandler(startEvent, this::handleStartEvent); + + queue.addHandler(completionEvent, (completionPayload, publisher) -> { + UUID uuid = completionPayload.getUUID(); + try { + handleCompletion(uuid, publisher); + } catch (LeaseNotFoundException e) { + //It's already done. Too late, we already retried it. + //Not a real problem. + LOG.warn("Completion event without active lease. UUID={}", uuid); + } + }); + + } + + private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload, + EventPublisher publisher) { + UUID identifier = payload.getUUID(); + trackedEventsByUUID.put(identifier, payload); + trackedEvents.add(payload); + try { + Lease lease = leaseManager.acquire(identifier); + try { + lease.registerCallBack(() -> { + handleTimeout(publisher, identifier); + return null; + }); + + } catch (LeaseExpiredException e) { + handleTimeout(publisher, identifier); + } + } catch (LeaseAlreadyExistException e) { + //No problem at all. But timer is not reset. + } + } + + private synchronized void handleCompletion(UUID uuid, + EventPublisher publisher) throws LeaseNotFoundException { + leaseManager.release(uuid); + TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(uuid); + trackedEvents.remove(payload); + onFinished(publisher, payload); + } + + private synchronized void handleTimeout(EventPublisher publisher, + UUID identifier) { + TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(identifier); + trackedEvents.remove(payload); + onTimeout(publisher, payload); + } + + + /** + * Check if a specific payload is in-progress. + */ + public synchronized boolean contains(TIMEOUT_PAYLOAD payload) { + return trackedEvents.contains(payload); + } + + public synchronized boolean remove(TIMEOUT_PAYLOAD payload) { + try { + leaseManager.release(payload.getUUID()); + } catch (LeaseNotFoundException e) { + LOG.warn("Completion event without active lease. 
UUID={}", + payload.getUUID()); + } + trackedEventsByUUID.remove(payload.getUUID()); + return trackedEvents.remove(payload); + + } + + abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload); + + abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload); + + public List getTimeoutEvents( + Predicate predicate) { + return trackedEventsByUUID.values().stream().filter(predicate) + .collect(Collectors.toList()); + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java new file mode 100644 index 0000000000..e73e30fcde --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.UUID; + +/** + * Event with an additional unique identifier. + * + */ +public interface IdentifiableEventPayload { + + UUID getUUID(); + +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java new file mode 100644 index 0000000000..3f34a70e6e --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.ArrayList; +import java.util.List; + +/** + * Dummy class for testing to collect all the received events. + */ +public class EventHandlerStub implements EventHandler { + + private List receivedEvents = new ArrayList<>(); + + @Override + public void onMessage(PAYLOAD payload, EventPublisher publisher) { + receivedEvents.add(payload); + } + + public List getReceivedEvents() { + return receivedEvents; + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java new file mode 100644 index 0000000000..1731350cfe --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +import java.util.List; +import java.util.Objects; +import java.util.UUID; + +import org.apache.hadoop.ozone.lease.LeaseManager; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test the basic functionality of event watcher. + */ +public class TestEventWatcher { + + private static final TypedEvent WATCH_UNDER_REPLICATED = + new TypedEvent<>(UnderreplicatedEvent.class); + + private static final TypedEvent UNDER_REPLICATED = + new TypedEvent<>(UnderreplicatedEvent.class); + + private static final TypedEvent + REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class); + + LeaseManager leaseManager; + + @Before + public void startLeaseManager() { + leaseManager = new LeaseManager<>(2000l); + leaseManager.start(); + } + + @After + public void stopLeaseManager() { + leaseManager.shutdown(); + } + + + @Test + public void testEventHandling() throws InterruptedException { + + EventQueue queue = new EventQueue(); + + EventWatcher + replicationWatcher = createEventWatcher(); + + EventHandlerStub underReplicatedEvents = + new EventHandlerStub<>(); + + queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); + + replicationWatcher.start(queue); + + UUID uuid1 = UUID.randomUUID(); + UUID uuid2 = UUID.randomUUID(); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(uuid1, "C1")); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(uuid2, "C2")); + + Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); + + Thread.sleep(1000); + + queue.fireEvent(REPLICATION_COMPLETED, + new ReplicationCompletedEvent(uuid1, "C2", "D1")); + + Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); + + Thread.sleep(1500); + + queue.processAll(1000L); + + Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size()); + Assert.assertEquals(uuid2, + underReplicatedEvents.getReceivedEvents().get(0).UUID); + + } + + @Test + public void testInprogressFilter() throws InterruptedException { + + EventQueue queue = new EventQueue(); + + EventWatcher + replicationWatcher = createEventWatcher(); + + EventHandlerStub underReplicatedEvents = + new EventHandlerStub<>(); + + queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); + + replicationWatcher.start(queue); + + UnderreplicatedEvent event1 = + new UnderreplicatedEvent(UUID.randomUUID(), "C1"); + + queue.fireEvent(WATCH_UNDER_REPLICATED, event1); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(UUID.randomUUID(), "C2")); + + queue.fireEvent(WATCH_UNDER_REPLICATED, + new UnderreplicatedEvent(UUID.randomUUID(), "C1")); + + queue.processAll(1000L); + Thread.sleep(1000L); + List c1todo = replicationWatcher + .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); + + Assert.assertEquals(2, c1todo.size()); + Assert.assertTrue(replicationWatcher.contains(event1)); + Thread.sleep(1500L); + + c1todo = replicationWatcher + .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); + Assert.assertEquals(0, c1todo.size()); + Assert.assertFalse(replicationWatcher.contains(event1)); + + + } + + private EventWatcher + 
createEventWatcher() { + return new EventWatcher( + WATCH_UNDER_REPLICATED, REPLICATION_COMPLETED, leaseManager) { + + @Override + void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) { + publisher.fireEvent(UNDER_REPLICATED, payload); + } + + @Override + void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) { + //Good job. We did it. + } + }; + } + + private static class ReplicationCompletedEvent + implements IdentifiableEventPayload { + + private final UUID UUID; + + private final String containerId; + + private final String datanodeId; + + public ReplicationCompletedEvent(UUID UUID, String containerId, + String datanodeId) { + this.UUID = UUID; + this.containerId = containerId; + this.datanodeId = datanodeId; + } + + public UUID getUUID() { + return UUID; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReplicationCompletedEvent that = (ReplicationCompletedEvent) o; + return Objects.equals(containerId, that.containerId) && Objects + .equals(datanodeId, that.datanodeId); + } + + @Override + public int hashCode() { + + return Objects.hash(containerId, datanodeId); + } + } + + private static class UnderreplicatedEvent + + implements IdentifiableEventPayload { + + private final UUID UUID; + + private final String containerId; + + public UnderreplicatedEvent(UUID UUID, String containerId) { + this.containerId = containerId; + this.UUID = UUID; + } + + public UUID getUUID() { + return UUID; + } + } + +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index b563e90e76..9fd30f2ad0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; @@ -80,7 +79,6 @@ public class ContainerMapping implements Mapping { private final PipelineSelector pipelineSelector; private final ContainerStateManager containerStateManager; private final LeaseManager containerLeaseManager; - private final ContainerSupervisor containerSupervisor; private final float containerCloseThreshold; private final ContainerCloser closer; private final long size; @@ -127,9 +125,7 @@ public ContainerMapping( OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024; this.containerStateManager = new ContainerStateManager(conf, this); - this.containerSupervisor = - new ContainerSupervisor(conf, nodeManager, - nodeManager.getNodePoolManager()); + this.containerCloseThreshold = conf.getFloat( ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD, ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT); @@ -407,8 +403,8 @@ public void processContainerReports(DatanodeDetails datanodeDetails, throws IOException { List containerInfos = reports.getReportsList(); - 
containerSupervisor.handleContainerReport(datanodeDetails, reports); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : + + for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : containerInfos) { byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID()); lock.lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java index 937076cfb7..cbb2ba75c2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java @@ -127,12 +127,12 @@ public void close(HddsProtos.SCMContainerInfo info) { // to SCM. In that case also, data node will ignore this command. HddsProtos.Pipeline pipeline = info.getPipeline(); - for (HddsProtos.DatanodeDetailsProto datanodeDetails : pipeline - .getPipelineChannel().getMembersList()) { + for (HddsProtos.DatanodeDetailsProto datanodeDetails : + pipeline.getMembersList()) { nodeManager.addDatanodeCommand( DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(), new CloseContainerCommand(info.getContainerID(), - pipeline.getPipelineChannel().getType())); + pipeline.getType())); } if (!commandIssued.containsKey(info.getContainerID())) { commandIssued.put(info.getContainerID(), diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java deleted file mode 100644 index 5bd05746bf..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import static com.google.common.util.concurrent.Uninterruptibles - .sleepUninterruptibly; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT; - -/** - * This class takes a set of container reports that belong to a pool and then - * computes the replication levels for each container. 
- */ -public class ContainerSupervisor implements Closeable { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerSupervisor.class); - - private final NodePoolManager poolManager; - private final HashSet poolNames; - private final PriorityQueue poolQueue; - private final NodeManager nodeManager; - private final long containerProcessingLag; - private final AtomicBoolean runnable; - private final ExecutorService executorService; - private final long maxPoolWait; - private long poolProcessCount; - private final List inProgressPoolList; - private final AtomicInteger threadFaultCount; - private final int inProgressPoolMaxCount; - - private final ReadWriteLock inProgressPoolListLock; - - /** - * Returns the number of times we have processed pools. - * @return long - */ - public long getPoolProcessCount() { - return poolProcessCount; - } - - - /** - * Constructs a class that computes Replication Levels. - * - * @param conf - OzoneConfiguration - * @param nodeManager - Node Manager - * @param poolManager - Pool Manager - */ - public ContainerSupervisor(Configuration conf, NodeManager nodeManager, - NodePoolManager poolManager) { - Preconditions.checkNotNull(poolManager); - Preconditions.checkNotNull(nodeManager); - this.containerProcessingLag = - conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL, - OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT, - TimeUnit.SECONDS - ) * 1000; - int maxContainerReportThreads = - conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS, - OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT - ); - this.maxPoolWait = - conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, - OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - this.inProgressPoolMaxCount = conf.getInt( - OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS, - OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT); - this.poolManager = poolManager; - this.nodeManager = nodeManager; - this.poolNames = new HashSet<>(); - this.poolQueue = new PriorityQueue<>(); - this.runnable = new AtomicBoolean(true); - this.threadFaultCount = new AtomicInteger(0); - this.executorService = newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Container Reports Processing Thread - %d") - .build(), maxContainerReportThreads); - this.inProgressPoolList = new LinkedList<>(); - this.inProgressPoolListLock = new ReentrantReadWriteLock(); - - initPoolProcessThread(); - } - - private ExecutorService newCachedThreadPool(ThreadFactory threadFactory, - int maxThreads) { - return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), threadFactory); - } - - /** - * Returns the number of pools that are under process right now. - * @return int - Number of pools that are in process. - */ - public int getInProgressPoolCount() { - return inProgressPoolList.size(); - } - - /** - * Exits the background thread. - */ - public void setExit() { - this.runnable.set(false); - } - - /** - * Adds or removes pools from names that we need to process. - * - * There are two different cases that we need to process. - * The case where some pools are being added and some times we have to - * handle cases where pools are removed. 
- */ - private void refreshPools() { - List pools = this.poolManager.getNodePools(); - if (pools != null) { - - HashSet removedPools = - computePoolDifference(this.poolNames, new HashSet<>(pools)); - - HashSet addedPools = - computePoolDifference(new HashSet<>(pools), this.poolNames); - // TODO: Support remove pool API in pool manager so that this code - // path can be tested. This never happens in the current code base. - for (String poolName : removedPools) { - for (PeriodicPool periodicPool : poolQueue) { - if (periodicPool.getPoolName().compareTo(poolName) == 0) { - poolQueue.remove(periodicPool); - } - } - } - // Remove the pool names that we have in the list. - this.poolNames.removeAll(removedPools); - - for (String poolName : addedPools) { - poolQueue.add(new PeriodicPool(poolName)); - } - - // Add to the pool names we are tracking. - poolNames.addAll(addedPools); - } - - } - - /** - * Handle the case where pools are added. - * - * @param newPools - New Pools list - * @param oldPool - oldPool List. - */ - private HashSet computePoolDifference(HashSet newPools, - Set oldPool) { - Preconditions.checkNotNull(newPools); - Preconditions.checkNotNull(oldPool); - HashSet newSet = new HashSet<>(newPools); - newSet.removeAll(oldPool); - return newSet; - } - - private void initPoolProcessThread() { - - /* - * Task that runs to check if we need to start a pool processing job. - * if so we create a pool reconciliation job and find out of all the - * expected containers are on the nodes. - */ - Runnable processPools = () -> { - while (runnable.get()) { - // Make sure that we don't have any new pools. - refreshPools(); - while (inProgressPoolList.size() < inProgressPoolMaxCount) { - PeriodicPool pool = poolQueue.poll(); - if (pool != null) { - if (pool.getLastProcessedTime() + this.containerProcessingLag > - Time.monotonicNow()) { - LOG.debug("Not within the time window for processing: {}", - pool.getPoolName()); - // we might over sleep here, not a big deal. - sleepUninterruptibly(this.containerProcessingLag, - TimeUnit.MILLISECONDS); - } - LOG.debug("Adding pool {} to container processing queue", - pool.getPoolName()); - InProgressPool inProgressPool = new InProgressPool(maxPoolWait, - pool, this.nodeManager, this.poolManager, this.executorService); - inProgressPool.startReconciliation(); - inProgressPoolListLock.writeLock().lock(); - try { - inProgressPoolList.add(inProgressPool); - } finally { - inProgressPoolListLock.writeLock().unlock(); - } - poolProcessCount++; - } else { - break; - } - } - sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS); - inProgressPoolListLock.readLock().lock(); - try { - for (InProgressPool inProgressPool : inProgressPoolList) { - inProgressPool.finalizeReconciliation(); - poolQueue.add(inProgressPool.getPool()); - } - } finally { - inProgressPoolListLock.readLock().unlock(); - } - inProgressPoolListLock.writeLock().lock(); - try { - inProgressPoolList.clear(); - } finally { - inProgressPoolListLock.writeLock().unlock(); - } - } - }; - - // We will have only one thread for pool processing. - Thread poolProcessThread = new Thread(processPools); - poolProcessThread.setDaemon(true); - poolProcessThread.setName("Pool replica thread"); - poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> { - // Let us just restart this thread after logging a critical error. - // if this thread is not running we cannot handle commands from SCM. - LOG.error("Critical Error : Pool replica thread encountered an " + - "error. 
Thread: {} Error Count : {}", t.toString(), e, - threadFaultCount.incrementAndGet()); - poolProcessThread.start(); - // TODO : Add a config to restrict how many times we will restart this - // thread in a single session. - }); - poolProcessThread.start(); - } - - /** - * Adds a container report to appropriate inProgress Pool. - * @param containerReport -- Container report for a specific container from - * a datanode. - */ - public void handleContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto containerReport) { - inProgressPoolListLock.readLock().lock(); - try { - String poolName = poolManager.getNodePool(datanodeDetails); - for (InProgressPool ppool : inProgressPoolList) { - if (ppool.getPoolName().equalsIgnoreCase(poolName)) { - ppool.handleContainerReport(datanodeDetails, containerReport); - return; - } - } - // TODO: Decide if we can do anything else with this report. - LOG.debug("Discarding the container report for pool {}. " + - "That pool is not currently in the pool reconciliation process." + - " Container Name: {}", poolName, datanodeDetails); - } catch (SCMException e) { - LOG.warn("Skipping processing container report from datanode {}, " - + "cause: failed to get the corresponding node pool", - datanodeDetails.toString(), e); - } finally { - inProgressPoolListLock.readLock().unlock(); - } - } - - /** - * Get in process pool list, used for testing. - * @return List of InProgressPool - */ - @VisibleForTesting - public List getInProcessPoolList() { - return inProgressPoolList; - } - - /** - * Shutdown the Container Replication Manager. - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - setExit(); - HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java deleted file mode 100644 index 4b547311da..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -/** - * These are pools that are actively checking for replication status of the - * containers. - */ -public final class InProgressPool { - public static final Logger LOG = - LoggerFactory.getLogger(InProgressPool.class); - - private final PeriodicPool pool; - private final NodeManager nodeManager; - private final NodePoolManager poolManager; - private final ExecutorService executorService; - private final Map containerCountMap; - private final Map processedNodeSet; - private final long startTime; - private ProgressStatus status; - private AtomicInteger nodeCount; - private AtomicInteger nodeProcessed; - private AtomicInteger containerProcessedCount; - private long maxWaitTime; - /** - * Constructs an pool that is being processed. - * @param maxWaitTime - Maximum wait time in milliseconds. - * @param pool - Pool that we are working against - * @param nodeManager - Nodemanager - * @param poolManager - pool manager - * @param executorService - Shared Executor service. - */ - InProgressPool(long maxWaitTime, PeriodicPool pool, - NodeManager nodeManager, NodePoolManager poolManager, - ExecutorService executorService) { - Preconditions.checkNotNull(pool); - Preconditions.checkNotNull(nodeManager); - Preconditions.checkNotNull(poolManager); - Preconditions.checkNotNull(executorService); - Preconditions.checkArgument(maxWaitTime > 0); - this.pool = pool; - this.nodeManager = nodeManager; - this.poolManager = poolManager; - this.executorService = executorService; - this.containerCountMap = new ConcurrentHashMap<>(); - this.processedNodeSet = new ConcurrentHashMap<>(); - this.maxWaitTime = maxWaitTime; - startTime = Time.monotonicNow(); - } - - /** - * Returns periodic pool. - * - * @return PeriodicPool - */ - public PeriodicPool getPool() { - return pool; - } - - /** - * We are done if we have got reports from all nodes or we have - * done waiting for the specified time. - * - * @return true if we are done, false otherwise. - */ - public boolean isDone() { - return (nodeCount.get() == nodeProcessed.get()) || - (this.startTime + this.maxWaitTime) > Time.monotonicNow(); - } - - /** - * Gets the number of containers processed. 
- * - * @return int - */ - public int getContainerProcessedCount() { - return containerProcessedCount.get(); - } - - /** - * Returns the start time in milliseconds. - * - * @return - Start Time. - */ - public long getStartTime() { - return startTime; - } - - /** - * Get the number of nodes in this pool. - * - * @return - node count - */ - public int getNodeCount() { - return nodeCount.get(); - } - - /** - * Get the number of nodes that we have already processed container reports - * from. - * - * @return - Processed count. - */ - public int getNodeProcessed() { - return nodeProcessed.get(); - } - - /** - * Returns the current status. - * - * @return Status - */ - public ProgressStatus getStatus() { - return status; - } - - /** - * Starts the reconciliation process for all the nodes in the pool. - */ - public void startReconciliation() { - List datanodeDetailsList = - this.poolManager.getNodes(pool.getPoolName()); - if (datanodeDetailsList.size() == 0) { - LOG.error("Datanode list for {} is Empty. Pool with no nodes ? ", - pool.getPoolName()); - this.status = ProgressStatus.Error; - return; - } - - nodeProcessed = new AtomicInteger(0); - containerProcessedCount = new AtomicInteger(0); - nodeCount = new AtomicInteger(0); - this.status = ProgressStatus.InProgress; - this.getPool().setLastProcessedTime(Time.monotonicNow()); - } - - /** - * Queues a container Report for handling. This is done in a worker thread - * since decoding a container report might be compute intensive . We don't - * want to block since we have asked for bunch of container reports - * from a set of datanodes. - * - * @param containerReport - ContainerReport - */ - public void handleContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto containerReport) { - if (status == ProgressStatus.InProgress) { - executorService.submit(processContainerReport(datanodeDetails, - containerReport)); - } else { - LOG.debug("Cannot handle container report when the pool is in {} status.", - status); - } - } - - private Runnable processContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto reports) { - return () -> { - if (processedNodeSet.computeIfAbsent(datanodeDetails.getUuid(), - (k) -> true)) { - nodeProcessed.incrementAndGet(); - LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed, - datanodeDetails.getUuid()); - for (ContainerInfo info : reports.getReportsList()) { - containerProcessedCount.incrementAndGet(); - LOG.debug("Total Containers processed: {} Container Name: {}", - containerProcessedCount.get(), info.getContainerID()); - - // Update the container map with count + 1 if the key exists or - // update the map with 1. Since this is a concurrentMap the - // computation and update is atomic. - containerCountMap.merge(info.getContainerID(), 1, Integer::sum); - } - } - }; - } - - /** - * Filter the containers based on specific rules. - * - * @param predicate -- Predicate to filter by - * @return A list of map entries. - */ - public List> filterContainer( - Predicate> predicate) { - return containerCountMap.entrySet().stream() - .filter(predicate).collect(Collectors.toList()); - } - - /** - * Used only for testing, calling this will abort container report - * processing. This is very dangerous call and should not be made by any users - */ - @VisibleForTesting - public void setDoneProcessing() { - nodeProcessed.set(nodeCount.get()); - } - - /** - * Returns the pool name. - * - * @return Name of the pool. 
- */ - String getPoolName() { - return pool.getPoolName(); - } - - public void finalizeReconciliation() { - status = ProgressStatus.Done; - //TODO: Add finalizing logic. This is where actual reconciliation happens. - } - - /** - * Current status of the computing replication status. - */ - public enum ProgressStatus { - InProgress, Done, Error - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java deleted file mode 100644 index ef28aa78d0..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import java.util.concurrent.atomic.AtomicLong; - -/** - * Periodic pool is a pool with a time stamp, this allows us to process pools - * based on a cyclic clock. - */ -public class PeriodicPool implements Comparable { - private final String poolName; - private long lastProcessedTime; - private AtomicLong totalProcessedCount; - - /** - * Constructs a periodic pool. - * - * @param poolName - Name of the pool - */ - public PeriodicPool(String poolName) { - this.poolName = poolName; - lastProcessedTime = 0; - totalProcessedCount = new AtomicLong(0); - } - - /** - * Get pool Name. - * @return PoolName - */ - public String getPoolName() { - return poolName; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(PeriodicPool o) { - return Long.compare(this.lastProcessedTime, o.lastProcessedTime); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - PeriodicPool that = (PeriodicPool) o; - - return poolName.equals(that.poolName); - } - - @Override - public int hashCode() { - return poolName.hashCode(); - } - - /** - * Returns the Total Times we have processed this pool. - * - * @return processed count. - */ - public long getTotalProcessedCount() { - return totalProcessedCount.get(); - } - - /** - * Gets the last time we processed this pool. - * @return time in milliseconds - */ - public long getLastProcessedTime() { - return this.lastProcessedTime; - } - - - /** - * Sets the last processed time. - * - * @param lastProcessedTime - Long in milliseconds. - */ - - public void setLastProcessedTime(long lastProcessedTime) { - this.lastProcessedTime = lastProcessedTime; - } - - /* - * Increments the total processed count. - */ - public void incTotalProcessedCount() { - this.totalProcessedCount.incrementAndGet(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 7bbe2efe57..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; -/* - This package contains routines that manage replication of a container. This - relies on container reports to understand the replication level of a - container - UnderReplicated, Replicated, OverReplicated -- and manages the - replication level based on that. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 4392633b16..72d7e946cc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -123,12 +123,6 @@ public interface NodeManager extends StorageContainerNodeProtocol, */ SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); - /** - * Returns the NodePoolManager associated with the NodeManager. - * @return NodePoolManager - */ - NodePoolManager getNodePoolManager(); - /** * Wait for the heartbeat is processed by NodeManager. * @return true if heartbeat has been processed. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java deleted file mode 100644 index 46faf9ca4d..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * Interface that defines SCM NodePoolManager. - */ -public interface NodePoolManager extends Closeable { - - /** - * Add a node to a node pool. - * @param pool - name of the node pool. - * @param node - data node. - */ - void addNode(String pool, DatanodeDetails node) throws IOException; - - /** - * Remove a node from a node pool. 
- * @param pool - name of the node pool. - * @param node - data node. - * @throws SCMException - */ - void removeNode(String pool, DatanodeDetails node) - throws SCMException; - - /** - * Get a list of known node pools. - * @return a list of known node pool names or an empty list if not node pool - * is defined. - */ - List getNodePools(); - - /** - * Get all nodes of a node pool given the name of the node pool. - * @param pool - name of the node pool. - * @return a list of datanode ids or an empty list if the node pool was not - * found. - */ - List getNodes(String pool); - - /** - * Get the node pool name if the node has been added to a node pool. - * @param datanodeDetails - datanode ID. - * @return node pool name if it has been assigned. - * null if the node has not been assigned to any node pool yet. - */ - String getNodePool(DatanodeDetails datanodeDetails) throws SCMException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 9ac99303bd..ff5b9f1f23 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -25,6 +25,10 @@ import org.apache.hadoop.hdds.scm.VersionInfo; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.server.events.Event; +import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.TypedEvent; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -43,11 +47,13 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; import org.apache.hadoop.ozone.protocol.VersionResponse; +import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,7 +107,8 @@ * as soon as you read it. */ public class SCMNodeManager - implements NodeManager, StorageContainerNodeProtocol { + implements NodeManager, StorageContainerNodeProtocol, + EventHandler { @VisibleForTesting static final Logger LOG = @@ -152,9 +159,11 @@ public class SCMNodeManager private ObjectName nmInfoBean; // Node pool manager. - private final SCMNodePoolManager nodePoolManager; private final StorageContainerManager scmManager; + public static final Event DATANODE_COMMAND = + new TypedEvent<>(CommandForDatanode.class, "DATANODE_COMMAND"); + /** * Constructs SCM machine Manager. 
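SCMNodeManager now also acts as the event handler that turns CommandForDatanode events into queued datanode commands, via the DATANODE_COMMAND constant above and the onMessage(...) override added just below. A minimal wiring sketch, assuming the SCM start-up code owns the EventQueue and an already constructed node manager; the class and variable names here are illustrative:

    import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
    import org.apache.hadoop.hdds.server.events.EventQueue;

    public final class DatanodeCommandWiring {
      // After this call, any CommandForDatanode fired on the queue is routed to
      // SCMNodeManager.onMessage(...), which hands it to the pre-existing
      // addDatanodeCommand(...) path for the addressed datanode.
      public static EventQueue wire(SCMNodeManager nodeManager) {
        EventQueue eventQueue = new EventQueue();
        eventQueue.addHandler(SCMNodeManager.DATANODE_COMMAND, nodeManager);
        return eventQueue;
      }
    }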
*/ @@ -200,7 +209,6 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID, registerMXBean(); - this.nodePoolManager = new SCMNodePoolManager(conf); this.scmManager = scmManager; } @@ -672,7 +680,6 @@ private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) { @Override public void close() throws IOException { unregisterMXBean(); - nodePoolManager.close(); executorService.shutdown(); try { if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { @@ -753,20 +760,6 @@ public RegisteredCommand register( LOG.info("Leaving startup chill mode."); } - // TODO: define node pool policy for non-default node pool. - // For now, all nodes are added to the "DefaultNodePool" upon registration - // if it has not been added to any node pool yet. - try { - if (nodePoolManager.getNodePool(datanodeDetails) == null) { - nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL, - datanodeDetails); - } - } catch (IOException e) { - // TODO: make sure registration failure is handled correctly. - return RegisteredCommand.newBuilder() - .setErrorCode(ErrorCode.errorNodeNotPermitted) - .build(); - } // Updating Node Report, as registration is successful updateNodeStat(datanodeDetails.getUuid(), nodeReport); LOG.info("Data node with ID: {} Registered.", @@ -852,11 +845,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeStats.get(datanodeDetails.getUuid())); } - @Override - public NodePoolManager getNodePoolManager() { - return nodePoolManager; - } - @Override public Map getNodeCount() { Map nodeCountMap = new HashMap(); @@ -875,4 +863,11 @@ public void addDatanodeCommand(UUID dnId, SCMCommand command) { public void setStaleNodeIntervalMs(long interval) { this.staleNodeIntervalMs = interval; } + + @Override + public void onMessage(CommandForDatanode commandForDatanode, + EventPublisher publisher) { + addDatanodeCommand(commandForDatanode.getDatanodeId(), + commandForDatanode.getCommand()); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java deleted file mode 100644 index faf330ea1d..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_FIND_NODE_IN_POOL; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_LOAD_NODEPOOL; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; - -/** - * SCM node pool manager that manges node pools. - */ -public final class SCMNodePoolManager implements NodePoolManager { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMNodePoolManager.class); - private static final List EMPTY_NODE_LIST = - new ArrayList<>(); - private static final List EMPTY_NODEPOOL_LIST = new ArrayList<>(); - public static final String DEFAULT_NODEPOOL = "DefaultNodePool"; - - // DB that saves the node to node pool mapping. - private MetadataStore nodePoolStore; - - // In-memory node pool to nodes mapping - private HashMap> nodePools; - - // Read-write lock for nodepool operations - private ReadWriteLock lock; - - /** - * Construct SCMNodePoolManager class that manages node to node pool mapping. - * @param conf - configuration. - * @throws IOException - */ - public SCMNodePoolManager(final OzoneConfiguration conf) - throws IOException { - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - File metaDir = getOzoneMetaDirPath(conf); - String scmMetaDataDir = metaDir.getPath(); - File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB); - nodePoolStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(nodePoolDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); - nodePools = new HashMap<>(); - lock = new ReentrantReadWriteLock(); - init(); - } - - /** - * Initialize the in-memory store based on persist store from level db. - * No lock is needed as init() is only invoked by constructor. 
- * @throws SCMException - */ - private void init() throws SCMException { - try { - nodePoolStore.iterate(null, (key, value) -> { - try { - DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key)); - String poolName = DFSUtil.bytes2String(value); - - Set nodePool = null; - if (nodePools.containsKey(poolName)) { - nodePool = nodePools.get(poolName); - } else { - nodePool = new HashSet<>(); - nodePools.put(poolName, nodePool); - } - nodePool.add(nodeId); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding node: {} to node pool: {}", - nodeId, poolName); - } - } catch (IOException e) { - LOG.warn("Can't add a datanode to node pool, continue next..."); - } - return true; - }); - } catch (IOException e) { - LOG.error("Loading node pool error " + e); - throw new SCMException("Failed to load node pool", - FAILED_TO_LOAD_NODEPOOL); - } - } - - /** - * Add a datanode to a node pool. - * @param pool - name of the node pool. - * @param node - name of the datanode. - */ - @Override - public void addNode(final String pool, final DatanodeDetails node) - throws IOException { - Preconditions.checkNotNull(pool, "pool name is null"); - Preconditions.checkNotNull(node, "node is null"); - lock.writeLock().lock(); - try { - // add to the persistent store - nodePoolStore.put(node.getProtoBufMessage().toByteArray(), - DFSUtil.string2Bytes(pool)); - - // add to the in-memory store - Set nodePool = null; - if (nodePools.containsKey(pool)) { - nodePool = nodePools.get(pool); - } else { - nodePool = new HashSet(); - nodePools.put(pool, nodePool); - } - nodePool.add(node); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Remove a datanode from a node pool. - * @param pool - name of the node pool. - * @param node - datanode id. - * @throws SCMException - */ - @Override - public void removeNode(final String pool, final DatanodeDetails node) - throws SCMException { - Preconditions.checkNotNull(pool, "pool name is null"); - Preconditions.checkNotNull(node, "node is null"); - lock.writeLock().lock(); - try { - // Remove from the persistent store - byte[] kName = node.getProtoBufMessage().toByteArray(); - byte[] kData = nodePoolStore.get(kName); - if (kData == null) { - throw new SCMException(String.format("Unable to find node %s from" + - " pool %s in DB.", DFSUtil.bytes2String(kName), pool), - FAILED_TO_FIND_NODE_IN_POOL); - } - nodePoolStore.delete(kName); - - // Remove from the in-memory store - if (nodePools.containsKey(pool)) { - Set nodePool = nodePools.get(pool); - nodePool.remove(node); - } else { - throw new SCMException(String.format("Unable to find node %s from" + - " pool %s in MAP.", DFSUtil.bytes2String(kName), pool), - FAILED_TO_FIND_NODE_IN_POOL); - } - } catch (IOException e) { - throw new SCMException("Failed to remove node " + node.toString() - + " from node pool " + pool, e, - SCMException.ResultCodes.IO_EXCEPTION); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Get all the node pools. - * @return all the node pools. - */ - @Override - public List getNodePools() { - lock.readLock().lock(); - try { - if (!nodePools.isEmpty()) { - return nodePools.keySet().stream().collect(Collectors.toList()); - } else { - return EMPTY_NODEPOOL_LIST; - } - } finally { - lock.readLock().unlock(); - } - } - - /** - * Get all datanodes of a specific node pool. - * @param pool - name of the node pool. - * @return all datanodes of the specified node pool. 
- */ - @Override - public List getNodes(final String pool) { - Preconditions.checkNotNull(pool, "pool name is null"); - if (nodePools.containsKey(pool)) { - return nodePools.get(pool).stream().collect(Collectors.toList()); - } else { - return EMPTY_NODE_LIST; - } - } - - /** - * Get the node pool name if the node has been added to a node pool. - * @param datanodeDetails - datanode ID. - * @return node pool name if it has been assigned. - * null if the node has not been assigned to any node pool yet. - * TODO: Put this in a in-memory map if performance is an issue. - */ - @Override - public String getNodePool(final DatanodeDetails datanodeDetails) - throws SCMException { - Preconditions.checkNotNull(datanodeDetails, "node is null"); - try { - byte[] result = nodePoolStore.get( - datanodeDetails.getProtoBufMessage().toByteArray()); - return result == null ? null : DFSUtil.bytes2String(result); - } catch (IOException e) { - throw new SCMException("Failed to get node pool for node " - + datanodeDetails.toString(), e, - SCMException.ResultCodes.IO_EXCEPTION); - } - } - - /** - * Close node pool level db store. - * @throws IOException - */ - @Override - public void close() throws IOException { - nodePoolStore.close(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java index 832fcc669a..48affa4112 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.scm.pipelines; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -36,12 +35,12 @@ public abstract class PipelineManager { private static final Logger LOG = LoggerFactory.getLogger(PipelineManager.class); - private final List activePipelineChannels; - private final AtomicInteger conduitsIndex; + private final List activePipelines; + private final AtomicInteger pipelineIndex; public PipelineManager() { - activePipelineChannels = new LinkedList<>(); - conduitsIndex = new AtomicInteger(0); + activePipelines = new LinkedList<>(); + pipelineIndex = new AtomicInteger(0); } /** @@ -59,9 +58,9 @@ public synchronized final Pipeline getPipeline( /** * In the Ozone world, we have a very simple policy. * - * 1. Try to create a pipelineChannel if there are enough free nodes. + * 1. Try to create a pipeline if there are enough free nodes. * - * 2. This allows all nodes to part of a pipelineChannel quickly. + * 2. This allows all nodes to part of a pipeline quickly. * * 3. if there are not enough free nodes, return conduits in a * round-robin fashion. @@ -70,28 +69,28 @@ public synchronized final Pipeline getPipeline( * Create a new placement policy that returns conduits in round robin * fashion. 
*/ - PipelineChannel pipelineChannel = - allocatePipelineChannel(replicationFactor); - if (pipelineChannel != null) { - LOG.debug("created new pipelineChannel:{} for container with " + + Pipeline pipeline = + allocatePipeline(replicationFactor); + if (pipeline != null) { + LOG.debug("created new pipeline:{} for container with " + "replicationType:{} replicationFactor:{}", - pipelineChannel.getName(), replicationType, replicationFactor); - activePipelineChannels.add(pipelineChannel); + pipeline.getPipelineName(), replicationType, replicationFactor); + activePipelines.add(pipeline); } else { - pipelineChannel = - findOpenPipelineChannel(replicationType, replicationFactor); - if (pipelineChannel != null) { - LOG.debug("re-used pipelineChannel:{} for container with " + + pipeline = + findOpenPipeline(replicationType, replicationFactor); + if (pipeline != null) { + LOG.debug("re-used pipeline:{} for container with " + "replicationType:{} replicationFactor:{}", - pipelineChannel.getName(), replicationType, replicationFactor); + pipeline.getPipelineName(), replicationType, replicationFactor); } } - if (pipelineChannel == null) { - LOG.error("Get pipelineChannel call failed. We are not able to find" + - "free nodes or operational pipelineChannel."); + if (pipeline == null) { + LOG.error("Get pipeline call failed. We are not able to find" + + "free nodes or operational pipeline."); return null; } else { - return new Pipeline(pipelineChannel); + return pipeline; } } @@ -106,19 +105,19 @@ protected int getReplicationCount(ReplicationFactor factor) { } } - public abstract PipelineChannel allocatePipelineChannel( + public abstract Pipeline allocatePipeline( ReplicationFactor replicationFactor) throws IOException; /** - * Find a PipelineChannel that is operational. + * Find a Pipeline that is operational. * * @return - Pipeline or null */ - private PipelineChannel findOpenPipelineChannel( + private Pipeline findOpenPipeline( ReplicationType type, ReplicationFactor factor) { - PipelineChannel pipelineChannel = null; + Pipeline pipeline = null; final int sentinal = -1; - if (activePipelineChannels.size() == 0) { + if (activePipelines.size() == 0) { LOG.error("No Operational conduits found. Returning null."); return null; } @@ -126,26 +125,26 @@ private PipelineChannel findOpenPipelineChannel( int nextIndex = sentinal; for (; startIndex != nextIndex; nextIndex = getNextIndex()) { // Just walk the list in a circular way. - PipelineChannel temp = - activePipelineChannels + Pipeline temp = + activePipelines .get(nextIndex != sentinal ? nextIndex : startIndex); - // if we find an operational pipelineChannel just return that. + // if we find an operational pipeline just return that. if ((temp.getLifeCycleState() == LifeCycleState.OPEN) && (temp.getFactor() == factor) && (temp.getType() == type)) { - pipelineChannel = temp; + pipeline = temp; break; } } - return pipelineChannel; + return pipeline; } /** - * gets the next index of the PipelineChannel to get. + * gets the next index of the Pipeline to get. * * @return index in the link list to get. 
*/ private int getNextIndex() { - return conduitsIndex.incrementAndGet() % activePipelineChannels.size(); + return pipelineIndex.incrementAndGet() % activePipelines.size(); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java index 2e56043c6b..508ca9bd3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.algorithms @@ -85,20 +84,20 @@ public PipelineSelector(NodeManager nodeManager, Configuration conf) { * The first of the list will be the leader node. * @return pipeline corresponding to nodes */ - public static PipelineChannel newPipelineFromNodes( + public static Pipeline newPipelineFromNodes( List nodes, LifeCycleState state, ReplicationType replicationType, ReplicationFactor replicationFactor, String name) { Preconditions.checkNotNull(nodes); Preconditions.checkArgument(nodes.size() > 0); String leaderId = nodes.get(0).getUuidString(); - PipelineChannel - pipelineChannel = new PipelineChannel(leaderId, state, replicationType, + Pipeline + pipeline = new Pipeline(leaderId, state, replicationType, replicationFactor, name); for (DatanodeDetails node : nodes) { - pipelineChannel.addMember(node); + pipeline.addMember(node); } - return pipelineChannel; + return pipeline; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java index 70489b9253..ace8758234 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -68,12 +67,12 @@ public RatisManagerImpl(NodeManager nodeManager, } /** - * Allocates a new ratis PipelineChannel from the free nodes. + * Allocates a new ratis Pipeline from the free nodes. * * @param factor - One or Three * @return PipelineChannel. 
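Taken together, the PipelineManager hunks above fold the old PipelineChannel wrapper into Pipeline itself while keeping the selection policy unchanged: first try to allocate a pipeline from free nodes, otherwise reuse an already OPEN pipeline of the matching type and factor in round-robin order. A condensed, illustrative restatement of that policy follows; the patch's actual loop walks activePipelines with a sentinel/start-index pattern, and this sketch is not part of the patch.

  // Condensed restatement of the allocate-or-reuse policy in getPipeline().
  Pipeline getOrReusePipeline(ReplicationType type, ReplicationFactor factor)
      throws IOException {
    Pipeline pipeline = allocatePipeline(factor);      // 1. prefer free nodes
    if (pipeline != null) {
      activePipelines.add(pipeline);
      return pipeline;
    }
    // 2. fall back to an OPEN pipeline of the same type and factor, round-robin.
    for (int i = 0; i < activePipelines.size(); i++) {
      Pipeline candidate = activePipelines.get(getNextIndex());
      if (candidate.getLifeCycleState() == LifeCycleState.OPEN
          && candidate.getType() == type
          && candidate.getFactor() == factor) {
        return candidate;
      }
    }
    return null;                                       // no free nodes, no open pipeline
  }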
*/ - public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { + public Pipeline allocatePipeline(ReplicationFactor factor) { List newNodesList = new LinkedList<>(); List datanodes = nodeManager.getNodes(NodeState.HEALTHY); int count = getReplicationCount(factor); @@ -87,22 +86,20 @@ public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { // once a datanode has been added to a pipeline, exclude it from // further allocations ratisMembers.addAll(newNodesList); - LOG.info("Allocating a new ratis pipelineChannel of size: {}", count); + LOG.info("Allocating a new ratis pipeline of size: {}", count); // Start all channel names with "Ratis", easy to grep the logs. String conduitName = PREFIX + UUID.randomUUID().toString().substring(PREFIX.length()); - PipelineChannel pipelineChannel = + Pipeline pipeline= PipelineSelector.newPipelineFromNodes(newNodesList, LifeCycleState.OPEN, ReplicationType.RATIS, factor, conduitName); - Pipeline pipeline = - new Pipeline(pipelineChannel); try (XceiverClientRatis client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) { client.createPipeline(pipeline.getPipelineName(), newNodesList); } catch (IOException e) { return null; } - return pipelineChannel; + return pipeline; } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java index 8268329351..e76027fb2b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdds.scm.pipelines.standalone; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -67,12 +67,12 @@ public StandaloneManagerImpl(NodeManager nodeManager, /** - * Allocates a new standalone PipelineChannel from the free nodes. + * Allocates a new standalone Pipeline from the free nodes. * * @param factor - One - * @return PipelineChannel. + * @return Pipeline. */ - public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { + public Pipeline allocatePipeline(ReplicationFactor factor) { List newNodesList = new LinkedList<>(); List datanodes = nodeManager.getNodes(NodeState.HEALTHY); int count = getReplicationCount(factor); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java new file mode 100644 index 0000000000..36f10a93dc --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.server; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.TypedEvent; + +import com.google.protobuf.GeneratedMessage; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class is responsible for dispatching heartbeat from datanode to + * appropriate EventHandler at SCM. + */ +public final class SCMDatanodeHeartbeatDispatcher { + + private static final Logger LOG = + LoggerFactory.getLogger(SCMDatanodeHeartbeatDispatcher.class); + + private EventPublisher eventPublisher; + + public static final TypedEvent NODE_REPORT = + new TypedEvent<>(NodeReportFromDatanode.class); + + public static final TypedEvent CONTAINER_REPORT = + new TypedEvent(ContainerReportFromDatanode.class); + + public SCMDatanodeHeartbeatDispatcher(EventPublisher eventPublisher) { + this.eventPublisher = eventPublisher; + } + + + /** + * Dispatches heartbeat to registered event handlers. + * + * @param heartbeat heartbeat to be dispatched. + */ + public void dispatch(SCMHeartbeatRequestProto heartbeat) { + DatanodeDetails datanodeDetails = + DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails()); + + if (heartbeat.hasNodeReport()) { + eventPublisher.fireEvent(NODE_REPORT, + new NodeReportFromDatanode(datanodeDetails, + heartbeat.getNodeReport())); + } + + if (heartbeat.hasContainerReport()) { + eventPublisher.fireEvent(CONTAINER_REPORT, + new ContainerReportFromDatanode(datanodeDetails, + heartbeat.getContainerReport())); + + } + } + + /** + * Wrapper class for events with the datanode origin. + */ + public static class ReportFromDatanode { + + private final DatanodeDetails datanodeDetails; + + private final T report; + + public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { + this.datanodeDetails = datanodeDetails; + this.report = report; + } + + public DatanodeDetails getDatanodeDetails() { + return datanodeDetails; + } + + public T getReport() { + return report; + } + } + + /** + * Node report event payload with origin. + */ + public static class NodeReportFromDatanode + extends ReportFromDatanode { + + public NodeReportFromDatanode(DatanodeDetails datanodeDetails, + NodeReportProto report) { + super(datanodeDetails, report); + } + } + + /** + * Container report event payload with origin. 
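Unlike the builder-based dispatcher it replaces, the class above does no report processing or threading of its own: dispatch() only unwraps the heartbeat and fires NODE_REPORT or CONTAINER_REPORT on the EventPublisher it was constructed with (the SCM EventQueue elsewhere in this patch). An illustrative consumer is sketched below, modelled on the test added at the end of this patch; it assumes EventHandler is a single-method interface usable as a lambda, and the handler body and the heartbeat variable are placeholders.

  // Illustrative only: subscribing to node reports produced by the new dispatcher.
  EventQueue eventQueue = new EventQueue();

  EventHandler<NodeReportFromDatanode> nodeReportHandler =
      (reportFromDatanode, publisher) ->
          System.out.println("Node report from "
              + reportFromDatanode.getDatanodeDetails());

  eventQueue.addHandler(SCMDatanodeHeartbeatDispatcher.NODE_REPORT, nodeReportHandler);

  // EventQueue doubles as the EventPublisher handed to the dispatcher in this patch.
  SCMDatanodeHeartbeatDispatcher dispatcher =
      new SCMDatanodeHeartbeatDispatcher(eventQueue);

  dispatcher.dispatch(heartbeat);   // heartbeat: an SCMHeartbeatRequestProto from a datanode
  eventQueue.processAll(1000L);     // run the subscribed handlers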
+ */ + public static class ContainerReportFromDatanode + extends ReportFromDatanode { + + public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, + ContainerReportsProto report) { + super(datanodeDetails, report); + } + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 7d16161e59..56b07190a5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; + import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; import org.apache.hadoop.hdds.protocol.proto @@ -62,6 +63,9 @@ import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto .Type.deleteBlocksCommand; +import static org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type + .replicateContainerCommand; import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto .Type.reregisterCommand; @@ -69,7 +73,7 @@ import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.server.report.SCMDatanodeHeartbeatDispatcher; +import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -77,6 +81,7 @@ import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.ozone.protocolPB @@ -117,14 +122,19 @@ public class SCMDatanodeProtocolServer implements private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher; public SCMDatanodeProtocolServer(final OzoneConfiguration conf, - StorageContainerManager scm) throws IOException { + StorageContainerManager scm, EventPublisher eventPublisher) + throws IOException { Preconditions.checkNotNull(scm, "SCM cannot be null"); + Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null"); + this.scm = scm; final int handlerCount = conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, OZONE_SCM_HANDLER_COUNT_DEFAULT); + heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher(eventPublisher); + RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, ProtobufRpcEngine.class); BlockingService dnProtoPbService = @@ -150,10 +160,6 @@ public SCMDatanodeProtocolServer(final OzoneConfiguration conf, conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr, datanodeRpcServer); - heartbeatDispatcher = SCMDatanodeHeartbeatDispatcher.newBuilder(conf, scm) - .addHandlerFor(NodeReportProto.class) - .addHandlerFor(ContainerReportsProto.class) - .build(); } public void start() { @@ -293,6 +299,12 @@ public SCMCommandProto 
getCommandResponse(SCMCommand cmd) .setCloseContainerCommandProto( ((CloseContainerCommand) cmd).getProto()) .build(); + case replicateContainerCommand: + return builder + .setCommandType(replicateContainerCommand) + .setReplicateContainerCommandProto( + ((ReplicateContainerCommand)cmd).getProto()) + .build(); default: throw new IllegalArgumentException("Not implemented"); } @@ -308,7 +320,6 @@ public void stop() { try { LOG.info("Stopping the RPC server for DataNodes"); datanodeRpcServer.stop(); - heartbeatDispatcher.shutdown(); } catch (Exception ex) { LOG.error(" datanodeRpcServer stop failed.", ex); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 78f13cb47c..568a86ab4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; +import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RPC; @@ -161,8 +162,12 @@ private StorageContainerManager(OzoneConfiguration conf) throws IOException { throw new SCMException("SCM not initialized.", ResultCodes .SCM_NOT_INITIALIZED); } + EventQueue eventQueue = new EventQueue(); + + SCMNodeManager nm = new SCMNodeManager(conf, scmStorage.getClusterID(), this); + scmNodeManager = nm; + eventQueue.addHandler(SCMNodeManager.DATANODE_COMMAND, nm); - scmNodeManager = new SCMNodeManager(conf, scmStorage.getClusterID(), this); scmContainerManager = new ContainerMapping(conf, getScmNodeManager(), cacheSize); @@ -176,7 +181,8 @@ private StorageContainerManager(OzoneConfiguration conf) throws IOException { scmAdminUsernames.add(scmUsername); } - datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this); + datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this, + eventQueue); blockProtocolServer = new SCMBlockProtocolServer(conf, this); clientProtocolServer = new SCMClientProtocolServer(conf, this); httpServer = new StorageContainerManagerHttpServer(conf); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java deleted file mode 100644 index 00ce94d7f5..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handler for Datanode Container Report. - */ -public class SCMDatanodeContainerReportHandler extends - SCMDatanodeReportHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeContainerReportHandler.class); - - @Override - public void processReport(DatanodeDetails datanodeDetails, - ContainerReportsProto report) throws IOException { - LOG.trace("Processing container report from {}.", datanodeDetails); - updateContainerReportMetrics(datanodeDetails, report); - getSCM().getScmContainerManager() - .processContainerReports(datanodeDetails, report); - } - - /** - * Updates container report metrics in SCM. - * - * @param datanodeDetails Datanode Information - * @param reports Container Reports - */ - private void updateContainerReportMetrics(DatanodeDetails datanodeDetails, - ContainerReportsProto reports) { - ContainerStat newStat = new ContainerStat(); - for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports - .getReportsList()) { - newStat.add(new ContainerStat(info.getSize(), info.getUsed(), - info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(), - info.getReadCount(), info.getWriteCount())); - } - // update container metrics - StorageContainerManager.getMetrics().setLastContainerStat(newStat); - - // Update container stat entry, this will trigger a removal operation if it - // exists in cache. - String datanodeUuid = datanodeDetails.getUuidString(); - getSCM().getContainerReportCache().put(datanodeUuid, newStat); - // update global view container metrics - StorageContainerManager.getMetrics().incrContainerStat(newStat); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index d50edff7c5..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutorService; - -/** - * This class is responsible for dispatching heartbeat from datanode to - * appropriate ReportHandlers at SCM. - * Only one handler per report is supported now, it's very easy to support - * multiple handlers for a report. - */ -public final class SCMDatanodeHeartbeatDispatcher { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeHeartbeatDispatcher.class); - - /** - * This stores Report to Handler mapping. - */ - private final Map, - SCMDatanodeReportHandler> handlers; - - /** - * Executor service which will be used for processing reports. - */ - private final ExecutorService executorService; - - /** - * Constructs SCMDatanodeHeartbeatDispatcher instance with the given - * handlers. - * - * @param handlers report to report handler mapping - */ - private SCMDatanodeHeartbeatDispatcher(Map, - SCMDatanodeReportHandler> handlers) { - this.handlers = handlers; - this.executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("SCMDatanode Heartbeat Dispatcher Thread - %d") - .build()); - } - - /** - * Dispatches heartbeat to registered handlers. - * - * @param heartbeat heartbeat to be dispatched. - */ - public void dispatch(SCMHeartbeatRequestProto heartbeat) { - DatanodeDetails datanodeDetails = DatanodeDetails - .getFromProtoBuf(heartbeat.getDatanodeDetails()); - if (heartbeat.hasNodeReport()) { - processReport(datanodeDetails, heartbeat.getNodeReport()); - } - if (heartbeat.hasContainerReport()) { - processReport(datanodeDetails, heartbeat.getContainerReport()); - } - } - - /** - * Invokes appropriate ReportHandler and submits the task to executor - * service for processing. - * - * @param datanodeDetails Datanode Information - * @param report Report to be processed - */ - @SuppressWarnings("unchecked") - private void processReport(DatanodeDetails datanodeDetails, - GeneratedMessage report) { - executorService.submit(() -> { - try { - SCMDatanodeReportHandler handler = handlers.get(report.getClass()); - handler.processReport(datanodeDetails, report); - } catch (IOException ex) { - LOG.error("Exception wile processing report {}, from {}", - report.getClass(), datanodeDetails, ex); - } - }); - } - - /** - * Shuts down SCMDatanodeHeartbeatDispatcher. - */ - public void shutdown() { - executorService.shutdown(); - } - - /** - * Returns a new Builder to construct {@link SCMDatanodeHeartbeatDispatcher}. 
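For contrast with the new event-based dispatcher added earlier in this patch, the builder shown here required one explicit handler registration per report type and the dispatcher ran those handlers on its own thread pool. The construction and shutdown calls removed from SCMDatanodeProtocolServer by this patch looked as follows; the replacement only needs an EventPublisher and leaves threading to the event queue.

  // Old construction path, as removed from SCMDatanodeProtocolServer above.
  heartbeatDispatcher = SCMDatanodeHeartbeatDispatcher.newBuilder(conf, scm)
      .addHandlerFor(NodeReportProto.class)
      .addHandlerFor(ContainerReportsProto.class)
      .build();

  // ... and in stop():
  heartbeatDispatcher.shutdown();   // the new dispatcher has no executor of its own to stop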
- * - * @param conf Configuration to be used by SCMDatanodeHeartbeatDispatcher - * @param scm {@link StorageContainerManager} instance to be used by report - * handlers - * - * @return {@link SCMDatanodeHeartbeatDispatcher.Builder} instance - */ - public static Builder newBuilder(Configuration conf, - StorageContainerManager scm) { - return new Builder(conf, scm); - } - - /** - * Builder for SCMDatanodeHeartbeatDispatcher. - */ - public static class Builder { - - private final SCMDatanodeReportHandlerFactory reportHandlerFactory; - private final Map, - SCMDatanodeReportHandler> report2handler; - - /** - * Constructs SCMDatanodeHeartbeatDispatcher.Builder instance. - * - * @param conf Configuration object to be used. - * @param scm StorageContainerManager instance to be used for report - * handler initialization. - */ - private Builder(Configuration conf, StorageContainerManager scm) { - this.report2handler = new HashMap<>(); - this.reportHandlerFactory = - new SCMDatanodeReportHandlerFactory(conf, scm); - } - - /** - * Adds new report handler for the given report. - * - * @param report Report for which handler has to be added - * - * @return Builder - */ - public Builder addHandlerFor(Class report) { - report2handler.put(report, reportHandlerFactory.getHandlerFor(report)); - return this; - } - - /** - * Associates the given report handler for the given report. - * - * @param report Report to be associated with - * @param handler Handler to be used for the report - * - * @return Builder - */ - public Builder addHandler(Class report, - SCMDatanodeReportHandler handler) { - report2handler.put(report, handler); - return this; - } - - /** - * Builds and returns {@link SCMDatanodeHeartbeatDispatcher} instance. - * - * @return SCMDatanodeHeartbeatDispatcher - */ - public SCMDatanodeHeartbeatDispatcher build() { - return new SCMDatanodeHeartbeatDispatcher(report2handler); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java deleted file mode 100644 index fb89b02215..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handles Datanode Node Report. - */ -public class SCMDatanodeNodeReportHandler extends - SCMDatanodeReportHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeNodeReportHandler.class); - - @Override - public void processReport(DatanodeDetails datanodeDetails, - NodeReportProto report) throws IOException { - LOG.debug("Processing node report from {}.", datanodeDetails); - //TODO: add logic to process node report. - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java deleted file mode 100644 index d3386493c1..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; - -import java.io.IOException; - -/** - * Datanode Report handlers should implement this interface in order to get - * call back whenever the report is received from datanode. - * - * @param Type of report the handler is interested in. - */ -public abstract class SCMDatanodeReportHandler - implements Configurable { - - private Configuration config; - private StorageContainerManager scm; - - /** - * Initializes SCMDatanodeReportHandler and associates it with the given - * StorageContainerManager instance. - * - * @param storageContainerManager StorageContainerManager instance to be - * associated with. - */ - public void init(StorageContainerManager storageContainerManager) { - this.scm = storageContainerManager; - } - - /** - * Returns the associated StorageContainerManager instance. This will be - * used by the ReportHandler implementations. - * - * @return {@link StorageContainerManager} - */ - protected StorageContainerManager getSCM() { - return scm; - } - - @Override - public void setConf(Configuration conf) { - this.config = conf; - } - - @Override - public Configuration getConf() { - return config; - } - - /** - * Processes the report received from datanode. Each ReportHandler - * implementation is responsible for providing the logic to process the - * report it's interested in. - * - * @param datanodeDetails Datanode Information - * @param report Report to be processed - * - * @throws IOException In case of any exception - */ - abstract void processReport(DatanodeDetails datanodeDetails, T report) - throws IOException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java deleted file mode 100644 index e88495fc23..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.util.ReflectionUtils; - -import java.util.HashMap; -import java.util.Map; - - -/** - * Factory class to construct {@link SCMDatanodeReportHandler} given a report. - */ -public class SCMDatanodeReportHandlerFactory { - - private final Configuration conf; - private final StorageContainerManager scm; - private final Map, - Class>> - report2handler; - - /** - * Constructs {@link SCMDatanodeReportHandler} instance. - * - * @param conf Configuration to be passed to the - * {@link SCMDatanodeReportHandler} - */ - public SCMDatanodeReportHandlerFactory(Configuration conf, - StorageContainerManager scm) { - this.conf = conf; - this.scm = scm; - this.report2handler = new HashMap<>(); - - report2handler.put(NodeReportProto.class, - SCMDatanodeNodeReportHandler.class); - report2handler.put(ContainerReportsProto.class, - SCMDatanodeContainerReportHandler.class); - } - - /** - * Returns the SCMDatanodeReportHandler for the corresponding report. - * - * @param report report - * - * @return report handler - */ - public SCMDatanodeReportHandler getHandlerFor( - Class report) { - Class> - handlerClass = report2handler.get(report); - if (handlerClass == null) { - throw new RuntimeException("No handler found for report " + report); - } - SCMDatanodeReportHandler instance = - ReflectionUtils.newInstance(handlerClass, conf); - instance.init(scm); - return instance; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java deleted file mode 100644 index fda3993096..0000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.server.report; -/** - * Handling of all the datanode reports in SCM which are received through - * heartbeat is done here. - * - * SCM Datanode Report Processing State Diagram: - * - * SCMDatanode SCMDatanodeHeartbeat SCMDatanodeReport - * ProtocolServer Dispatcher Handler - * | | | - * | | | - * | construct | | - * |----------------------->| | - * | | | - * | | register | - * | |<-----------------------| - * | | | - * +------------+------------------------+------------------------+--------+ - * | loop | | | | - * | | | | | - * | | | | | - * | heartbeat | | | | - * - +----------->| | | | - * | from | heartbeat | | | - * | Datanode |----------------------->| | | - * | | | report | | - * | | |----------------------->| | - * | | | | | - * | DN | | | | - * <-+------------| | | | - * | commands | | | | - * | | | | | - * +------------+------------------------+------------------------+--------+ - * | | | - * | | | - * | shutdown | | - * |----------------------->| | - * | | | - * | | | - * - - - - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index adb212a409..d06d568ae0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.scm.container.Mapping; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -357,11 +356,10 @@ public void testDeletedBlockTransactions() throws IOException { private void mockContainerInfo(Mapping mappingService, long containerID, DatanodeDetails dd) throws IOException { - PipelineChannel pipelineChannel = - new PipelineChannel("fake", LifeCycleState.OPEN, + Pipeline pipeline = + new Pipeline("fake", LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "fake"); - pipelineChannel.addMember(dd); - Pipeline pipeline = new Pipeline(pipelineChannel); + pipeline.addMember(dd); ContainerInfo.Builder builder = new ContainerInfo.Builder(); builder.setPipeline(pipeline); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 8c59462b40..80b5d6e182 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -19,7 +19,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import 
org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -273,11 +272,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid())); } - @Override - public NodePoolManager getNodePoolManager() { - return Mockito.mock(NodePoolManager.class); - } - /** * Used for testing. * diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java index 2b04d6b862..824a135194 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java @@ -19,6 +19,7 @@ import com.google.common.base.Supplier; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -29,7 +30,10 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; +import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; +import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; @@ -1165,4 +1169,39 @@ public void testScmNodeReportUpdate() throws IOException, assertEquals(expectedRemaining, foundRemaining); } } + + @Test + public void testHandlingSCMCommandEvent() { + OzoneConfiguration conf = getConf(); + conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + 100, TimeUnit.MILLISECONDS); + + DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); + String dnId = datanodeDetails.getUuidString(); + String storagePath = testDir.getAbsolutePath() + "/" + dnId; + List reports = + TestUtils.createStorageReport(100, 10, 90, + storagePath, null, dnId, 1); + + EventQueue eq = new EventQueue(); + try (SCMNodeManager nodemanager = createNodeManager(conf)) { + eq.addHandler(SCMNodeManager.DATANODE_COMMAND, nodemanager); + + nodemanager + .register(datanodeDetails, TestUtils.createNodeReport(reports)); + eq.fireEvent(SCMNodeManager.DATANODE_COMMAND, + new CommandForDatanode(datanodeDetails.getUuid(), + new CloseContainerCommand(1L, ReplicationType.STAND_ALONE))); + + eq.processAll(1000L); + List command = + nodemanager.sendHeartbeat(datanodeDetails, null); + Assert.assertEquals(1, command.size()); + Assert + .assertEquals(command.get(0).getClass(), CloseContainerCommand.class); + } catch (IOException e) { + e.printStackTrace(); + } + } + } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java deleted file mode 100644 index 8f412dedda..0000000000 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.commons.collections.ListUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.PathUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test for SCM node pool manager. - */ -public class TestSCMNodePoolManager { - private static final Logger LOG = - LoggerFactory.getLogger(TestSCMNodePoolManager.class); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private final File testDir = PathUtils.getTestDir( - TestSCMNodePoolManager.class); - - SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf) - throws IOException { - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - return new SCMNodePoolManager(conf); - } - - /** - * Test default node pool. 
- * - * @throws IOException - */ - @Test - public void testDefaultNodePool() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - try { - final String defaultPool = "DefaultPool"; - NodePoolManager npMgr = createNodePoolManager(conf); - - final int nodeCount = 4; - final List nodes = TestUtils - .getListOfDatanodeDetails(nodeCount); - assertEquals(0, npMgr.getNodePools().size()); - for (DatanodeDetails node: nodes) { - npMgr.addNode(defaultPool, node); - } - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - - DatanodeDetails nodeRemoved = nodes.remove(2); - npMgr.removeNode(defaultPool, nodeRemoved); - List nodesAfterRemove = npMgr.getNodes(defaultPool); - assertTwoDatanodeListsEqual(nodes, nodesAfterRemove); - - List nonExistSet = npMgr.getNodes("NonExistSet"); - assertEquals(0, nonExistSet.size()); - } finally { - FileUtil.fullyDelete(testDir); - } - } - - - /** - * Test default node pool reload. - * - * @throws IOException - */ - @Test - public void testDefaultNodePoolReload() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - final String defaultPool = "DefaultPool"; - final int nodeCount = 4; - final List nodes = TestUtils - .getListOfDatanodeDetails(nodeCount); - - try { - try { - SCMNodePoolManager npMgr = createNodePoolManager(conf); - assertEquals(0, npMgr.getNodePools().size()); - for (DatanodeDetails node : nodes) { - npMgr.addNode(defaultPool, node); - } - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - npMgr.close(); - } finally { - LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" + - " and close."); - } - - // try reload with a new NodePoolManager instance - try { - SCMNodePoolManager npMgr = createNodePoolManager(conf); - List nodesRetrieved = npMgr.getNodes(defaultPool); - assertEquals(nodeCount, nodesRetrieved.size()); - assertTwoDatanodeListsEqual(nodes, nodesRetrieved); - } finally { - LOG.info("testDefaultNodePoolReload: Finish reloading node pool."); - } - } finally { - FileUtil.fullyDelete(testDir); - } - } - - /** - * Compare and verify that two datanode lists are equal. - * @param list1 - datanode list 1. - * @param list2 - datanode list 2. - */ - private void assertTwoDatanodeListsEqual(List list1, - List list2) { - assertEquals(list1.size(), list2.size()); - Collections.sort(list1); - Collections.sort(list2); - assertTrue(ListUtils.isEqualList(list1, list2)); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java new file mode 100644 index 0000000000..326a34b792 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.server; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher + .ContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher + .NodeReportFromDatanode; +import org.apache.hadoop.hdds.server.events.Event; +import org.apache.hadoop.hdds.server.events.EventPublisher; + +import org.junit.Assert; +import org.junit.Test; + +/** + * This class tests the behavior of SCMDatanodeHeartbeatDispatcher. + */ +public class TestSCMDatanodeHeartbeatDispatcher { + + + @Test + public void testNodeReportDispatcher() throws IOException { + + Configuration conf = new OzoneConfiguration(); + + AtomicInteger eventReceived = new AtomicInteger(); + + NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); + + SCMDatanodeHeartbeatDispatcher dispatcher = + new SCMDatanodeHeartbeatDispatcher(new EventPublisher() { + @Override + public > void fireEvent( + EVENT_TYPE event, PAYLOAD payload) { + Assert.assertEquals(event, + SCMDatanodeHeartbeatDispatcher.NODE_REPORT); + eventReceived.incrementAndGet(); + Assert.assertEquals(nodeReport, ((NodeReportFromDatanode)payload).getReport()); + + } + }); + + DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); + + SCMHeartbeatRequestProto heartbeat = + SCMHeartbeatRequestProto.newBuilder() + .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) + .setNodeReport(nodeReport) + .build(); + dispatcher.dispatch(heartbeat); + Assert.assertEquals(1, eventReceived.get()); + + + } + + @Test + public void testContainerReportDispatcher() throws IOException { + + Configuration conf = new OzoneConfiguration(); + + AtomicInteger eventReceived = new AtomicInteger(); + + ContainerReportsProto containerReport = + ContainerReportsProto.getDefaultInstance(); + + SCMDatanodeHeartbeatDispatcher dispatcher = + new SCMDatanodeHeartbeatDispatcher(new EventPublisher() { + @Override + public > void fireEvent( + EVENT_TYPE event, PAYLOAD payload) { + Assert.assertEquals(event, + SCMDatanodeHeartbeatDispatcher.CONTAINER_REPORT); + Assert.assertEquals(containerReport, ((ContainerReportFromDatanode)payload).getReport()); + eventReceived.incrementAndGet(); + } + }); + + DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); + + SCMHeartbeatRequestProto heartbeat = + SCMHeartbeatRequestProto.newBuilder() + .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) + .setContainerReport(containerReport) + .build(); + dispatcher.dispatch(heartbeat); + Assert.assertEquals(1, eventReceived.get()); + + + } + +} diff --git 
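(Reviewer aside, not part of the patch.) The replacement TestSCMDatanodeHeartbeatDispatcher above drops the builder/handler wiring of the deleted version and publishes events instead. A combined sketch in the same style, dispatching one heartbeat that carries both report types; the generic bounds on fireEvent are reconstructed from the anonymous EventPublisher in the new test and may differ in detail.

```java
// Sketch: one heartbeat with both reports should publish one NODE_REPORT
// and one CONTAINER_REPORT event (illustrative fragment).
AtomicInteger eventsReceived = new AtomicInteger();

SCMDatanodeHeartbeatDispatcher dispatcher =
    new SCMDatanodeHeartbeatDispatcher(new EventPublisher() {
      @Override
      public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
          EVENT_TYPE event, PAYLOAD payload) {
        // Called once per report type carried by the heartbeat.
        eventsReceived.incrementAndGet();
      }
    });

DatanodeDetails dn = TestUtils.getDatanodeDetails();
dispatcher.dispatch(SCMHeartbeatRequestProto.newBuilder()
    .setDatanodeDetails(dn.getProtoBufMessage())
    .setNodeReport(NodeReportProto.getDefaultInstance())
    .setContainerReport(ContainerReportsProto.getDefaultInstance())
    .build());

Assert.assertEquals(2, eventsReceived.get());
```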
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java deleted file mode 100644 index 776ae88754..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test cases to verify SCMDatanodeContainerReportHandler's behavior. - */ -public class TestSCMDatanodeContainerReportHandler { - - //TODO: add test cases to verify SCMDatanodeContainerReportHandler. - - @Test - public void dummyTest() { - Assert.assertTrue(true); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index 5d086471c1..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -/** - * This class tests the behavior of SCMDatanodeHeartbeatDispatcher. - */ -public class TestSCMDatanodeHeartbeatDispatcher { - - @Test - public void testSCMDatanodeHeartbeatDispatcherBuilder() { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandlerFor(NodeReportProto.class) - .addHandlerFor(ContainerReportsProto.class) - .build(); - Assert.assertNotNull(dispatcher); - } - - @Test - public void testNodeReportDispatcher() throws IOException { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeNodeReportHandler nodeReportHandler = - Mockito.mock(SCMDatanodeNodeReportHandler.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandler(NodeReportProto.class, nodeReportHandler) - .build(); - - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setNodeReport(nodeReport) - .build(); - dispatcher.dispatch(heartbeat); - verify(nodeReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), eq(nodeReport)); - } - - @Test - public void testContainerReportDispatcher() throws IOException { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeContainerReportHandler containerReportHandler = - Mockito.mock(SCMDatanodeContainerReportHandler.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandler(ContainerReportsProto.class, containerReportHandler) - .build(); - - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - ContainerReportsProto containerReport = - ContainerReportsProto.getDefaultInstance(); - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setContainerReport(containerReport) - .build(); - dispatcher.dispatch(heartbeat); - verify(containerReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), - any(ContainerReportsProto.class)); - } - - @Test - public void testNodeAndContainerReportDispatcher() throws 
IOException { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeNodeReportHandler nodeReportHandler = - Mockito.mock(SCMDatanodeNodeReportHandler.class); - SCMDatanodeContainerReportHandler containerReportHandler = - Mockito.mock(SCMDatanodeContainerReportHandler.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null) - .addHandler(NodeReportProto.class, nodeReportHandler) - .addHandler(ContainerReportsProto.class, containerReportHandler) - .build(); - - DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); - ContainerReportsProto containerReport = - ContainerReportsProto.getDefaultInstance(); - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setNodeReport(nodeReport) - .setContainerReport(containerReport) - .build(); - dispatcher.dispatch(heartbeat); - verify(nodeReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), any(NodeReportProto.class)); - verify(containerReportHandler, - times(1)) - .processReport(any(DatanodeDetails.class), - any(ContainerReportsProto.class)); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java deleted file mode 100644 index 30a753c024..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test cases to verify TestSCMDatanodeNodeReportHandler's behavior. - */ -public class TestSCMDatanodeNodeReportHandler { - - - //TODO: add test cases to verify SCMDatanodeNodeReportHandler. - - @Test - public void dummyTest() { - Assert.assertTrue(true); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java deleted file mode 100644 index 4b918f76c7..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test cases to verify the functionality of SCMDatanodeReportHandlerFactory. - */ -public class TestSCMDatanodeReportHandlerFactory { - - @Test - public void testNodeReportHandlerConstruction() { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeReportHandlerFactory factory = - new SCMDatanodeReportHandlerFactory(conf, null); - Assert.assertTrue(factory.getHandlerFor(NodeReportProto.class) - instanceof SCMDatanodeNodeReportHandler); - } - - @Test - public void testContainerReporttHandlerConstruction() { - Configuration conf = new OzoneConfiguration(); - SCMDatanodeReportHandlerFactory factory = - new SCMDatanodeReportHandlerFactory(conf, null); - Assert.assertTrue(factory.getHandlerFor(ContainerReportsProto.class) - instanceof SCMDatanodeContainerReportHandler); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java deleted file mode 100644 index 4a3f59f016..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server.report; -/** - * Contains test-cases to test Datanode report handlers in SCM. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 072d821247..1a4dcd7ad2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.CommandQueue; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -201,10 +200,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails dd) { return null; } - @Override - public NodePoolManager getNodePoolManager() { - return Mockito.mock(NodePoolManager.class); - } /** * Wait for the heartbeat is processed by NodeManager. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java deleted file mode 100644 index ffcd752e84..0000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodePoolManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Pool Manager replication mock. - */ -public class ReplicationNodePoolManagerMock implements NodePoolManager { - - private final Map nodeMemberShip; - - /** - * A node pool manager for testing. - */ - public ReplicationNodePoolManagerMock() { - nodeMemberShip = new HashMap<>(); - } - - /** - * Add a node to a node pool. - * - * @param pool - name of the node pool. - * @param node - data node. - */ - @Override - public void addNode(String pool, DatanodeDetails node) { - nodeMemberShip.put(node, pool); - } - - /** - * Remove a node from a node pool. - * - * @param pool - name of the node pool. - * @param node - data node. - * @throws SCMException - */ - @Override - public void removeNode(String pool, DatanodeDetails node) - throws SCMException { - nodeMemberShip.remove(node); - - } - - /** - * Get a list of known node pools. - * - * @return a list of known node pool names or an empty list if not node pool - * is defined. - */ - @Override - public List getNodePools() { - Set poolSet = new HashSet<>(); - for (Map.Entry entry : nodeMemberShip.entrySet()) { - poolSet.add(entry.getValue()); - } - return new ArrayList<>(poolSet); - - } - - /** - * Get all nodes of a node pool given the name of the node pool. - * - * @param pool - name of the node pool. - * @return a list of datanode ids or an empty list if the node pool was not - * found. - */ - @Override - public List getNodes(String pool) { - Set datanodeSet = new HashSet<>(); - for (Map.Entry entry : nodeMemberShip.entrySet()) { - if (entry.getValue().equals(pool)) { - datanodeSet.add(entry.getKey()); - } - } - return new ArrayList<>(datanodeSet); - } - - /** - * Get the node pool name if the node has been added to a node pool. - * - * @param datanodeDetails DatanodeDetails. - * @return node pool name if it has been assigned. null if the node has not - * been assigned to any node pool yet. - */ - @Override - public String getNodePool(DatanodeDetails datanodeDetails) { - return nodeMemberShip.get(datanodeDetails); - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *
- *
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java index ad7b0569f4..de9bbdab7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java @@ -19,8 +19,8 @@ import java.util.Arrays; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java new file mode 100644 index 0000000000..e9959c192d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemMultipartUploader; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; + +/** + * Support for HDFS multipart uploads, built on + * {@link FileSystem#concat(Path, Path[])}. 
+ */ +public class DFSMultipartUploaderFactory extends MultipartUploaderFactory { + protected MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) { + if (fs.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { + return new FileSystemMultipartUploader(fs); + } + return null; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java index fe39df6305..5dfcc736b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java index e83c8ae92b..a8c73a4220 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java @@ -31,7 +31,7 @@ import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java index dc77a47a94..f873b84c8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.HadoopIllegalArgumentException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java index d8a7de2b7b..e80f12aa0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java @@ -19,8 +19,8 @@ import java.util.Date; -import org.apache.commons.lang.builder.EqualsBuilder; -import 
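(Reviewer aside, not part of the patch.) DFSMultipartUploaderFactory is discovered through the java.util.ServiceLoader registration added later in this change (META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory). A rough sketch of that discovery, using a hypothetical lookup helper; the real resolution logic lives in MultipartUploaderFactory and may look different.

```java
import java.util.ServiceLoader;

import org.apache.hadoop.fs.MultipartUploaderFactory;

// Hypothetical helper, for illustration only.
final class MultipartUploaderLookup {
  static MultipartUploaderFactory firstRegisteredFactory() {
    // ServiceLoader reads META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
    // from the classpath; with hadoop-hdfs-client present this yields
    // DFSMultipartUploaderFactory.
    for (MultipartUploaderFactory factory
        : ServiceLoader.load(MultipartUploaderFactory.class)) {
      return factory;
    }
    return null;
  }
}
```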
org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index daa77be118..6c9f27796a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -22,8 +22,8 @@ import javax.annotation.Nullable; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.InvalidRequestException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java index f1441b5727..0b851caff8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.crypto.CipherSuite; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java index 39489b479c..3559ab97d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.protocol; import com.google.common.base.Preconditions; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java index a5b95cb217..c8a2722621 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.protocol; import com.google.common.base.Preconditions; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import java.io.Serializable; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java index 7939662ee3..8413c84df9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index 6f8a8fa8f2..2262003112 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -29,7 +29,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.net.DomainPeer; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index c2f0350bc3..9c2d2e0ecb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -34,7 +34,7 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.collections.map.LinkedMap; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index fb0e06f4ac..b9fcadae52 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -25,8 +25,8 @@ import java.util.NoSuchElementException; import java.util.Random; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.io.nativeio.NativeIO; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 673acd6fa7..ec60a186c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -56,8 +56,6 @@ import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import org.apache.commons.io.IOUtils; import org.apache.commons.io.input.BoundedInputStream; import org.apache.hadoop.conf.Configuration; @@ -121,6 +119,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; @@ -172,8 +171,6 @@ public class WebHdfsFileSystem extends FileSystem private boolean disallowFallbackToInsecureCluster; private String restCsrfCustomHeader; private Set restCsrfMethodsToIgnore; - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); private DFSOpsCountStatistics storageStatistics; @@ -476,7 +473,7 @@ private Path makeAbsolute(Path f) { + "\" (parsed=\"" + parsed + "\")"); } } - return READER.readValue(in); + return JsonSerialization.mapReader().readValue(in); } finally { in.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java index c6ebdd67eb..3e3fbfbd91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java @@ -18,8 +18,6 @@ */ package org.apache.hadoop.hdfs.web.oauth2; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import com.squareup.okhttp.OkHttpClient; import com.squareup.okhttp.Request; import com.squareup.okhttp.RequestBody; @@ -28,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.util.JsonSerialization; import 
org.apache.hadoop.util.Timer; import org.apache.http.HttpStatus; @@ -55,8 +54,6 @@ @InterfaceStability.Evolving public class ConfRefreshTokenBasedAccessTokenProvider extends AccessTokenProvider { - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); public static final String OAUTH_REFRESH_TOKEN_KEY = "dfs.webhdfs.oauth2.refresh.token"; @@ -129,7 +126,8 @@ void refresh() throws IOException { + responseBody.code() + ", text = " + responseBody.toString()); } - Map response = READER.readValue(responseBody.body().string()); + Map response = JsonSerialization.mapReader().readValue( + responseBody.body().string()); String newExpiresIn = response.get(EXPIRES_IN).toString(); accessTokenTimer.setExpiresIn(newExpiresIn); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java index 5c629e0165..bfd7055990 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java @@ -18,8 +18,6 @@ */ package org.apache.hadoop.hdfs.web.oauth2; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import com.squareup.okhttp.OkHttpClient; import com.squareup.okhttp.Request; import com.squareup.okhttp.RequestBody; @@ -28,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.Timer; import org.apache.http.HttpStatus; @@ -55,8 +54,6 @@ @InterfaceStability.Evolving public abstract class CredentialBasedAccessTokenProvider extends AccessTokenProvider { - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); public static final String OAUTH_CREDENTIAL_KEY = "dfs.webhdfs.oauth2.credential"; @@ -123,7 +120,8 @@ void refresh() throws IOException { + responseBody.code() + ", text = " + responseBody.toString()); } - Map response = READER.readValue(responseBody.body().string()); + Map response = JsonSerialization.mapReader().readValue( + responseBody.body().string()); String newExpiresIn = response.get(EXPIRES_IN).toString(); timer.setExpiresIn(newExpiresIn); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory new file mode 100644 index 0000000000..b153fd9924 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
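(Reviewer aside, not part of the patch.) JsonSerialization.mapReader() gives the OAuth2 providers and WebHdfsFileSystem a shared Jackson reader for Map targets, replacing the per-class ObjectMapper/ObjectReader fields. A minimal usage sketch; the JSON payload and the "expires_in" key are illustrative, and readValue can throw IOException.

```java
// Parse an OAuth2 token response into a Map, as the refresh() methods now do.
Map<?, ?> response = JsonSerialization.mapReader()
    .readValue("{\"access_token\":\"abc\",\"expires_in\":\"3600\"}");
String newExpiresIn = response.get("expires_in").toString();   // "3600"
```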
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +org.apache.hadoop.hdfs.DFSMultipartUploaderFactory diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 8913f1a5ea..5c33ef6f6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -28,7 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index e9525e21b5..42d2c008a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -123,11 +123,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io compile - - commons-lang - commons-lang - compile - commons-logging commons-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java index 458fec203f..868476a826 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java @@ -24,7 +24,7 @@ import java.util.List; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java index 0959eaa34a..cf78be3190 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; /** * Base class for objects 
that are unique to a namespace. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java index 6b288b3555..60dbcdc10e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java @@ -30,7 +30,7 @@ import java.util.Collections; import java.util.List; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java index 005882ebdf..49cdf10364 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java @@ -26,7 +26,7 @@ import java.util.SortedMap; import java.util.TreeMap; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.HdfsConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index eaf9361e9f..fcd5ae1940 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -113,11 +113,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io compile - - commons-lang - commons-lang - compile - commons-logging commons-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index bc8e81f976..dde7eb79c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -581,7 +581,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_BALANCER_BLOCK_MOVE_TIMEOUT = "dfs.balancer.block-move.timeout"; public static final int DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT = 0; public static final String DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.balancer.max-no-move-interval"; - public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute + public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute + public static final String DFS_BALANCER_MAX_ITERATION_TIME_KEY = "dfs.balancer.max-iteration-time"; + public static final long DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT = 20 * 60 * 1000L; // 20 mins public static final String DFS_MOVER_MOVEDWINWIDTH_KEY = "dfs.mover.movedWinWidth"; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java index 89cf641a02..f8987a367b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java @@ -21,7 +21,7 @@ import java.util.Date; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java index e96fd4da60..64ac11ca23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java @@ -31,7 +31,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.commons.lang.StringEscapeUtils; +import org.apache.commons.lang3.StringEscapeUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -157,7 +157,7 @@ private boolean checkStorageInfoOrSendError(JNStorage storage, int myNsId = storage.getNamespaceID(); String myClusterId = storage.getClusterID(); - String theirStorageInfoString = StringEscapeUtils.escapeHtml( + String theirStorageInfoString = StringEscapeUtils.escapeHtml4( request.getParameter(STORAGEINFO_PARAM)); if (theirStorageInfoString != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 452664a947..8f25d260b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -31,7 +31,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang.math.LongRange; +import org.apache.commons.lang3.Range; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -842,8 +842,8 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, // Paranoid sanity check: if the new log is shorter than the log we // currently have, we should not end up discarding any transactions // which are already Committed. - if (txnRange(currentSegment).containsLong(committedTxnId.get()) && - !txnRange(segment).containsLong(committedTxnId.get())) { + if (txnRange(currentSegment).contains(committedTxnId.get()) && + !txnRange(segment).contains(committedTxnId.get())) { throw new AssertionError( "Cannot replace segment " + TextFormat.shortDebugString(currentSegment) + @@ -862,7 +862,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, // If we're shortening the log, update our highest txid // used for lag metrics. 
- if (txnRange(currentSegment).containsLong(highestWrittenTxId)) { + if (txnRange(currentSegment).contains(highestWrittenTxId)) { updateHighestWrittenTxId(segment.getEndTxId()); } } @@ -906,10 +906,10 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, TextFormat.shortDebugString(newData) + " ; journal id: " + journalId); } - private LongRange txnRange(SegmentStateProto seg) { + private Range txnRange(SegmentStateProto seg) { Preconditions.checkArgument(seg.hasEndTxId(), "invalid segment: %s ; journal id: %s", seg, journalId); - return new LongRange(seg.getStartTxId(), seg.getEndTxId()); + return Range.between(seg.getStartTxId(), seg.getEndTxId()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 13d584644d..426c7ab074 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -289,13 +289,17 @@ static int getInt(Configuration conf, String key, int defaultValue) { final int maxNoMoveInterval = conf.getInt( DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT); + final long maxIterationTime = conf.getLong( + DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, + DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT); this.nnc = theblockpool; this.dispatcher = new Dispatcher(theblockpool, p.getIncludedNodes(), p.getExcludedNodes(), movedWinWidth, moverThreads, dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize, - getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, conf); + getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, + maxIterationTime, conf); this.threshold = p.getThreshold(); this.policy = p.getBalancingPolicy(); this.sourceNodes = p.getSourceNodes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index 349ced13f3..060c013e37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -138,6 +138,8 @@ public class Dispatcher { private final boolean connectToDnViaHostname; private BlockPlacementPolicies placementPolicies; + private long maxIterationTime; + static class Allocator { private final int max; private int count = 0; @@ -346,13 +348,19 @@ private boolean addTo(StorageGroup g) { /** Dispatch the move to the proxy source & wait for the response. */ private void dispatch() { - LOG.info("Start moving " + this); - assert !(reportedBlock instanceof DBlockStriped); - Socket sock = new Socket(); DataOutputStream out = null; DataInputStream in = null; try { + if (source.isIterationOver()){ + LOG.info("Cancel moving " + this + + " as iteration is already cancelled due to" + + " dfs.balancer.max-iteration-time is passed."); + throw new IOException("Block move cancelled."); + } + LOG.info("Start moving " + this); + assert !(reportedBlock instanceof DBlockStriped); + sock.connect( NetUtils.createSocketAddr(target.getDatanodeInfo(). 
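(Reviewer aside, not part of the patch.) The commons-lang to commons-lang3 swaps in the hunks above are mechanical; a minimal before/after sketch, reusing names from Journal.java and GetJournalEditServlet.

```java
import org.apache.commons.lang3.Range;
import org.apache.commons.lang3.StringEscapeUtils;

// commons-lang 2.x:  new LongRange(start, end).containsLong(txId)
Range<Long> txns = Range.between(seg.getStartTxId(), seg.getEndTxId());
boolean covered = txns.contains(committedTxnId.get()); // same closed-interval semantics

// commons-lang 2.x:  StringEscapeUtils.escapeHtml(nodeName)
String escaped = StringEscapeUtils.escapeHtml4(nodeName); // lang3 renamed the method
```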
getXferAddr(Dispatcher.this.connectToDnViaHostname)), @@ -760,7 +768,10 @@ private Source(StorageType storageType, long maxSize2Move, DDatanode dn) { * Check if the iteration is over */ public boolean isIterationOver() { - return (Time.monotonicNow()-startTime > MAX_ITERATION_TIME); + if (maxIterationTime < 0){ + return false; + } + return (Time.monotonicNow()-startTime > maxIterationTime); } /** Add a task */ @@ -908,8 +919,6 @@ private boolean shouldFetchMoreBlocks() { return blocksToReceive > 0; } - private static final long MAX_ITERATION_TIME = 20 * 60 * 1000L; // 20 mins - /** * This method iteratively does the following: it first selects a block to * move, then sends a request to the proxy source to start the block move @@ -990,7 +999,7 @@ private void dispatchBlocks(long delay) { } if (isIterationOver()) { - LOG.info("The maximum iteration time (" + MAX_ITERATION_TIME/1000 + LOG.info("The maximum iteration time (" + maxIterationTime/1000 + " seconds) has been reached. Stopping " + this); } } @@ -1013,14 +1022,14 @@ public Dispatcher(NameNodeConnector nnc, Set includedNodes, int maxNoMoveInterval, Configuration conf) { this(nnc, includedNodes, excludedNodes, movedWinWidth, moverThreads, dispatcherThreads, maxConcurrentMovesPerNode, - 0L, 0L, 0, maxNoMoveInterval, conf); + 0L, 0L, 0, maxNoMoveInterval, -1, conf); } Dispatcher(NameNodeConnector nnc, Set includedNodes, Set excludedNodes, long movedWinWidth, int moverThreads, int dispatcherThreads, int maxConcurrentMovesPerNode, - long getBlocksSize, long getBlocksMinBlockSize, - int blockMoveTimeout, int maxNoMoveInterval, Configuration conf) { + long getBlocksSize, long getBlocksMinBlockSize, int blockMoveTimeout, + int maxNoMoveInterval, long maxIterationTime, Configuration conf) { this.nnc = nnc; this.excludedNodes = excludedNodes; this.includedNodes = includedNodes; @@ -1047,6 +1056,7 @@ public Dispatcher(NameNodeConnector nnc, Set includedNodes, HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null); + this.maxIterationTime = maxIterationTime; } public DistributedFileSystem getDistributedFileSystem() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 76a77816d5..72ea1c0692 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -4576,7 +4576,7 @@ private void scanAndCompactStorages() throws InterruptedException { datanodesAndStorages.add(node.getDatanodeUuid()); datanodesAndStorages.add(storage.getStorageID()); } - LOG.info("StorageInfo TreeSet fill ratio {} : {}{}", + LOG.debug("StorageInfo TreeSet fill ratio {} : {}{}", storage.getStorageID(), ratio, (ratio < storageInfoDefragmentRatio) ? 
" (queued for defragmentation)" : ""); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index ab9743cffc..39665e3e95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -38,7 +38,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -165,7 +165,7 @@ static class ScanInfoPerBlockPool extends /** * Merges {@code that} ScanInfoPerBlockPool into this one * - * @param the ScanInfoPerBlockPool to merge + * @param that ScanInfoPerBlockPool to merge */ public void addAll(ScanInfoPerBlockPool that) { if (that == null) return; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index f70d4afe29..767b150e1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -43,7 +43,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.time.DurationFormatUtils; +import org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ChecksumException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java index 8eacdecf7b..968a5a77f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java @@ -26,8 +26,8 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java index b765885e0f..90cc0c4800 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java @@ -22,8 +22,8 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java index 58ef5ce51a..5f4e0f716f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java @@ -24,8 +24,8 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException; import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index d06cd1cdef..5604a218d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -34,8 +34,8 @@ import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 7160b861f7..769c13757b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -19,7 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.XAttr; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a8c1926051..f94f6d072b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.commons.lang.StringEscapeUtils.escapeJava; +import static org.apache.commons.lang3.StringEscapeUtils.escapeJava; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT; @@ -1827,7 +1827,7 @@ public BatchedListEntries getFilesBlockingDecom(long prevId, INodeFile inodeFile = ucFile.asFile(); String fullPathName = inodeFile.getFullPathName(); - if (org.apache.commons.lang.StringUtils.isEmpty(path) + if (org.apache.commons.lang3.StringUtils.isEmpty(path) || fullPathName.startsWith(path)) { openFileEntries.add(new OpenFileEntry(inodeFile.getId(), inodeFile.getFullPathName(), @@ -2383,7 +2383,7 @@ private HdfsFileStatus startFileInt(String src, boolean shouldReplicate = flag.contains(CreateFlag.SHOULD_REPLICATE); if (shouldReplicate && - (!org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName))) { + (!org.apache.commons.lang3.StringUtils.isEmpty(ecPolicyName))) { throw new HadoopIllegalArgumentException("SHOULD_REPLICATE flag and " + "ecPolicyName are exclusive parameters. Set both is not allowed!"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index 900f8a2291..5992e54124 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -107,6 +107,8 @@ public Long initialValue() { private static final String WRITE_LOCK_METRIC_PREFIX = "FSNWriteLock"; private static final String LOCK_METRIC_SUFFIX = "Nanos"; + private static final String OVERALL_METRIC_NAME = "Overall"; + FSNamesystemLock(Configuration conf, MutableRatesWithAggregation detailedHoldTimeMetrics) { this(conf, detailedHoldTimeMetrics, new Timer()); @@ -320,12 +322,17 @@ public int getQueueLength() { */ private void addMetric(String operationName, long value, boolean isWrite) { if (metricsEnabled) { - String metricName = - (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) + - org.apache.commons.lang.StringUtils.capitalize(operationName) + - LOCK_METRIC_SUFFIX; - detailedHoldTimeMetrics.add(metricName, value); + String opMetric = getMetricName(operationName, isWrite); + detailedHoldTimeMetrics.add(opMetric, value); + + String overallMetric = getMetricName(OVERALL_METRIC_NAME, isWrite); + detailedHoldTimeMetrics.add(overallMetric, value); } } + private static String getMetricName(String operationName, boolean isWrite) { + return (isWrite ? 
WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) + + org.apache.commons.lang3.StringUtils.capitalize(operationName) + + LOCK_METRIC_SUFFIX; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index 2f9bc370da..8392463d94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java index 9b23e09e49..0baf99d994 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java @@ -18,9 +18,9 @@ import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang.builder.CompareToBuilder; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.CompareToBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java index 2719c8857e..4d61d0f95b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.top.metrics; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index d8cbfc6b2e..9781ea14dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -22,7 +22,7 @@ import java.util.LinkedList; import java.util.List; -import org.apache.commons.lang.WordUtils; +import org.apache.commons.lang3.text.WordUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.conf.Configured; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index bec44a99e9..280a2d775c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.util; import com.google.common.base.Preconditions; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import java.util.Arrays; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index b55421c162..146ae6c9c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3540,6 +3540,16 @@ + + dfs.balancer.max-iteration-time + 1200000 + + Maximum amount of time (in milliseconds) an iteration can be run by the Balancer. After + this time the Balancer will stop the iteration, and reevaluate the work that + needs to be done to balance the cluster. The default value is 20 minutes. + + + dfs.block.invalidate.limit 1000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index 9cd46c191d..417d31ba52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -35,8 +35,8 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.collections.map.LinkedMap; -import org.apache.commons.lang.SystemUtils; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.SystemUtils; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 32d960ad6f..7027f3bc6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -26,7 +26,7 @@ import java.util.regex.Pattern; import com.google.common.collect.Ordering; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java new file mode 100644 index 0000000000..96c50938b3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestName; + +import java.io.IOException; + +public class TestHDFSMultipartUploader + extends AbstractSystemMultipartUploaderTest { + + private static MiniDFSCluster cluster; + private Path tmp; + + @Rule + public TestName name = new TestName(); + + @BeforeClass + public static void init() throws IOException { + HdfsConfiguration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf, + GenericTestUtils.getRandomizedTestDir()) + .numDataNodes(1) + .build(); + cluster.waitClusterUp(); + } + + @AfterClass + public static void cleanup() throws IOException { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Before + public void setup() throws IOException { + tmp = new Path(cluster.getFileSystem().getWorkingDirectory(), + name.getMethodName()); + cluster.getFileSystem().mkdirs(tmp); + } + + @Override + public FileSystem getFS() throws IOException { + return cluster.getFileSystem(); + } + + @Override + public Path getBaseTestPath() { + return tmp; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java index 72fc6e6274..7544835c7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 63199f31dd..e6a2a00252 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -81,7 +81,6 @@ import com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.UnhandledException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -2278,7 +2277,8 @@ 
public Boolean get() { ", current value = " + currentValue); return currentValue == expectedValue; } catch (Exception e) { - throw new UnhandledException("Test failed due to unexpected exception", e); + throw new RuntimeException( + "Test failed due to unexpected exception", e); } } }, 1000, 60000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index c352dc99a2..b19bdeab57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -37,7 +37,7 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.log4j.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index c0a595bcb7..42b4257d71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -38,7 +38,7 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java index cc456b244f..7aa9f2362d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java @@ -28,7 +28,7 @@ import java.util.ArrayList; import java.util.Arrays; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java index e2426907ca..3463f57379 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java @@ -28,7 +28,7 @@ import java.util.Set; import com.google.common.collect.Sets; -import org.apache.commons.lang.ClassUtils; +import org.apache.commons.lang3.ClassUtils; import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer; import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer; import org.apache.hadoop.hdfs.server.datanode.DataNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index 0b6bc6adfd..c87a6d17e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -19,7 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Supplier; -import org.apache.commons.lang.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java index 7c4763c13f..db97c02e09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java @@ -107,6 +107,8 @@ public class TestSecureEncryptionZoneWithKMS { // MiniKMS private static MiniKMS miniKMS; private final String testKey = "test_key"; + private static boolean testKeyCreated = false; + private static final long AUTH_TOKEN_VALIDITY = 1; // MiniDFS private MiniDFSCluster cluster; @@ -128,7 +130,7 @@ public static File getTestDir() throws Exception { } @Rule - public Timeout timeout = new Timeout(30000); + public Timeout timeout = new Timeout(120000); @BeforeClass public static void init() throws Exception { @@ -215,6 +217,9 @@ public static void init() throws Exception { "HTTP/localhost"); kmsConf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); kmsConf.set("hadoop.kms.acl.GENERATE_EEK", "hdfs"); + // set kms auth token expiration low for testCreateZoneAfterAuthTokenExpiry + kmsConf.setLong("hadoop.kms.authentication.token.validity", + AUTH_TOKEN_VALIDITY); Writer writer = new FileWriter(kmsFile); kmsConf.writeXml(writer); @@ -260,7 +265,10 @@ public void setup() throws Exception { cluster.waitActive(); // Create a test key - DFSTestUtil.createKey(testKey, cluster, conf); + if (!testKeyCreated) { + DFSTestUtil.createKey(testKey, cluster, conf); + testKeyCreated = true; + } } @After @@ -307,4 +315,26 @@ public Void run() throws IOException { } }); } + + @Test + public void testCreateZoneAfterAuthTokenExpiry() throws Exception { + final UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(hdfsPrincipal, keytab); + LOG.info("Created ugi: {} ", ugi); + + ugi.doAs((PrivilegedExceptionAction) () -> { + final Path zone = new Path("/expire1"); + fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(zone, testKey, NO_TRASH); + + final Path zone1 = new Path("/expire2"); + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + final long sleepInterval = (AUTH_TOKEN_VALIDITY + 1) * 1000; + LOG.info("Sleeping {} milliseconds to wait for kms auth token expiration", + sleepInterval); + Thread.sleep(sleepInterval); + dfsAdmin.createEncryptionZone(zone1, testKey, NO_TRASH); + return null; + }); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java index d03d095399..63ce45b72d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java @@ -35,7 +35,7 @@ import java.io.File; import java.util.Properties; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.http.HttpConfig; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 9579b82c09..35ebe781ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -73,7 +73,7 @@ import java.util.Set; import java.util.concurrent.TimeoutException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -1580,6 +1580,85 @@ public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception { CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true); } + + @Test(timeout = 100000) + public void testMaxIterationTime() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + int blockSize = 10*1024*1024; // 10MB block size + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); + // limit the worker thread count of Balancer to have only 1 queue per DN + conf.setInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY, 1); + // limit the bandwidth to 1 packet per sec to emulate slow block moves + conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, + 64 * 1024); + // set client socket timeout to have an IN_PROGRESS notification back from + // the DataNode about the copy every second.
+ conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2000L); + // set max iteration time to 2 seconds to time out before moving any block + conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 2000L); + // setup the cluster + final long capacity = 10L * blockSize; + final long[] dnCapacities = new long[] {capacity, capacity}; + final short rep = 1; + final long seed = 0xFAFAFA; + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + try { + cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + cluster.startDataNodes(conf, 1, true, null, null, dnCapacities); + cluster.waitClusterUp(); + cluster.waitActive(); + final Path path = new Path("/testMaxIterationTime.dat"); + DistributedFileSystem fs = cluster.getFileSystem(); + // fill the DN to 40% + DFSTestUtil.createFile(fs, path, 4L * blockSize, rep, seed); + // start a new DN + cluster.startDataNodes(conf, 1, true, null, null, dnCapacities); + cluster.triggerHeartbeats(); + // setup Balancer and run one iteration + List connectors = Collections.emptyList(); + try { + BalancerParameters bParams = BalancerParameters.DEFAULT; + connectors = NameNodeConnector.newNameNodeConnectors( + DFSUtil.getInternalNsRpcUris(conf), Balancer.class.getSimpleName(), + Balancer.BALANCER_ID_PATH, conf, bParams.getMaxIdleIteration()); + for (NameNodeConnector nnc : connectors) { + LOG.info("NNC to work on: " + nnc); + Balancer b = new Balancer(nnc, bParams, conf); + long startTime = Time.monotonicNow(); + Result r = b.runOneIteration(); + long runtime = Time.monotonicNow() - startTime; + assertEquals("We expect ExitStatus.IN_PROGRESS to be reported.", + ExitStatus.IN_PROGRESS, r.exitStatus); + // accept the runtime if it is under 3.5 seconds, as we need to wait for + // the IN_PROGRESS report from the DN, plus some spare time to finish. + // NOTE: This can be a source of flaky tests if the box is busy. The + // assertion here is based on the following: the Balancer is already set + // up, the iteration gets the blocks from the NN, and makes the decision + // to move 2 blocks. After that the PendingMoves are scheduled, the + // DataNode heartbeats in to the Balancer every second, and the iteration + // is two seconds long. This means that it will fail if the setup and the + // heartbeat from the DataNode take more than 500ms, as the iteration + // should end at the 3rd second from start. As the number of + // operations seems to be pretty low, and all communication happens + // locally, I think the possibility of a failure due to node busyness is + // low. + assertTrue("Unexpected iteration runtime: " + runtime + "ms > 3.5s", + runtime < 3500); + } + } finally { + for (NameNodeConnector nnc : connectors) { + IOUtils.cleanupWithLogger(null, nnc); + } + } + } finally { + cluster.shutdown(true, true); + } + } + /* * Test Balancer with Ram_Disk configured * One DN has two files on RAM_DISK, other DN has no files on RAM_DISK.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 987ba97d64..eb9461f746 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -38,7 +38,7 @@ import javax.management.StandardMBean; import com.google.common.math.LongMath; -import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java index dee2a905c8..f2e998e20e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java @@ -43,7 +43,7 @@ import java.util.List; import java.util.Scanner; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 36e7bb9840..62c91bf9e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -685,6 +685,52 @@ public void testMoverFailedRetry() throws Exception { } } + @Test(timeout=100000) + public void testBalancerMaxIterationTimeNotAffectMover() throws Exception { + long blockSize = 10*1024*1024; + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + conf.setInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, 1); + conf.setInt( + DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 1); + // set a fairly large block size to run into the limitation + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); + // set a max iteration time somewhat greater than zero so that the move + // time surely exceeds it + conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 200L); + conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 1); + // set client socket timeout to have an IN_PROGRESS notification back from + // the DataNode about the copy every second.
+ conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 1000L); + + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(2) + .storageTypes( + new StorageType[][] {{StorageType.DISK, StorageType.DISK}, + {StorageType.ARCHIVE, StorageType.ARCHIVE}}) + .build(); + try { + cluster.waitActive(); + final DistributedFileSystem fs = cluster.getFileSystem(); + final String file = "/testMaxIterationTime.dat"; + final Path path = new Path(file); + short rep_factor = 1; + int seed = 0xFAFAFA; + // write to DISK + DFSTestUtil.createFile(fs, path, 4L * blockSize, rep_factor, seed); + + // move to ARCHIVE + fs.setStoragePolicy(new Path(file), "COLD"); + int rc = ToolRunner.run(conf, new Mover.Cli(), + new String[] {"-p", file}); + Assert.assertEquals("Retcode expected to be ExitStatus.SUCCESS (0).", + ExitStatus.SUCCESS.getExitCode(), rc); + } finally { + cluster.shutdown(); + } + } + private final ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 7be645f36e..f990c5eb6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -42,7 +42,7 @@ import java.util.Properties; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index 3b601d5b4c..b85527a948 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -24,7 +24,7 @@ import java.io.IOException; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.commons.lang.reflect.FieldUtils; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 5b4f1f491b..76cc9063da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index c58e090333..551670e1d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -41,7 +41,7 @@ import java.util.LinkedList; import java.util.List; -import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java index 28169bbf16..1e8ee9c555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java @@ -29,7 +29,7 @@ import java.util.ArrayList; import java.util.Collection; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 46010e078d..10f571c4d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -84,6 +84,8 @@ public TestEditLogRace(boolean useAsyncEditLog) { TestEditLogRace.useAsyncEditLog = useAsyncEditLog; } + private static final String NAME_DIR = MiniDFSCluster.getBaseDirectory() + "name-0-1"; + private static final Log LOG = LogFactory.getLog(TestEditLogRace.class); // This test creates NUM_THREADS threads and each thread continuously writes @@ -363,8 +365,8 @@ private Configuration getConf() { useAsyncEditLog); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - //conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR); - //conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR); + conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); return conf; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index 2daf5c2cf6..49506fe54d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -333,7 +333,7 @@ public void testDetailedHoldMetrics() 
throws Exception { FSNamesystemLock fsLock = new FSNamesystemLock(conf, rates, timer); fsLock.readLock(); - timer.advanceNanos(1200000); + timer.advanceNanos(1300000); fsLock.readUnlock("foo"); fsLock.readLock(); timer.advanceNanos(2400000); @@ -353,12 +353,18 @@ public void testDetailedHoldMetrics() throws Exception { MetricsRecordBuilder rb = MetricsAsserts.mockMetricsRecordBuilder(); rates.snapshot(rb, true); - assertGauge("FSNReadLockFooNanosAvgTime", 1800000.0, rb); + assertGauge("FSNReadLockFooNanosAvgTime", 1850000.0, rb); assertCounter("FSNReadLockFooNanosNumOps", 2L, rb); assertGauge("FSNReadLockBarNanosAvgTime", 2000000.0, rb); assertCounter("FSNReadLockBarNanosNumOps", 1L, rb); assertGauge("FSNWriteLockBazNanosAvgTime", 1000000.0, rb); assertCounter("FSNWriteLockBazNanosNumOps", 1L, rb); + + // Overall + assertGauge("FSNReadLockOverallNanosAvgTime", 1900000.0, rb); + assertCounter("FSNReadLockOverallNanosNumOps", 3L, rb); + assertGauge("FSNWriteLockOverallNanosAvgTime", 1000000.0, rb); + assertCounter("FSNWriteLockOverallNanosNumOps", 1L, rb); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index d5f548736f..24016087da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -728,8 +728,8 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { assertTrue(nnDirs.iterator().hasNext()); assertEquals( "NN dir should be created after NN startup.", - nnDirStr, - nnDirs.iterator().next().getPath()); + new File(nnDirStr), + new File(nnDirs.iterator().next().getPath())); final File nnDir = new File(nnDirStr); assertTrue(nnDir.exists()); assertTrue(nnDir.isDirectory()); @@ -738,7 +738,7 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { /* set read only */ assertTrue( "Setting NN dir read only should succeed.", - nnDir.setReadOnly()); + FileUtil.setWritable(nnDir, false)); cluster.restartNameNodes(); fail("Restarting NN should fail on read only NN dir."); } catch (InconsistentFSStateException e) { @@ -750,7 +750,8 @@ public void testNNFailToStartOnReadOnlyNNDir() throws Exception { "storage directory does not exist or is not accessible.")))); } finally { /* set back to writable in order to clean it */ - assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true)); + assertTrue("Setting NN dir should succeed.", + FileUtil.setWritable(nnDir, true)); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index e34deead95..05cf2ea622 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.common.Storage; import 
org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; @@ -271,7 +272,8 @@ public void testVolumeFailures() throws Exception { File dataDir = new File(fsVolume.getBaseURI()); long capacity = fsVolume.getCapacity(); volumeReferences.close(); - DataNodeTestUtils.injectDataDirFailure(dataDir); + File storageDir = new File(dataDir, Storage.STORAGE_DIR_CURRENT); + DataNodeTestUtils.injectDataDirFailure(storageDir); DataNodeTestUtils.waitForDiskError(dn, fsVolume); DataNodeTestUtils.triggerHeartbeat(dn); BlockManagerTestUtil.checkHeartbeat(bm); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index 5da6a25055..4e2cedef56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -36,7 +36,7 @@ import net.jcip.annotations.NotThreadSafe; import org.apache.commons.collections.map.LinkedMap; -import org.apache.commons.lang.mutable.MutableBoolean; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 647327cc33..12452473d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -27,7 +27,7 @@ import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.text.StrBuilder; +import org.apache.commons.lang3.text.StrBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java index aa4d481915..b85a8d8b18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby; @@ -97,6 +98,17 @@ private void setUpHaCluster(boolean security) throws Exception { System.setOut(new PrintStream(out)); System.setErr(new PrintStream(err)); + + // Reduce the number of retries to speed up the tests. 
+ conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3); + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, + 500); + conf.setInt(HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY, 2); + conf.setInt(HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY, 2); + conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY, 0); + conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 0); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java index bdad46a4e6..04c85a12a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java @@ -21,7 +21,7 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 609923f6a1..b1b7b8f72d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -853,7 +853,7 @@ public void testDoubleTooManyFetchFailure() throws Exception { @Test - public void testAppDiognosticEventOnUnassignedTask() throws Exception { + public void testAppDiagnosticEventOnUnassignedTask() { ApplicationId appId = ApplicationId.newInstance(1, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( appId, 0); @@ -978,7 +978,7 @@ public void testTooManyFetchFailureAfterKill() throws Exception { } @Test - public void testAppDiognosticEventOnNewTask() throws Exception { + public void testAppDiagnosticEventOnNewTask() { ApplicationId appId = ApplicationId.newInstance(1, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( appId, 0); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 246986f3be..ebf9341048 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -34,8 +34,6 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.JsonMappingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -61,6 +59,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.api.records.ReservationId; @@ -71,8 +70,6 @@ class JobSubmitter { protected static final Logger LOG = LoggerFactory.getLogger(JobSubmitter.class); - private static final ObjectReader READER = - new ObjectMapper().readerFor(Map.class); private static final String SHUFFLE_KEYGEN_ALGORITHM = "HmacSHA1"; private static final int SHUFFLE_KEY_LENGTH = 64; private FileSystem jtFs; @@ -406,7 +403,8 @@ private void readTokensFromFiles(Configuration conf, Credentials credentials) try { // read JSON - Map nm = READER.readValue(new File(localFileName)); + Map nm = JsonSerialization.mapReader().readValue( + new File(localFileName)); for(Map.Entry ent: nm.entrySet()) { credentials.addSecretKey(new Text(ent.getKey()), ent.getValue() diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java index c7a737cf87..b16e127292 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java @@ -425,8 +425,8 @@ void createSplits(Map> nodeToBlocks, if (completedNodes.size() == totalNodes || totalLength == 0) { // All nodes have been walked over and marked as completed or all blocks // have been assigned. The rest should be handled via rackLock assignment. - LOG.info("DEBUG: Terminated node allocation with : CompletedNodes: " - + completedNodes.size() + ", size left: " + totalLength); + LOG.debug("Terminated node allocation with : CompletedNodes: {}, size left: {}", + completedNodes.size(), totalLength); break; } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java index 5316f383b0..d2116c041d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java @@ -409,7 +409,7 @@ boolean checkSymlink(File jar) { linkPath == null ? null : linkPath.getParent(); java.nio.file.Path normalizedLinkPath = linkPathParent == null ? 
null : linkPathParent.normalize(); - if (normalizedLinkPath != null && jarParent.equals( + if (normalizedLinkPath != null && jarParent.normalize().equals( normalizedLinkPath)) { LOG.info(String.format("Ignoring same directory link %s to %s", jarPath.toString(), link.toString())); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java index c12902c399..9c72f72713 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java @@ -440,6 +440,19 @@ public void testNativeIO() throws IOException { } Assert.assertTrue(uploader.checkSymlink(symlinkToTarget)); + // Create a symlink to the target with /./ in the path + symlinkToTarget = new File(parent.getAbsolutePath() + + "/./symlinkToTarget2.txt"); + try { + Files.createSymbolicLink( + Paths.get(symlinkToTarget.getAbsolutePath()), + Paths.get(targetFile.getAbsolutePath())); + } catch (UnsupportedOperationException e) { + // Symlinks are not supported, so ignore the test + Assume.assumeTrue(false); + } + Assert.assertTrue(uploader.checkSymlink(symlinkToTarget)); + // Create a symlink outside the current directory File symlinkOutside = new File(parent, "symlinkToParent.txt"); try { diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml index b31d158c13..d6b18b4e35 100644 --- a/hadoop-maven-plugins/pom.xml +++ b/hadoop-maven-plugins/pom.xml @@ -26,7 +26,7 @@ maven-plugin Apache Hadoop Maven Plugins - 3.0 + 3.0.5 3.5.1 @@ -45,6 +45,14 @@ maven-plugin-annotations ${maven.plugin-tools.version} provided + + + + org.apache.maven + maven-artifact + + commons-io @@ -60,16 +68,28 @@ ${maven-shade-plugin.version} provided - + - org.apache.maven.shared - maven-dependency-tree + org.apache.maven + maven-artifact + + + org.apache.maven + maven-compat + + + org.apache.maven + maven-core + + + org.apache.maven + maven-model + + + org.apache.maven + maven-plugin-api - org.vafer jdependency diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java index e7ab663e42..5bf84c21fe 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.maven.plugin.resourcegz; -import com.google.inject.internal.util.Lists; +import com.google.common.collect.Lists; import org.apache.commons.io.IOUtils; import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh index 0e212a2834..ee9c6b80a5 100755 --- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh +++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh @@ -15,4 +15,4 @@ # limitations under the License. 
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -$DIR/robot.sh $DIR/../../src/test/robotframework/acceptance \ No newline at end of file +$DIR/robot.sh $DIR/../../src/test/acceptance diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh new file mode 100755 index 0000000000..9f1d367141 --- /dev/null +++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -x + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +#Dir od the definition of the dind based test exeucution container +DOCKERDIR="$DIR/../docker" + +#Dir to save the results +TARGETDIR="$DIR/../../target/dnd" + +#Dir to mount the distribution from +OZONEDIST="$DIR/../../../../hadoop-dist/target/ozone" + +#Name and imagename of the temporary, dind based test containers +DOCKER_IMAGE_NAME=ozoneacceptance +DOCKER_INSTANCE_NAME="${DOCKER_INSTANCE_NAME:-ozoneacceptance}" + +teardown() { + docker stop "$DOCKER_INSTANCE_NAME" +} + +trap teardown EXIT + +#Make sure it will work even if the ozone is built by an other user. We +# eneable to run the distribution by an other user +mkdir -p "$TARGETDIR" +mkdir -p "$OZONEDIST/logs" +chmod o+w "$OZONEDIST/logs" || true +chmod -R o+w "$OZONEDIST/etc/hadoop" || true +chmod o+w "$OZONEDIST" || true + +rm "$TARGETDIR/docker-compose.log" +docker rm "$DOCKER_INSTANCE_NAME" || true +docker build -t "$DOCKER_IMAGE_NAME" $DIR/../docker + +#Starting the dind based environment +docker run --rm -v $DIR/../../../..:/opt/hadoop --privileged -d --name "$DOCKER_INSTANCE_NAME" $DOCKER_IMAGE_NAME +sleep 5 + +#Starting the tests +docker exec "$DOCKER_INSTANCE_NAME" /opt/hadoop/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh +RESULT=$? + +docker cp "$DOCKER_INSTANCE_NAME:/root/log.html" "$TARGETDIR/" +docker cp "$DOCKER_INSTANCE_NAME:/root/junit-results.xml" "$TARGETDIR/" +docker cp "$DOCKER_INSTANCE_NAME:/root/docker-compose.log" "$TARGETDIR/" +exit $RESULT diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh index b651f76d2f..ef2a111066 100755 --- a/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh +++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh @@ -14,10 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -x + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -#basedir is the directory of the whole hadoop project. Used to calculate the -#exact path to the hadoop-dist project -BASEDIR=${DIR}/../../../.. if [ ! "$(which robot)" ] ; then echo "" @@ -29,10 +28,10 @@ if [ ! 
"$(which robot)" ] ; then exit -1 fi -OZONEDISTDIR="$BASEDIR/hadoop-dist/target/ozone" +OZONEDISTDIR="$DIR/../../../../hadoop-dist/target/ozone" if [ ! -d "$OZONEDISTDIR" ]; then echo "Ozone can't be found in the $OZONEDISTDIR." echo "You may need a full build with -Phdds and -Pdist profiles" exit -1 fi -robot -v basedir:$BASEDIR $@ +robot -x junit-results.xml "$@" diff --git a/hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile b/hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile new file mode 100644 index 0000000000..06feda6221 --- /dev/null +++ b/hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM docker:18-dind +RUN apk add --update python3 bash curl jq sudo +RUN pip3 install robotframework docker-compose +WORKDIR /root +USER root diff --git a/hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml b/hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml new file mode 100644 index 0000000000..6f16b0ac3b --- /dev/null +++ b/hadoop-ozone/acceptance-test/dev-support/docker/docker-compose.yaml @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3" +services: + robotenv: + build: . + privileged: true + volumes: + - ../../../..:/opt/hadoop diff --git a/hadoop-ozone/acceptance-test/pom.xml b/hadoop-ozone/acceptance-test/pom.xml index ef45c443e6..fee41f1d49 100644 --- a/hadoop-ozone/acceptance-test/pom.xml +++ b/hadoop-ozone/acceptance-test/pom.xml @@ -43,6 +43,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> run + src/test/acceptance basedir:${project.basedir}/../.. 
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/.env b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/.env similarity index 93% rename from hadoop-ozone/acceptance-test/src/test/compose/.env rename to hadoop-ozone/acceptance-test/src/test/acceptance/basic/.env index cf22168909..98234cb112 100644 --- a/hadoop-ozone/acceptance-test/src/test/compose/.env +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/.env @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -OZONEDIR=../../../hadoop-dist/target/ozone +OZONEDIR=../../../../../../hadoop-dist/target/ozone diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot new file mode 100644 index 0000000000..c741588c19 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Smoketest ozone cluster startup +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 5 +Suite Teardown Teardown Ozone cluster +Resource ../commonlib.robot + +*** Variables *** +${COMMON_REST_HEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. 
+ + +*** Test Cases *** + +Test rest interface + ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1" + Should contain ${result} 201 Created + ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" + Should contain ${result} 201 Created + ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" + Should contain ${result} 200 OK + ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1" + Should contain ${result} 200 OK + +Check webui static resources + ${result} = Execute on scm curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js + Should contain ${result} 200 + ${result} = Execute on ksm curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js + Should contain ${result} 200 + +Start freon testing + ${result} = Execute on ksm ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10 + Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 + Should Not Contain ${result} ERROR diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml new file mode 100644 index 0000000000..b50f42d3e9 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3" +services: + datanode: + image: apache/hadoop-runner + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9864 + command: ["/opt/hadoop/bin/ozone","datanode"] + env_file: + - ./docker-config + ksm: + image: apache/hadoop-runner + hostname: ksm + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9874 + environment: + ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + env_file: + - ./docker-config + command: ["/opt/hadoop/bin/ozone","ksm"] + scm: + image: apache/hadoop-runner + volumes: + - ${OZONEDIR}:/opt/hadoop + ports: + - 9876 + env_file: + - ./docker-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + command: ["/opt/hadoop/bin/ozone","scm"] diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config similarity index 83% rename from hadoop-ozone/acceptance-test/src/test/compose/docker-config rename to hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config index 0591a7aac2..c3ec2ef71b 100644 --- a/hadoop-ozone/acceptance-test/src/test/compose/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 OZONE-SITE.XML_ozone.scm.names=scm @@ -24,13 +23,11 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode +OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.category.org.apache.hadoop.util.NativeCodeLoader=ERROR diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot new file mode 100644 index 0000000000..9521ad60be --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot @@ -0,0 +1,85 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone shell CLI usage +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 5 +Suite Teardown Teardown Ozone cluster +Resource ../commonlib.robot +Test Timeout 2 minute + +*** Variables *** +${basedir} +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. + +*** Test Cases *** +RestClient without http port + Test ozone shell http:// ksm restwoport True + +RestClient with http port + Test ozone shell http:// ksm:9874 restwport True + +RestClient without host name + Test ozone shell http:// ${EMPTY} restwohost True + +RpcClient with port + Test ozone shell o3:// ksm:9862 rpcwoport False + +RpcClient without host + Test ozone shell o3:// ${EMPTY} rpcwport False + +RpcClient without scheme + Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme False + + +*** Keywords *** +Test ozone shell + [arguments] ${protocol} ${server} ${volume} ${withkeytest} + ${result} = Execute on datanode ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root + Should not contain ${result} Failed + Should contain ${result} Creating Volume: ${volume} + ${result} = Execute on datanode ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' + Should contain ${result} createdOn + Execute on datanode ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB + ${result} = Execute on datanode ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' + Should Be Equal ${result} bill + ${result} = Execute on datanode ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size' + Should Be Equal ${result} 10 + Execute on datanode ozone oz -createBucket ${protocol}${server}/${volume}/bb1 + ${result} = Execute on datanode ozone oz -infoBucket ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' + Should Be Equal ${result} DISK + ${result} = Execute on datanode ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' + Should Be Equal ${result} GROUP + ${result} = Execute on datanode ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' + Should Be Equal ${result} USER + ${result} = Execute on datanode ozone oz -listBucket o3://ksm/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' + Should Be Equal ${result} ${volume} + Run Keyword and Return If ${withkeytest} Test key handling ${protocol} ${server} ${volume} + Execute on datanode ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1 + Execute on datanode ozone oz -deleteVolume ${protocol}${server}/${volume} -user bilbo + +Test key handling + [arguments] ${protocol} ${server} ${volume} + Execute on datanode ozone oz -putKey ${protocol}${server}/${volume}/bb1/key1 -file NOTICE.txt + Execute on datanode rm -f NOTICE.txt.1 + Execute on datanode ozone oz -getKey ${protocol}${server}/${volume}/bb1/key1 -file NOTICE.txt.1 + Execute on datanode ls -l NOTICE.txt.1 + ${result} = Execute on datanode ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' + Should contain ${result} createdOn + ${result} = Execute on datanode ozone oz -listKey o3://ksm/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' + Should Be Equal ${result} key1 + Execute on datanode ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot similarity index 53% rename from hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot rename to hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot index 7bb60b6a8c..a5ea30af34 100644 --- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot @@ -13,21 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -*** Settings *** -Documentation Smoke test to start cluster with docker-compose environments. 
-Library OperatingSystem -Suite Setup Startup Ozone Cluster -Suite Teardown Teardown Ozone Cluster +*** Keywords *** -*** Variables *** -${COMMON_REST_HEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" -${basedir} -*** Test Cases *** +Startup Ozone cluster with size + [arguments] ${datanodeno} + ${rc} ${output} = Run docker compose down + Run echo "Starting new docker-compose environment" >> docker-compose.log + ${rc} ${output} = Run docker compose up -d + Should Be Equal As Integers ${rc} 0 + Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening + Daemons are running without error + Scale datanodes up 5 Daemons are running without error Is daemon running without error ksm Is daemon running without error scm - Is daemon running without error namenode Is daemon running without error datanode Check if datanode is connected to the scm @@ -37,38 +37,15 @@ Scale it up to 5 datanodes Scale datanodes up 5 Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 5 -Test rest interface - ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1" - Should contain ${result} 201 Created - ${result} = Execute on datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" - Should contain ${result} 201 Created - ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1" - Should contain ${result} 200 OK - ${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1" - Should contain ${result} 200 OK +Scale datanodes up + [arguments] ${datanodeno} + Run docker compose scale datanode=${datanodeno} + Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes ${datanodeno} -Check webui static resources - ${result} = Execute on scm curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js - Should contain ${result} 200 - ${result} = Execute on ksm curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js - Should contain ${result} 200 - -Start freon testing - ${result} = Execute on ksm ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10 - Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 - Should Not Contain ${result} ERROR - -*** Keywords *** - -Startup Ozone Cluster - ${rc} ${output} = Run docker compose down - ${rc} ${output} = Run docker compose up -d - Should Be Equal As Integers ${rc} 0 - Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening - -Teardown Ozone Cluster +Teardown Ozone cluster Run docker compose down - + Run docker compose logs >> docker-compose.log + Is daemon running without error [arguments] ${name} ${result} = Run docker ps @@ -86,19 +63,16 @@ Have healthy datanodes ${result} = Execute on scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' Should Be Equal ${result} ${requirednodes} -Scale datanodes up - [arguments] ${requirednodes} - Run docker compose scale datanode=${requirednodes} - Execute on [arguments] ${componentname} ${command} - ${rc} ${return} = Run docker compose exec ${componentname} ${command} + ${rc} ${return} = Run docker compose exec -T ${componentname} ${command} [return] ${return} Run docker compose [arguments] ${command} - Set Environment 
Variable OZONEDIR ${basedir}/hadoop-dist/target/ozone - ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command} + Set Environment Variable COMPOSE_INTERACTIVE_NO_CLI 1 + Set Environment Variable OZONEDIR ${PROJECTDIR}/hadoop-dist/target/ozone + ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${COMPOSEFILE} ${command} Log ${output} Should Be Equal As Integers ${rc} 0 [return] ${rc} ${output} diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env new file mode 100644 index 0000000000..98234cb112 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/.env @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OZONEDIR=../../../../../../hadoop-dist/target/ozone diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml similarity index 86% rename from hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml rename to hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml index 44bd4a0aae..12022dfe61 100644 --- a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml @@ -16,18 +16,6 @@ version: "3" services: - namenode: - image: apache/hadoop-runner - hostname: namenode - volumes: - - ${OZONEDIR}:/opt/hadoop - ports: - - 9870 - environment: - ENSURE_NAMENODE_DIR: /data/namenode - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/hdfs","namenode"] datanode: image: apache/hadoop-runner volumes: @@ -60,3 +48,12 @@ services: environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["/opt/hadoop/bin/ozone","scm"] + hadooplast: + image: flokkr/hadoop:3.1.0 + volumes: + - ${OZONEDIR}:/opt/ozone + env_file: + - ./docker-config + environment: + HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar + command: ["watch","-n","100000","ls"] diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config new file mode 100644 index 0000000000..e06d434bb4 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem +OZONE-SITE.XML_ozone.ksm.address=ksm +OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.enabled=True +OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.handler.type=distributed +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s +HDFS-SITE.XML_rpc.metrics.quantile.enable=true +HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 +LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout +LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender +LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +LOG4J.PROPERTIES_log4j.category.org.apache.hadoop.util.NativeCodeLoader=ERROR diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot new file mode 100644 index 0000000000..9e8a5d2004 --- /dev/null +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Ozonefs test +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 5 +Suite Teardown Teardown Ozone cluster +Resource ../commonlib.robot + +*** Variables *** +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. 
+ + +*** Test Cases *** +Create volume and bucket + Execute on datanode ozone oz -createVolume http://ksm/fstest -user bilbo -quota 100TB -root + Execute on datanode ozone oz -createBucket http://ksm/fstest/bucket1 + +Check volume from ozonefs + ${result} = Execute on hadooplast hdfs dfs -ls o3://bucket1.fstest/ + +Create directory from ozonefs + Execute on hadooplast hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep + ${result} = Execute on ksm ozone oz -listKey o3://ksm/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + Should contain ${result} testdir/deep diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot deleted file mode 100644 index 1a91a9388e..0000000000 --- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot +++ /dev/null @@ -1,256 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoke test to start cluster with docker-compose environments. -Library OperatingSystem -Suite Setup Startup Ozone Cluster -Suite Teardown Teardown Ozone Cluster - -*** Variables *** -${basedir} -*** Test Cases *** - -Daemons are running without error - Is daemon running without error ksm - Is daemon running without error scm - Is daemon running without error namenode - Is daemon running without error datanode - -Check if datanode is connected to the scm - Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 1 - -Scale it up to 5 datanodes - Scale datanodes up 5 - Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 5 - -Test ozone shell (RestClient without http port) - Execute on datanode ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume http://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket http://ksm/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket http://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt - Execute on datanode rm -f NOTICE.txt.1 - Execute on datanode ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1 - Execute on datanode ls -l NOTICE.txt.1 - ${result} = Execute on datanode ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute on datanode ozone oz -deleteKey http://ksm/hive/bb1/key1 -v - Execute on datanode ozone oz -deleteBucket http://ksm/hive/bb1 - Execute on datanode ozone oz -deleteVolume http://ksm/hive -user bilbo - -Test ozone shell (RestClient with http port) - Execute on datanode ozone oz -createVolume http://ksm:9874/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume http://ksm:9874 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume http://ksm:9874/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket http://ksm:9874/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket http://ksm:9874/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket http://ksm:9874/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket http://ksm:9874/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -putKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt - Execute on datanode rm -f NOTICE.txt.1 - Execute on datanode ozone oz -getKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt.1 - Execute on datanode ls -l NOTICE.txt.1 - ${result} = Execute on datanode ozone oz -infoKey http://ksm:9874/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute on datanode ozone oz -deleteKey http://ksm:9874/hive/bb1/key1 -v - Execute on datanode ozone oz -deleteBucket http://ksm:9874/hive/bb1 - Execute on datanode ozone oz -deleteVolume http://ksm:9874/hive -user bilbo - -Test ozone shell (RestClient without hostname) - Execute on datanode ozone oz -createVolume http:///hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume http:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume http:///hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket http:///hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket http:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket http:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket http:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -putKey http:///hive/bb1/key1 -file NOTICE.txt - Execute on datanode rm -f NOTICE.txt.1 - Execute on datanode ozone oz -getKey http:///hive/bb1/key1 -file NOTICE.txt.1 - Execute on datanode ls -l NOTICE.txt.1 - ${result} = Execute on datanode ozone oz -infoKey http:///hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.keyName=="key1")' - Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' - Should Be Equal ${result} key1 - Execute on datanode ozone oz -deleteKey http:///hive/bb1/key1 -v - Execute on datanode ozone oz -deleteBucket http:///hive/bb1 - Execute on datanode ozone oz -deleteVolume http:///hive -user bilbo - -Test ozone shell (RpcClient without http port) - Execute on datanode ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket o3://ksm/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket o3://ksm/hive/bb1 - Execute on datanode ozone oz -deleteVolume o3://ksm/hive -user bilbo - -Test ozone shell (RpcClient with http port) - Execute on datanode ozone oz -createVolume o3://ksm:9862/hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume o3://ksm:9862 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume o3://ksm:9862/hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket o3://ksm:9862/hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket o3://ksm:9862/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm:9862/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket o3://ksm:9862/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3://ksm:9862/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket o3://ksm:9862/hive/bb1 - Execute on datanode ozone oz -deleteVolume o3://ksm:9862/hive -user bilbo - -Test ozone shell (RpcClient without hostname) - Execute on datanode ozone oz -createVolume o3:///hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume o3:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume o3:///hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket o3:///hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket o3:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket o3:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket o3:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket o3:///hive/bb1 - Execute on datanode ozone oz -deleteVolume o3:///hive -user bilbo - -Test ozone shell (no scheme - RpcClient used by default) - Execute on datanode ozone oz -createVolume /hive -user bilbo -quota 100TB -root - ${result} = Execute on datanode ozone oz -listVolume / -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")' - Should contain ${result} createdOn - Execute on datanode ozone oz -updateVolume /hive -user bill -quota 10TB - ${result} = Execute on datanode ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name' - Should Be Equal ${result} bill - ${result} = Execute on datanode ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size' - Should Be Equal ${result} 10 - Execute on datanode ozone oz -createBucket /hive/bb1 - ${result} = Execute on datanode ozone oz -infoBucket /hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute on datanode ozone oz -updateBucket /hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type' - Should Be Equal ${result} GROUP - ${result} = Execute on datanode ozone oz -updateBucket /hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' - Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket /hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' - Should Be Equal ${result} hive - Execute on datanode ozone oz -deleteBucket /hive/bb1 - Execute on datanode ozone oz -deleteVolume /hive -user bilbo - -*** Keywords *** - -Startup Ozone Cluster - ${rc} ${output} = Run docker compose down - ${rc} ${output} = Run docker compose up -d - Should Be Equal As Integers ${rc} 0 - Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening - -Teardown Ozone Cluster - Run docker compose down - -Is daemon running without error - [arguments] ${name} - ${result} = Run docker ps - Should contain ${result} _${name}_1 - ${rc} ${result} = Run docker compose logs ${name} - Should not contain ${result} ERROR - -Is Daemon started - [arguments] ${name} ${expression} - ${rc} ${result} = Run docker compose logs - Should contain ${result} ${expression} - -Have healthy datanodes - [arguments] ${requirednodes} - ${result} = Execute on scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' - Should Be Equal ${result} ${requirednodes} - -Scale datanodes up - [arguments] ${requirednodes} - Run docker compose scale datanode=${requirednodes} - -Execute on - [arguments] ${componentname} ${command} - ${rc} ${return} = Run docker compose exec ${componentname} ${command} - [return] ${return} - -Run docker compose - [arguments] ${command} - Set Environment Variable OZONEDIR ${basedir}/hadoop-dist/target/ozone - ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${rc} ${output} diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 6843bddca5..390f0895b7 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -34,7 +34,7 @@ function hadoop_usage hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" - hadoop_add_subcommand "datanode" daemon "run a DFS datanode" + hadoop_add_subcommand "datanode" daemon "run a HDDS datanode" hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" hadoop_add_subcommand "freon" client "runs an ozone data generator" hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning." 
@@ -45,7 +45,7 @@ function hadoop_usage hadoop_add_subcommand "o3" client "command line interface for ozone" hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data" hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" - hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager " + hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager" hadoop_add_subcommand "version" client "print the version" hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path" @@ -68,10 +68,7 @@ function ozonecmd_case ;; datanode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter" - HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode' - hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR - hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR + HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService ;; envvars) echo "JAVA_HOME='${JAVA_HOME}'" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 0254984d23..50cdd54858 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.XceiverClient; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.TestGenericTestUtils; @@ -92,13 +91,12 @@ public void testStartMultipleDatanodes() throws Exception { for(HddsDatanodeService dn : datanodes) { // Create a single member pipe line DatanodeDetails datanodeDetails = dn.getDatanodeDetails(); - final PipelineChannel pipelineChannel = - new PipelineChannel(datanodeDetails.getUuidString(), + final Pipeline pipeline = + new Pipeline(datanodeDetails.getUuidString(), HddsProtos.LifeCycleState.OPEN, HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE, "test"); - pipelineChannel.addMember(datanodeDetails); - Pipeline pipeline = new Pipeline(pipelineChannel); + pipeline.addMember(datanodeDetails); // Verify client is able to connect to the container try (XceiverClient client = new XceiverClient(pipeline, conf)){ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 9decdb9fc0..b720549ea5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.KeyData; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import 
org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; @@ -136,14 +135,14 @@ public static Pipeline createPipeline( Preconditions.checkArgument(i.hasNext()); final DatanodeDetails leader = i.next(); String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(3); - final PipelineChannel pipelineChannel = - new PipelineChannel(leader.getUuidString(), LifeCycleState.OPEN, + final Pipeline pipeline = + new Pipeline(leader.getUuidString(), LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName); - pipelineChannel.addMember(leader); + pipeline.addMember(leader); for(; i.hasNext();) { - pipelineChannel.addMember(i.next()); + pipeline.addMember(i.next()); } - return new Pipeline(pipelineChannel); + return pipeline; } /** @@ -207,8 +206,6 @@ public static ContainerCommandRequestProto getWriteChunkRequest( ContainerProtos.WriteChunkRequestProto .newBuilder(); - Pipeline newPipeline = - new Pipeline(pipeline.getPipelineChannel()); writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf()); byte[] data = getData(datalen); @@ -223,7 +220,7 @@ public static ContainerCommandRequestProto getWriteChunkRequest( request.setCmdType(ContainerProtos.Type.WriteChunk); request.setWriteChunk(writeRequest); request.setTraceID(UUID.randomUUID().toString()); - request.setDatanodeUuid(newPipeline.getLeader().getUuidString()); + request.setDatanodeUuid(pipeline.getLeader().getUuidString()); return request.build(); } @@ -241,8 +238,6 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( throws Exception { ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest = ContainerProtos.PutSmallFileRequestProto.newBuilder(); - Pipeline newPipeline = - new Pipeline(pipeline.getPipelineChannel()); byte[] data = getData(dataLen); ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen); setDataChecksum(info, data); @@ -266,7 +261,7 @@ public static ContainerCommandRequestProto getWriteSmallFileRequest( request.setCmdType(ContainerProtos.Type.PutSmallFile); request.setPutSmallFile(smallFileRequest); request.setTraceID(UUID.randomUUID().toString()); - request.setDatanodeUuid(newPipeline.getLeader().getUuidString()); + request.setDatanodeUuid(pipeline.getLeader().getUuidString()); return request.build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 0686e4e5d3..8d01c806a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -280,10 +280,11 @@ public void testBlockDeletionTimeout() throws Exception { ContainerManager containerManager = createContainerManager(conf); createToDeleteBlocks(containerManager, conf, 1, 3, 1, chunksDir); - // set timeout value as 1ms to trigger timeout behavior + // set timeout value as 1ns to trigger timeout behavior long timeout = 1; - BlockDeletingService svc = - new BlockDeletingService(containerManager, 1000, timeout, conf); + BlockDeletingService svc = new BlockDeletingService(containerManager, + TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, + conf); svc.start(); LogCapturer log = 
LogCapturer.captureLogs(BackgroundService.LOG); @@ -303,7 +304,9 @@ public void testBlockDeletionTimeout() throws Exception { // test for normal case that doesn't have timeout limitation timeout = 0; createToDeleteBlocks(containerManager, conf, 1, 3, 1, chunksDir); - svc = new BlockDeletingService(containerManager, 1000, timeout, conf); + svc = new BlockDeletingService(containerManager, + TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, + conf); svc.start(); // get container meta data diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java new file mode 100644 index 0000000000..a5b101fa70 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.rest.OzoneException; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.apache.hadoop.test.GenericTestUtils; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_GB; +import org.junit.Test; + +/** + * Tests the behavior of the datanode, when replicate container command is + * received. + */ +public class TestReplicateContainerHandler { + + @Test + public void test() throws IOException, TimeoutException, InterruptedException, + OzoneException { + + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(ReplicateContainerCommandHandler.LOG); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1"); + MiniOzoneCluster cluster = + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); + cluster.waitForClusterToBeReady(); + + DatanodeDetails datanodeDetails = + cluster.getHddsDatanodes().get(0).getDatanodeDetails(); + //send the order to replicate the container + cluster.getStorageContainerManager().getScmNodeManager() + .addDatanodeCommand(datanodeDetails.getUuid(), + new ReplicateContainerCommand(1L, + new ArrayList<>())); + + //TODO: here we test only the serialization/unserialization as + // the implementation is not yet done + GenericTestUtils + .waitFor(() -> logCapturer.getOutput().contains("not yet handled"), 500, + 5 * 1000); + + } + +} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index cfd1159469..c2f5eb7f76 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.genconf; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.test.GenericTestUtils; import org.hamcrest.CoreMatchers; import org.junit.AfterClass; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index 4d70af84a2..b4ed2b12c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -51,12 +51,9 @@ 
import java.util.HashMap; import java.util.UUID; -import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; import static org.apache.hadoop.ozone.OzoneConsts.KB; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * This class tests the CLI that transforms container into SQLite DB files. @@ -176,34 +173,6 @@ public void shutdown() throws InterruptedException { } } - @Test - public void testConvertNodepoolDB() throws Exception { - String dbOutPath = GenericTestUtils.getTempPath( - UUID.randomUUID() + "/out_sql.db"); - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); - String dbPath = dbRootPath + "/" + NODEPOOL_DB; - String[] args = {"-p", dbPath, "-o", dbOutPath}; - - cli.run(args); - - // verify the sqlite db - HashMap expectedPool = new HashMap<>(); - for (DatanodeDetails dnid : nodeManager.getAllNodes()) { - expectedPool.put(dnid.getUuidString(), "DefaultNodePool"); - } - Connection conn = connectDB(dbOutPath); - String sql = "SELECT * FROM nodePool"; - ResultSet rs = executeQuery(conn, sql); - while(rs.next()) { - String datanodeUUID = rs.getString("datanodeUUID"); - String poolName = rs.getString("poolName"); - assertTrue(expectedPool.remove(datanodeUUID).equals(poolName)); - } - assertEquals(0, expectedPool.size()); - - Files.delete(Paths.get(dbOutPath)); - } - @Test public void testConvertContainerDB() throws Exception { String dbOutPath = GenericTestUtils.getTempPath( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java deleted file mode 100644 index ecddf8eaca..0000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java +++ /dev/null @@ -1,253 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.scm; - -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getLongGauge; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.report - .SCMDatanodeContainerReportHandler; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * This class tests the metrics of Storage Container Manager. - */ -public class TestSCMMetrics { - /** - * Set the timeout for each test. - */ - @Rule - public Timeout testTimeout = new Timeout(90000); - - private static MiniOzoneCluster cluster = null; - - @Test - public void testContainerMetrics() throws Exception { - int nodeCount = 2; - int numReport = 2; - long size = OzoneConsts.GB * 5; - long used = OzoneConsts.GB * 2; - long readBytes = OzoneConsts.GB * 1; - long writeBytes = OzoneConsts.GB * 2; - int keyCount = 1000; - int readCount = 100; - int writeCount = 50; - OzoneConfiguration conf = new OzoneConfiguration(); - - try { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(nodeCount).build(); - cluster.waitForClusterToBeReady(); - - ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes, - writeBytes, readCount, writeCount); - StorageContainerManager scmManager = cluster.getStorageContainerManager(); - DatanodeDetails fstDatanodeDetails = TestUtils.getDatanodeDetails(); - ContainerReportsProto request = createContainerReport(numReport, stat); - String fstDatanodeUuid = fstDatanodeDetails.getUuidString(); - SCMDatanodeContainerReportHandler containerReportHandler = - new SCMDatanodeContainerReportHandler(); - containerReportHandler.setConf(conf); - containerReportHandler.init(scmManager); - containerReportHandler.processReport( - fstDatanodeDetails, request); - - // verify container stat metrics - MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(size * numReport, - getLongGauge("LastContainerReportSize", scmMetrics)); - assertEquals(used * numReport, - getLongGauge("LastContainerReportUsed", scmMetrics)); - assertEquals(readBytes * numReport, - getLongGauge("LastContainerReportReadBytes", scmMetrics)); - assertEquals(writeBytes * numReport, - getLongGauge("LastContainerReportWriteBytes", scmMetrics)); - - assertEquals(keyCount * numReport, - getLongGauge("LastContainerReportKeyCount", scmMetrics)); - assertEquals(readCount * numReport, - 
getLongGauge("LastContainerReportReadCount", scmMetrics)); - assertEquals(writeCount * numReport, - getLongGauge("LastContainerReportWriteCount", scmMetrics)); - - // add one new report - DatanodeDetails sndDatanodeDetails = TestUtils.getDatanodeDetails(); - request = createContainerReport(1, stat); - String sndDatanodeUuid = sndDatanodeDetails.getUuidString(); - containerReportHandler.processReport( - sndDatanodeDetails, request); - - scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(size * (numReport + 1), - getLongCounter("ContainerReportSize", scmMetrics)); - assertEquals(used * (numReport + 1), - getLongCounter("ContainerReportUsed", scmMetrics)); - assertEquals(readBytes * (numReport + 1), - getLongCounter("ContainerReportReadBytes", scmMetrics)); - assertEquals(writeBytes * (numReport + 1), - getLongCounter("ContainerReportWriteBytes", scmMetrics)); - - assertEquals(keyCount * (numReport + 1), - getLongCounter("ContainerReportKeyCount", scmMetrics)); - assertEquals(readCount * (numReport + 1), - getLongCounter("ContainerReportReadCount", scmMetrics)); - assertEquals(writeCount * (numReport + 1), - getLongCounter("ContainerReportWriteCount", scmMetrics)); - - // Re-send reports but with different value for validating - // the aggregation. - stat = new ContainerStat(100, 50, 3, 50, 60, 5, 6); - containerReportHandler.processReport( - fstDatanodeDetails, createContainerReport(1, stat)); - - stat = new ContainerStat(1, 1, 1, 1, 1, 1, 1); - containerReportHandler.processReport( - sndDatanodeDetails, createContainerReport(1, stat)); - - // the global container metrics value should be updated - scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(101, getLongCounter("ContainerReportSize", scmMetrics)); - assertEquals(51, getLongCounter("ContainerReportUsed", scmMetrics)); - assertEquals(51, getLongCounter("ContainerReportReadBytes", scmMetrics)); - assertEquals(61, getLongCounter("ContainerReportWriteBytes", scmMetrics)); - - assertEquals(4, getLongCounter("ContainerReportKeyCount", scmMetrics)); - assertEquals(6, getLongCounter("ContainerReportReadCount", scmMetrics)); - assertEquals(7, getLongCounter("ContainerReportWriteCount", scmMetrics)); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - @Test - public void testStaleNodeContainerReport() throws Exception { - int nodeCount = 2; - int numReport = 2; - long size = OzoneConsts.GB * 5; - long used = OzoneConsts.GB * 2; - long readBytes = OzoneConsts.GB * 1; - long writeBytes = OzoneConsts.GB * 2; - int keyCount = 1000; - int readCount = 100; - int writeCount = 50; - OzoneConfiguration conf = new OzoneConfiguration(); - - try { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(nodeCount).build(); - cluster.waitForClusterToBeReady(); - - ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes, - writeBytes, readCount, writeCount); - StorageContainerManager scmManager = cluster.getStorageContainerManager(); - - DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails(); - SCMDatanodeContainerReportHandler containerReportHandler = - new SCMDatanodeContainerReportHandler(); - containerReportHandler.setConf(conf); - containerReportHandler.init(scmManager); - ContainerReportsProto request = createContainerReport(numReport, stat); - containerReportHandler.processReport( - datanodeDetails, request); - - MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME); - assertEquals(size * numReport, - 
getLongCounter("ContainerReportSize", scmMetrics)); - assertEquals(used * numReport, - getLongCounter("ContainerReportUsed", scmMetrics)); - assertEquals(readBytes * numReport, - getLongCounter("ContainerReportReadBytes", scmMetrics)); - assertEquals(writeBytes * numReport, - getLongCounter("ContainerReportWriteBytes", scmMetrics)); - - assertEquals(keyCount * numReport, - getLongCounter("ContainerReportKeyCount", scmMetrics)); - assertEquals(readCount * numReport, - getLongCounter("ContainerReportReadCount", scmMetrics)); - assertEquals(writeCount * numReport, - getLongCounter("ContainerReportWriteCount", scmMetrics)); - - // reset stale interval time to move node from healthy to stale - SCMNodeManager nodeManager = (SCMNodeManager) cluster - .getStorageContainerManager().getScmNodeManager(); - nodeManager.setStaleNodeIntervalMs(100); - - // verify the metrics when node becomes stale - GenericTestUtils.waitFor(() -> { - MetricsRecordBuilder metrics = getMetrics(SCMMetrics.SOURCE_NAME); - return 0 == getLongCounter("ContainerReportSize", metrics) - && 0 == getLongCounter("ContainerReportUsed", metrics) - && 0 == getLongCounter("ContainerReportReadBytes", metrics) - && 0 == getLongCounter("ContainerReportWriteBytes", metrics) - && 0 == getLongCounter("ContainerReportKeyCount", metrics) - && 0 == getLongCounter("ContainerReportReadCount", metrics) - && 0 == getLongCounter("ContainerReportWriteCount", metrics); - }, 1000, 60000); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - private ContainerReportsProto createContainerReport(int numReport, - ContainerStat stat) { - StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder - reportsBuilder = StorageContainerDatanodeProtocolProtos - .ContainerReportsProto.newBuilder(); - - for (int i = 0; i < numReport; i++) { - ContainerReport report = new ContainerReport( - RandomUtils.nextLong(), DigestUtils.sha256Hex("Simulated")); - report.setSize(stat.getSize().get()); - report.setBytesUsed(stat.getUsed().get()); - report.setReadCount(stat.getReadCount().get()); - report.setReadBytes(stat.getReadBytes().get()); - report.setKeyCount(stat.getKeyCount().get()); - report.setWriteCount(stat.getWriteCount().get()); - report.setWriteBytes(stat.getWriteBytes().get()); - reportsBuilder.addReports(report.getProtoBufMessage()); - } - return reportsBuilder.build(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js index 7fb52b1292..ab6f73bfc9 100644 --- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js +++ b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js @@ -48,7 +48,9 @@ labelType: 'value', duration: 500, labelThreshold: 0.01, - labelSunbeamLayout: true, + valueFormat: function(d) { + return d3.format('d')(d); + }, legend: { margin: { top: 5, diff --git a/hadoop-tools/hadoop-ozone/pom.xml b/hadoop-ozone/ozonefs/pom.xml similarity index 70% rename from hadoop-tools/hadoop-ozone/pom.xml rename to hadoop-ozone/ozonefs/pom.xml index a7d0cfaf83..c3de4d1b32 100644 --- a/hadoop-tools/hadoop-ozone/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -18,14 +18,13 @@ 4.0.0 org.apache.hadoop - hadoop-project - 3.2.0-SNAPSHOT - ../../hadoop-project + hadoop-ozone + 0.2.1-SNAPSHOT hadoop-ozone-filesystem Apache Hadoop Ozone FileSystem jar - + 0.2.1-SNAPSHOT UTF-8 true @@ -44,6 +43,46 @@ + + org.apache.maven.plugins + maven-shade-plugin + 3.1.1 + + + + com.google.guava:guava:jar + org.slf4j:slf4j-api:jar + com.google.protobuf:protobuf-java + 
com.nimbusds:nimbus-jose-jwt:jar + com.github.stephenc.jcip:jcip-annotations + com.google.code.findbugs:jsr305:jar + org.apache.hadoop:hadoop-ozone-client + org.apache.hadoop:hadoop-hdds-client + org.apache.hadoop:hadoop-hdds-common + org.fusesource.leveldbjni:leveldbjni-all + org.apache.ratis:ratis-server + org.apache.ratis:ratis-proto-shaded:jar + com.google.auto.value:auto-value-annotations + com.squareup:javapoet:jar + org.jctools:jctools-core + org.apache.ratis:ratis-common + org.apache.ratis:ratis-client + org.apache.ratis:ratis-netty + org.apache.ratis:ratis-grpc + org.rocksdb:rocksdbjni + org.apache.hadoop:hadoop-ozone-common + + + + + + package + + shade + + + + org.apache.maven.plugins maven-dependency-plugin @@ -56,7 +95,9 @@ - ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt + + ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt + @@ -83,7 +124,6 @@ org.apache.hadoop hadoop-hdds-common - provided org.apache.hadoop @@ -108,12 +148,10 @@ org.apache.hadoop hadoop-hdds-client - test org.apache.hadoop hadoop-ozone-common - provided org.apache.hadoop @@ -129,7 +167,6 @@ org.apache.hadoop hadoop-ozone-client - provided org.apache.hadoop diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java similarity index 99% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index 0ff1d50cd6..6906a9dc47 100644 --- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import 
org.apache.http.client.utils.URIBuilder; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java rename to hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java similarity index 99% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index b82c4a135d..ad21f28ec4 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.ozone; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java similarity index 99% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 5a7cb4f6f8..a2257023b6 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -31,7 +31,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.junit.After; import org.apache.hadoop.hdds.conf.OzoneConfiguration; diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java rename to 
hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java similarity index 98% rename from hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java rename to hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 176b614d4d..8417e463f9 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.ozone.contract; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-ozone/src/test/resources/contract/ozone.xml b/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/resources/contract/ozone.xml rename to hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml diff --git a/hadoop-tools/hadoop-ozone/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties similarity index 100% rename from hadoop-tools/hadoop-ozone/src/test/resources/log4j.properties rename to hadoop-ozone/ozonefs/src/test/resources/log4j.properties diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index cffef14e6e..b655088c57 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -34,6 +34,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> client ozone-manager tools + ozonefs integration-test objectstore-service docs diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java index 1b1153b18a..375450ca09 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.util.Time; @@ -150,14 +149,14 @@ public static Pipeline createPipeline(String containerName, Preconditions.checkArgument(i.hasNext()); final DatanodeDetails leader = i.next(); String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(5); - final PipelineChannel pipelineChannel = - new PipelineChannel(leader.getUuidString(), OPEN, + final Pipeline pipeline = + new Pipeline(leader.getUuidString(), OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName); - pipelineChannel.addMember(leader); + pipeline.addMember(leader); for (; i.hasNext();) { - pipelineChannel.addMember(i.next()); + pipeline.addMember(i.next()); } - return new Pipeline(pipelineChannel); + return pipeline; } @Benchmark diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java index c14c1b9306..1e73165971 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.genesis; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.ratis.shaded.com.google.protobuf.ByteString; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.FileUtils; @@ -32,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.util.Time; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Level; @@ -78,7 +78,7 @@ public class BenchMarkDatanodeDispatcher { private String baseDir; private String datanodeUuid; private Dispatcher dispatcher; - private PipelineChannel pipelineChannel; + private Pipeline pipeline; private ByteString data; private Random random; private AtomicInteger containerCount; @@ -96,7 +96,7 @@ public class BenchMarkDatanodeDispatcher { @Setup(Level.Trial) public void initialize() throws IOException { datanodeUuid = UUID.randomUUID().toString(); - pipelineChannel = new PipelineChannel("127.0.0.1", + pipeline = new Pipeline("127.0.0.1", LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "SA-" + UUID.randomUUID()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index d4ac994cff..edc0d7b597 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -19,7 +19,6 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import com.google.protobuf.ByteString; import org.apache.commons.cli.BasicParser; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -60,13 +59,11 @@ import java.util.HashSet; import java.util.Set; -import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB; /** @@ -111,15 +108,6 @@ public class SQLCLI extends Configured implements Tool { private static final String INSERT_CONTAINER_MEMBERS = "INSERT INTO containerMembers (containerName, datanodeUUID) " + "VALUES (\"%s\", \"%s\")"; - // for nodepool.db - private static final String CREATE_NODE_POOL = - "CREATE TABLE nodePool (" + - "datanodeUUID TEXT NOT NULL," + - "poolName TEXT NOT NULL," + - "PRIMARY KEY(datanodeUUID, poolName))"; - private static final String INSERT_NODE_POOL = - "INSERT INTO nodePool (datanodeUUID, poolName) " + - "VALUES (\"%s\", \"%s\")"; // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO // for openContainer.db 
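The test and benchmark hunks above remove `PipelineChannel`; `Pipeline` itself now carries the leader id, lifecycle state, replication type/factor and members. A minimal sketch of the new construction pattern, mirroring those hunks (the wrapper class name is only for illustration):

```java
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

/** Illustrative helper: single-member pipeline built the post-patch way. */
final class SingleNodePipelines {
  private SingleNodePipelines() { }

  static Pipeline newStandalonePipeline(DatanodeDetails leader) {
    // Pipeline now takes the leader id, state, replication type/factor and
    // a name directly; members are added afterwards (no PipelineChannel).
    Pipeline pipeline = new Pipeline(leader.getUuidString(),
        HddsProtos.LifeCycleState.OPEN,
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        "TEST-" + UUID.randomUUID().toString().substring(3));
    pipeline.addMember(leader);
    return pipeline;
  }
}
```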
private static final String CREATE_OPEN_CONTAINER = @@ -285,9 +273,6 @@ public int run(String[] args) throws Exception { if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) { LOG.info("Converting container DB"); convertContainerDB(dbPath, outPath); - } else if (dbName.toString().equals(NODEPOOL_DB)) { - LOG.info("Converting node pool DB"); - convertNodePoolDB(dbPath, outPath); } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) { LOG.info("Converting open container DB"); convertOpenContainerDB(dbPath, outPath); @@ -519,11 +504,11 @@ private void insertContainerDB(Connection conn, long containerID, LOG.info("Insert to sql container db, for container {}", containerID); String insertContainerInfo = String.format( INSERT_CONTAINER_INFO, containerID, - pipeline.getPipelineChannel().getLeaderID()); + pipeline.getLeaderID()); executeSQL(conn, insertContainerInfo); for (HddsProtos.DatanodeDetailsProto dd : - pipeline.getPipelineChannel().getMembersList()) { + pipeline.getMembersList()) { String uuid = dd.getUuid(); if (!uuidChecked.contains(uuid)) { // we may also not use this checked set, but catch exception instead @@ -543,66 +528,7 @@ private void insertContainerDB(Connection conn, long containerID, } LOG.info("Insertion completed."); } - /** - * Converts nodePool.db to sqlite. The schema of sql db: - * two tables, nodePool and datanodeInfo (the same datanode Info as for - * container.db). - * - * nodePool - * --------------------------------------------------------- - * datanodeUUID* | poolName* - * --------------------------------------------------------- - * - * datanodeInfo: - * --------------------------------------------------------- - * hostname | datanodeUUid* | xferPort | ipcPort - * --------------------------------------------------------- - * - * -------------------------------- - * |containerPort - * -------------------------------- - * - * @param dbPath path to container db. - * @param outPath path to output sqlite - * @throws IOException throws exception. - */ - private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception { - LOG.info("Create table for sql node pool db."); - File dbFile = dbPath.toFile(); - try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf).setDbFile(dbFile).build(); - Connection conn = connectDB(outPath.toString())) { - executeSQL(conn, CREATE_NODE_POOL); - executeSQL(conn, CREATE_DATANODE_INFO); - dbStore.iterate(null, (key, value) -> { - DatanodeDetails nodeId = DatanodeDetails - .getFromProtoBuf(HddsProtos.DatanodeDetailsProto - .PARSER.parseFrom(key)); - String blockPool = DFSUtil.bytes2String(value); - try { - insertNodePoolDB(conn, blockPool, nodeId); - return true; - } catch (SQLException e) { - throw new IOException(e); - } - }); - } - } - - private void insertNodePoolDB(Connection conn, String blockPool, - DatanodeDetails datanodeDetails) throws SQLException { - String insertNodePool = String.format(INSERT_NODE_POOL, - datanodeDetails.getUuidString(), blockPool); - executeSQL(conn, insertNodePool); - - String insertDatanodeDetails = String - .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(), - datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(), - datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); - executeSQL(conn, insertDatanodeDetails); - } /** * Convert openContainer.db to sqlite db file. 
This is rather simple db, diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 8cb5bfc48b..8e28afec4e 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -586,7 +586,11 @@ hadoop-ozone-docs ${hdds.version} - + + org.apache.hadoop + hadoop-ozone-filesystem + ${hdds.version} + org.apache.hadoop hadoop-hdds-common @@ -1039,11 +1043,6 @@ junit 4.11 - - commons-lang - commons-lang - 2.6 - commons-collections commons-collections @@ -2060,10 +2059,5 @@ - - dynamodb-local-oregon - DynamoDB Local Release Repository - https://s3-us-west-2.amazonaws.com/dynamodb-local/release - diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java index 58c14a943b..32367aff17 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java @@ -22,7 +22,7 @@ import com.aliyun.oss.common.auth.CredentialsProvider; import com.aliyun.oss.common.auth.DefaultCredentials; import com.aliyun.oss.common.auth.InvalidCredentialsException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import java.io.IOException; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java index 93e31d57e8..4fbb6fb8b1 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java @@ -30,7 +30,7 @@ import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java index cc050c876e..5e2175926a 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java @@ -45,7 +45,7 @@ import com.aliyun.oss.model.UploadPartRequest; import com.aliyun.oss.model.UploadPartResult; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java index 2fe06c1b05..a7536d6d7a 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java +++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java @@ -23,7 +23,7 @@ import com.aliyun.oss.common.auth.CredentialsProvider; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.security.ProviderUtils; diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java index 901cb2bd08..79e0de3492 100644 --- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java +++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.aliyun.oss; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.junit.internal.AssumptionViolatedException; diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 24ed11dee4..c6dddb0223 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -36,7 +36,6 @@ true ${project.build.directory}/test - 1.11.86 unset @@ -49,6 +48,8 @@ false false local + + 200000 @@ -162,6 +163,7 @@ ${fs.s3a.s3guard.test.authoritative} ${fs.s3a.s3guard.test.implementation} + ${test.integration.timeout} @@ -299,23 +301,10 @@ - - - dynamodblocal - - - dynamodblocal - - - - dynamodblocal - - - - non-auth + auth auth @@ -346,6 +335,9 @@ maven-surefire-plugin 3600 + + ${test.integration.timeout} + @@ -417,26 +409,6 @@ aws-java-sdk-bundle compile - - com.amazonaws - DynamoDBLocal - ${dynamodb.local.version} - test - - - org.hamcrest - hamcrest-core - - - org.eclipse.jetty - jetty-http - - - org.apache.commons - commons-lang3 - - - junit junit diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java index e0bee0fdf6..10201f00d3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java @@ -23,7 +23,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AnonymousAWSCredentials; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java index b1899e2293..01bcc6a05e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.AWSCredentials; -import org.apache.commons.lang.StringUtils; +import 
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 4c958439b0..c52193698f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -401,6 +401,17 @@ private Constants() { public static final String S3GUARD_DDB_TABLE_NAME_KEY = "fs.s3a.s3guard.ddb.table"; + /** + * Test table name to use during DynamoDB integration test. + * + * The table will be modified, and deleted in the end of the tests. + * If this value is not set, the integration tests that would be destructive + * won't run. + */ + @InterfaceStability.Unstable + public static final String S3GUARD_DDB_TEST_TABLE_NAME_KEY = + "fs.s3a.s3guard.ddb.test.table"; + /** * Whether to create the DynamoDB table if the table does not exist. */ diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 4b0c208805..737d7da95c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -121,8 +121,8 @@ import static org.apache.hadoop.fs.s3a.Invoker.*; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.*; -import static org.apache.commons.lang.StringUtils.isNotBlank; -import static org.apache.commons.lang.StringUtils.isNotEmpty; +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.commons.lang3.StringUtils.isNotEmpty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index c54d3e2621..440739d9d1 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -24,7 +24,7 @@ import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.amazonaws.services.s3.model.SSECustomerKey; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CanSetReadahead; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java new file mode 100644 index 0000000000..34c88d43f6 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.s3a; + +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.google.common.base.Charsets; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BBPartHandle; +import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderFactory; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.UploadHandle; +import org.apache.hadoop.hdfs.DFSUtilClient; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.stream.Collectors; + +/** + * MultipartUploader for S3AFileSystem. This uses the S3 multipart + * upload mechanism. 
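+ *
+ * A minimal usage sketch, illustrative only: {@code fs}, {@code conf},
+ * {@code path}, {@code partStream} and {@code partLength} are assumed to
+ * exist and are not defined by this patch.
+ * <pre>
+ *   S3AMultipartUploader up = new S3AMultipartUploader(fs, conf);
+ *   UploadHandle id = up.initialize(path);
+ *   PartHandle part = up.putPart(path, partStream, 1, id, partLength);
+ *   PathHandle result =
+ *       up.complete(path, Collections.singletonList(Pair.of(1, part)), id);
+ *   // or, on failure: up.abort(path, id);
+ * </pre>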
+ */ +public class S3AMultipartUploader extends MultipartUploader { + + private final S3AFileSystem s3a; + + public S3AMultipartUploader(FileSystem fs, Configuration conf) { + if (!(fs instanceof S3AFileSystem)) { + throw new IllegalArgumentException( + "S3A MultipartUploads must use S3AFileSystem"); + } + s3a = (S3AFileSystem) fs; + } + + @Override + public UploadHandle initialize(Path filePath) throws IOException { + String key = s3a.pathToKey(filePath); + InitiateMultipartUploadRequest request = + new InitiateMultipartUploadRequest(s3a.getBucket(), key); + LOG.debug("initialize request: {}", request); + InitiateMultipartUploadResult result = s3a.initiateMultipartUpload(request); + String uploadId = result.getUploadId(); + return BBUploadHandle.from(ByteBuffer.wrap( + uploadId.getBytes(Charsets.UTF_8))); + } + + @Override + public PartHandle putPart(Path filePath, InputStream inputStream, + int partNumber, UploadHandle uploadId, long lengthInBytes) { + String key = s3a.pathToKey(filePath); + UploadPartRequest request = new UploadPartRequest(); + byte[] uploadIdBytes = uploadId.toByteArray(); + request.setUploadId(new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8)); + request.setInputStream(inputStream); + request.setPartSize(lengthInBytes); + request.setPartNumber(partNumber); + request.setBucketName(s3a.getBucket()); + request.setKey(key); + LOG.debug("putPart request: {}", request); + UploadPartResult result = s3a.uploadPart(request); + String eTag = result.getETag(); + return BBPartHandle.from(ByteBuffer.wrap(eTag.getBytes(Charsets.UTF_8))); + } + + @Override + public PathHandle complete(Path filePath, + List> handles, UploadHandle uploadId) { + String key = s3a.pathToKey(filePath); + CompleteMultipartUploadRequest request = + new CompleteMultipartUploadRequest(); + request.setBucketName(s3a.getBucket()); + request.setKey(key); + byte[] uploadIdBytes = uploadId.toByteArray(); + request.setUploadId(new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8)); + List eTags = handles + .stream() + .map(handle -> { + byte[] partEtagBytes = handle.getRight().toByteArray(); + return new PartETag(handle.getLeft(), + new String(partEtagBytes, 0, partEtagBytes.length, + Charsets.UTF_8)); + }) + .collect(Collectors.toList()); + request.setPartETags(eTags); + LOG.debug("Complete request: {}", request); + CompleteMultipartUploadResult completeMultipartUploadResult = + s3a.getAmazonS3Client().completeMultipartUpload(request); + + byte[] eTag = DFSUtilClient.string2Bytes( + completeMultipartUploadResult.getETag()); + return (PathHandle) () -> ByteBuffer.wrap(eTag); + } + + @Override + public void abort(Path filePath, UploadHandle uploadId) { + String key = s3a.pathToKey(filePath); + byte[] uploadIdBytes = uploadId.toByteArray(); + String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + AbortMultipartUploadRequest request = new AbortMultipartUploadRequest(s3a + .getBucket(), key, uploadIdString); + LOG.debug("Abort request: {}", request); + s3a.getAmazonS3Client().abortMultipartUpload(request); + } + + /** + * Factory for creating MultipartUploader objects for s3a:// FileSystems. 
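+ *
+ * The factory is registered through the service-loader resource files added
+ * under META-INF in this patch, so callers normally obtain the uploader via
+ * the generic MultipartUploaderFactory lookup rather than constructing
+ * S3AMultipartUploader directly; createMultipartUploader returns null for
+ * any scheme other than "s3a".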
+ */ + public static class Factory extends MultipartUploaderFactory { + @Override + protected MultipartUploader createMultipartUploader(FileSystem fs, + Configuration conf) { + if (fs.getScheme().equals("s3a")) { + return new S3AMultipartUploader(fs, conf); + } + return null; + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index 6a01a80308..a5f7d75449 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -35,7 +35,7 @@ import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java index 9939bb2571..7f9e57e2e2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java index e959908a8d..3b89bde198 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.auth.AWSCredentials; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import java.io.IOException; import java.net.URI; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java index 4b6a77e0dc..fdaf9bd544 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java index 85cc38a846..596dd95685 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java @@ -33,7 +33,7 @@ import com.amazonaws.services.s3.model.PartETag; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java index 6cf1f1e63a..cf84cb32eb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java index cf365c260d..a619fc7b7d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.WriteOperationHelper; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java index a4d39d75d6..d5d256aefb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java @@ -30,7 +30,7 @@ import com.google.common.collect.Sets; import com.google.common.util.concurrent.UncheckedExecutionException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java index 66ada497aa..91e64cddf6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java @@ -26,7 +26,7 @@ import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import 
com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index b942ed7349..116827dd4f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -60,7 +60,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -261,6 +261,7 @@ private static DynamoDB createDynamoDB(Configuration conf, String s3Region) @Override @Retries.OnceRaw public void initialize(FileSystem fs) throws IOException { + Preconditions.checkNotNull(fs, "Null filesystem"); Preconditions.checkArgument(fs instanceof S3AFileSystem, "DynamoDBMetadataStore only supports S3A filesystem."); owner = (S3AFileSystem) fs; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java new file mode 100644 index 0000000000..6040d672ac --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import javax.annotation.Nullable; + +/** + * LocalMetadataEntry is used to store entries in the cache of + * LocalMetadataStore. PathMetadata or dirListingMetadata can be null. The + * entry is not immutable. 
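+ *
+ * A sketch of how LocalMetadataStore is expected to build an entry
+ * (illustrative; {@code status} and {@code path} are assumed to be an
+ * existing FileStatus and its Path, not values defined here):
+ * <pre>
+ *   LocalMetadataEntry entry = new LocalMetadataEntry(new PathMetadata(status));
+ *   if (status.isDirectory() && !entry.hasDirMeta()) {
+ *     entry.setDirListingMetadata(
+ *         new DirListingMetadata(path, DirListingMetadata.EMPTY_DIR, false));
+ *   }
+ * </pre>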
+ */ +public final class LocalMetadataEntry { + @Nullable + private PathMetadata pathMetadata; + @Nullable + private DirListingMetadata dirListingMetadata; + + LocalMetadataEntry(PathMetadata pmd){ + pathMetadata = pmd; + dirListingMetadata = null; + } + + LocalMetadataEntry(DirListingMetadata dlm){ + pathMetadata = null; + dirListingMetadata = dlm; + } + + public PathMetadata getFileMeta() { + return pathMetadata; + } + + public DirListingMetadata getDirListingMeta() { + return dirListingMetadata; + } + + + public boolean hasPathMeta() { + return this.pathMetadata != null; + } + + public boolean hasDirMeta() { + return this.dirListingMetadata != null; + } + + public void setPathMetadata(PathMetadata pathMetadata) { + this.pathMetadata = pathMetadata; + } + + public void setDirListingMetadata(DirListingMetadata dirListingMetadata) { + this.dirListingMetadata = dirListingMetadata; + } + + @Override public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("LocalMetadataEntry{"); + if(pathMetadata != null) { + sb.append("pathMetadata=" + pathMetadata.getFileStatus().getPath()); + } + if(dirListingMetadata != null){ + sb.append("; dirListingMetadata=" + dirListingMetadata.getPath()); + } + sb.append("}"); + return sb.toString(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java index 95689e11fb..f0ffb44623 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java @@ -23,7 +23,7 @@ import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -37,13 +37,12 @@ import java.net.URI; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.Map; import java.util.concurrent.TimeUnit; /** - * This is a local, in-memory, implementation of MetadataStore. + * This is a local, in-memory implementation of MetadataStore. * This is not a coherent cache across processes. It is only * locally-coherent. * @@ -56,12 +55,12 @@ * non-recursive removal of non-empty directories. It is assumed the caller * already has to perform these sorts of checks. * - * Contains cache internally with time based eviction. + * Contains one cache internally with time based eviction. */ public class LocalMetadataStore implements MetadataStore { public static final Logger LOG = LoggerFactory.getLogger(MetadataStore.class); - public static final int DEFAULT_MAX_RECORDS = 128; + public static final int DEFAULT_MAX_RECORDS = 256; public static final int DEFAULT_CACHE_ENTRY_TTL_MSEC = 10 * 1000; /** @@ -79,11 +78,8 @@ public class LocalMetadataStore implements MetadataStore { public static final String CONF_CACHE_ENTRY_TTL = "fs.metadatastore.local.ttl"; - /** Contains directories and files. */ - private Cache fileCache; - - /** Contains directory listings. */ - private Cache dirCache; + /** Contains directory and file listings. */ + private Cache localCache; private FileSystem fs; /* Null iff this FS does not have an associated URI host. 
*/ @@ -116,8 +112,7 @@ public void initialize(Configuration conf) throws IOException { builder.expireAfterAccess(ttl, TimeUnit.MILLISECONDS); } - fileCache = builder.build(); - dirCache = builder.build(); + localCache = builder.build(); } @Override @@ -155,8 +150,7 @@ private synchronized void doDelete(Path p, boolean recursive, boolean if (recursive) { // Remove all entries that have this dir as path prefix. - deleteEntryByAncestor(path, dirCache, tombstone); - deleteEntryByAncestor(path, fileCache, tombstone); + deleteEntryByAncestor(path, localCache, tombstone); } } @@ -170,7 +164,7 @@ public PathMetadata get(Path p, boolean wantEmptyDirectoryFlag) throws IOException { Path path = standardize(p); synchronized (this) { - PathMetadata m = fileCache.getIfPresent(path); + PathMetadata m = getFileMeta(path); if (wantEmptyDirectoryFlag && m != null && m.getFileStatus().isDirectory()) { @@ -191,15 +185,15 @@ public PathMetadata get(Path p, boolean wantEmptyDirectoryFlag) * @return TRUE / FALSE if known empty / not-empty, UNKNOWN otherwise. */ private Tristate isEmptyDirectory(Path p) { - DirListingMetadata dirMeta = dirCache.getIfPresent(p); - return dirMeta.withoutTombstones().isEmpty(); + DirListingMetadata dlm = getDirListingMeta(p); + return dlm.withoutTombstones().isEmpty(); } @Override public synchronized DirListingMetadata listChildren(Path p) throws IOException { Path path = standardize(p); - DirListingMetadata listing = dirCache.getIfPresent(path); + DirListingMetadata listing = getDirListingMeta(path); if (LOG.isDebugEnabled()) { LOG.debug("listChildren({}) -> {}", path, listing == null ? "null" : listing.prettyPrint()); @@ -211,6 +205,7 @@ public synchronized DirListingMetadata listChildren(Path p) throws @Override public void move(Collection pathsToDelete, Collection pathsToCreate) throws IOException { + LOG.info("Move {} to {}", pathsToDelete, pathsToCreate); Preconditions.checkNotNull(pathsToDelete, "pathsToDelete is null"); Preconditions.checkNotNull(pathsToCreate, "pathsToCreate is null"); @@ -258,7 +253,12 @@ public void put(PathMetadata meta) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("put {} -> {}", path, meta.prettyPrint()); } - fileCache.put(path, meta); + LocalMetadataEntry entry = localCache.getIfPresent(path); + if(entry == null){ + entry = new LocalMetadataEntry(meta); + } else { + entry.setPathMetadata(meta); + } /* Directory case: * We also make sure we have an entry in the dirCache, so subsequent @@ -271,27 +271,32 @@ public void put(PathMetadata meta) throws IOException { * saving round trips to underlying store for subsequent listStatus() */ - if (status.isDirectory()) { - DirListingMetadata dir = dirCache.getIfPresent(path); - if (dir == null) { - dirCache.put(path, new DirListingMetadata(path, DirListingMetadata - .EMPTY_DIR, false)); - } + // only create DirListingMetadata if the entry does not have one + if (status.isDirectory() && !entry.hasDirMeta()) { + DirListingMetadata dlm = + new DirListingMetadata(path, DirListingMetadata.EMPTY_DIR, false); + entry.setDirListingMetadata(dlm); } + localCache.put(path, entry); /* Update cached parent dir. */ Path parentPath = path.getParent(); if (parentPath != null) { - DirListingMetadata parent = dirCache.getIfPresent(parentPath); - if (parent == null) { - /* Track this new file's listing in parent. Parent is not - * authoritative, since there may be other items in it we don't know - * about. 
*/ - parent = new DirListingMetadata(parentPath, - DirListingMetadata.EMPTY_DIR, false); - dirCache.put(parentPath, parent); + LocalMetadataEntry parentMeta = localCache.getIfPresent(parentPath); + DirListingMetadata parentDirMeta = + new DirListingMetadata(parentPath, DirListingMetadata.EMPTY_DIR, + false); + parentDirMeta.put(status); + + getDirListingMeta(parentPath); + + if (parentMeta == null){ + localCache.put(parentPath, new LocalMetadataEntry(parentDirMeta)); + } else if (!parentMeta.hasDirMeta()) { + parentMeta.setDirListingMetadata(parentDirMeta); + } else { + parentMeta.getDirListingMeta().put(status); } - parent.put(status); } } } @@ -301,7 +306,13 @@ public synchronized void put(DirListingMetadata meta) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("put dirMeta {}", meta.prettyPrint()); } - dirCache.put(standardize(meta.getPath()), meta); + LocalMetadataEntry entry = + localCache.getIfPresent(standardize(meta.getPath())); + if(entry == null){ + localCache.put(standardize(meta.getPath()), new LocalMetadataEntry(meta)); + } else { + entry.setDirListingMetadata(meta); + } put(meta.getListing()); } @@ -319,8 +330,8 @@ public void close() throws IOException { @Override public void destroy() throws IOException { - if (dirCache != null) { - dirCache.invalidateAll(); + if (localCache != null) { + localCache.invalidateAll(); } } @@ -330,42 +341,44 @@ public void prune(long modTime) throws IOException{ } @Override - public synchronized void prune(long modTime, String keyPrefix) - throws IOException { - Iterator> files = - fileCache.asMap().entrySet().iterator(); - while (files.hasNext()) { - Map.Entry entry = files.next(); - if (expired(entry.getValue().getFileStatus(), modTime, keyPrefix)) { - files.remove(); - } - } - Iterator> dirs = - dirCache.asMap().entrySet().iterator(); - while (dirs.hasNext()) { - Map.Entry entry = dirs.next(); - Path path = entry.getKey(); - DirListingMetadata metadata = entry.getValue(); - Collection oldChildren = metadata.getListing(); - Collection newChildren = new LinkedList<>(); + public synchronized void prune(long modTime, String keyPrefix) { + // prune files + // filter path_metadata (files), filter expired, remove expired + localCache.asMap().entrySet().stream() + .filter(entry -> entry.getValue().hasPathMeta()) + .filter(entry -> expired( + entry.getValue().getFileMeta().getFileStatus(), modTime, keyPrefix)) + .forEach(entry -> localCache.invalidate(entry.getKey())); - for (PathMetadata child : oldChildren) { - FileStatus status = child.getFileStatus(); - if (!expired(status, modTime, keyPrefix)) { - newChildren.add(child); - } - } - if (newChildren.size() != oldChildren.size()) { - dirCache.put(path, new DirListingMetadata(path, newChildren, false)); - if (!path.isRoot()) { - DirListingMetadata parent = null; - parent = dirCache.getIfPresent(path.getParent()); - if (parent != null) { - parent.setAuthoritative(false); + + // prune dirs + // filter DIR_LISTING_METADATA, remove expired, remove authoritative bit + localCache.asMap().entrySet().stream() + .filter(entry -> entry.getValue().hasDirMeta()) + .forEach(entry -> { + Path path = entry.getKey(); + DirListingMetadata metadata = entry.getValue().getDirListingMeta(); + Collection oldChildren = metadata.getListing(); + Collection newChildren = new LinkedList<>(); + + for (PathMetadata child : oldChildren) { + FileStatus status = child.getFileStatus(); + if (!expired(status, modTime, keyPrefix)) { + newChildren.add(child); + } } - } - } - } + if (newChildren.size() != oldChildren.size()) 
{ + DirListingMetadata dlm = + new DirListingMetadata(path, newChildren, false); + localCache.put(path, new LocalMetadataEntry(dlm)); + if (!path.isRoot()) { + DirListingMetadata parent = getDirListingMeta(path.getParent()); + if (parent != null) { + parent.setAuthoritative(false); + } + } + } + }); } private boolean expired(FileStatus status, long expiry, String keyPrefix) { @@ -390,31 +403,26 @@ private boolean expired(FileStatus status, long expiry, String keyPrefix) { } @VisibleForTesting - static void deleteEntryByAncestor(Path ancestor, Cache cache, - boolean tombstone) { - for (Iterator> it = cache.asMap().entrySet().iterator(); - it.hasNext();) { - Map.Entry entry = it.next(); - Path f = entry.getKey(); - T meta = entry.getValue(); - if (isAncestorOf(ancestor, f)) { - if (tombstone) { - if (meta instanceof PathMetadata) { - cache.put(f, (T) PathMetadata.tombstone(f)); - } else if (meta instanceof DirListingMetadata) { - it.remove(); + static void deleteEntryByAncestor(Path ancestor, + Cache cache, boolean tombstone) { + + cache.asMap().entrySet().stream() + .filter(entry -> isAncestorOf(ancestor, entry.getKey())) + .forEach(entry -> { + LocalMetadataEntry meta = entry.getValue(); + Path path = entry.getKey(); + if(meta.hasDirMeta()){ + cache.invalidate(path); + } else if(tombstone && meta.hasPathMeta()){ + meta.setPathMetadata(PathMetadata.tombstone(path)); } else { - throw new IllegalStateException("Unknown type in cache"); + cache.invalidate(path); } - } else { - it.remove(); - } - } - } + }); } /** - * @return true iff 'ancestor' is ancestor dir in path 'f'. + * @return true if 'ancestor' is ancestor dir in path 'f'. * All paths here are absolute. Dir does not count as its own ancestor. */ private static boolean isAncestorOf(Path ancestor, Path f) { @@ -431,27 +439,41 @@ private static boolean isAncestorOf(Path ancestor, Path f) { * lock held. */ private void deleteCacheEntries(Path path, boolean tombstone) { - - // Remove target file/dir - LOG.debug("delete file entry for {}", path); - if (tombstone) { - fileCache.put(path, PathMetadata.tombstone(path)); - } else { - fileCache.invalidate(path); + LocalMetadataEntry entry = localCache.getIfPresent(path); + // If there's no entry, delete should silently succeed + // (based on MetadataStoreTestBase#testDeleteNonExisting) + if(entry == null){ + LOG.warn("Delete: path {} is missing from cache.", path); + return; } - // Update this and parent dir listing, if any + // Remove target file entry + LOG.debug("delete file entry for {}", path); + if(entry.hasPathMeta()){ + if (tombstone) { + PathMetadata pmd = PathMetadata.tombstone(path); + entry.setPathMetadata(pmd); + } else { + entry.setPathMetadata(null); + } + } - /* If this path is a dir, remove its listing */ - LOG.debug("removing listing of {}", path); + // If this path is a dir, remove its listing + if(entry.hasDirMeta()) { + LOG.debug("removing listing of {}", path); + entry.setDirListingMetadata(null); + } - dirCache.invalidate(path); + // If the entry is empty (contains no dirMeta or pathMeta) remove it from + // the cache. 
+ if(!entry.hasDirMeta() && !entry.hasPathMeta()){ + localCache.invalidate(entry); + } /* Remove this path from parent's dir listing */ Path parent = path.getParent(); if (parent != null) { - DirListingMetadata dir = null; - dir = dirCache.getIfPresent(parent); + DirListingMetadata dir = getDirListingMeta(parent); if (dir != null) { LOG.debug("removing parent's entry for {} ", path); if (tombstone) { @@ -494,4 +516,23 @@ public Map getDiagnostics() throws IOException { public void updateParameters(Map parameters) throws IOException { } + + PathMetadata getFileMeta(Path p){ + LocalMetadataEntry entry = localCache.getIfPresent(p); + if(entry != null && entry.hasPathMeta()){ + return entry.getFileMeta(); + } else { + return null; + } + } + + DirListingMetadata getDirListingMeta(Path p){ + LocalMetadataEntry entry = localCache.getIfPresent(p); + if(entry != null && entry.hasDirMeta()){ + return entry.getDirListingMeta(); + } else { + return null; + } + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index fbffba915d..527697f00f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; @@ -805,7 +805,9 @@ private static void printDiff(FileStatus msStatus, */ private void compareDir(FileStatus msDir, FileStatus s3Dir, PrintStream out) throws IOException { - Preconditions.checkArgument(!(msDir == null && s3Dir == null)); + Preconditions.checkArgument(!(msDir == null && s3Dir == null), + "The path does not exist in metadata store and on s3."); + if (msDir != null && s3Dir != null) { Preconditions.checkArgument(msDir.getPath().equals(s3Dir.getPath()), String.format("The path from metadata store and s3 are different:" + diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java index bfac9750e7..9e2f34def3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3native; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -31,7 +31,7 @@ import java.net.URLDecoder; import java.util.Objects; -import static org.apache.commons.lang.StringUtils.equalsIgnoreCase; +import static org.apache.commons.lang3.StringUtils.equalsIgnoreCase; /** * Class to aid logging in to S3 endpoints. 
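For context on the LocalMetadataStore changes above: the store now keeps a single Guava cache of LocalMetadataEntry values with time-based eviction, instead of separate file and directory caches. A minimal sketch of that cache setup and use, assuming the constants and types named in this patch, with path a Path and meta a PathMetadata that are assumed to exist (the exact builder options in initialize() may differ):

    Cache<Path, LocalMetadataEntry> localCache = CacheBuilder.newBuilder()
        .maximumSize(DEFAULT_MAX_RECORDS)             // 256 by default in this patch
        .expireAfterAccess(DEFAULT_CACHE_ENTRY_TTL_MSEC, TimeUnit.MILLISECONDS)
        .build();

    localCache.put(path, new LocalMetadataEntry(meta));   // file or directory entry
    LocalMetadataEntry entry = localCache.getIfPresent(path);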
diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory new file mode 100644 index 0000000000..2e4bc241d0 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/org.apache.hadoop.fs.MultipartUploaderFactory @@ -0,0 +1,15 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +org.apache.hadoop.fs.s3a.S3AMultipartUploader$Factory diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader new file mode 100644 index 0000000000..d16846b25b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +org.apache.hadoop.fs.s3a.S3AMultipartUploader diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java index 73e71f41fd..f22af49635 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java @@ -29,13 +29,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; import java.io.IOException; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeEnableS3Guard; -import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC_COMMITTER_ENABLED; /** * An extension of the contract test base set up for S3A tests. 
@@ -78,23 +75,7 @@ protected int getTestTimeoutMillis() { */ @Override protected Configuration createConfiguration() { - Configuration conf = super.createConfiguration(); - // patch in S3Guard options - maybeEnableS3Guard(conf); - // set hadoop temp dir to a default value - String testUniqueForkId = - System.getProperty(TEST_UNIQUE_FORK_ID); - String tmpDir = conf.get(Constants.HADOOP_TMP_DIR, "target/build/test"); - if (testUniqueForkId != null) { - // patch temp dir for the specific branch - tmpDir = tmpDir + File.pathSeparatorChar + testUniqueForkId; - conf.set(Constants.HADOOP_TMP_DIR, tmpDir); - } - conf.set(Constants.BUFFER_DIR, tmpDir); - // add this so that even on tests where the FS is shared, - // the FS is always "magic" - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); - return conf; + return S3ATestUtils.prepareTestConfiguration(super.createConfiguration()); } protected Configuration getConfiguration() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java index f1799ac856..aa6b5d8659 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java @@ -22,8 +22,8 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.S3ClientOptions; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.reflect.FieldUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java index 50c9fb554e..a8a78f6282 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java index 4c953bd289..c1708305ec 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java index 7f7802d24d..0f7b418c1e 100644 --- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java @@ -143,7 +143,6 @@ public interface S3ATestConstants { String TEST_S3GUARD_IMPLEMENTATION = TEST_S3GUARD_PREFIX + ".implementation"; String TEST_S3GUARD_IMPLEMENTATION_LOCAL = "local"; String TEST_S3GUARD_IMPLEMENTATION_DYNAMO = "dynamo"; - String TEST_S3GUARD_IMPLEMENTATION_DYNAMODBLOCAL = "dynamodblocal"; String TEST_S3GUARD_IMPLEMENTATION_NONE = "none"; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index 4414746f96..869997b44e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -30,9 +30,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.s3a.commit.CommitConstants; -import org.apache.hadoop.fs.s3a.s3guard.DynamoDBClientFactory; -import org.apache.hadoop.fs.s3a.s3guard.DynamoDBLocalClientFactory; -import org.apache.hadoop.fs.s3a.s3guard.S3Guard; import org.hamcrest.core.Is; import org.junit.Assert; @@ -42,6 +39,7 @@ import org.slf4j.LoggerFactory; import java.io.Closeable; +import java.io.File; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -56,6 +54,7 @@ import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3AUtils.propagateBucketOptions; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC_COMMITTER_ENABLED; import static org.junit.Assert.*; /** @@ -393,9 +392,6 @@ public static void maybeEnableS3Guard(Configuration conf) { case TEST_S3GUARD_IMPLEMENTATION_LOCAL: implClass = S3GUARD_METASTORE_LOCAL; break; - case TEST_S3GUARD_IMPLEMENTATION_DYNAMODBLOCAL: - conf.setClass(S3Guard.S3GUARD_DDB_CLIENT_FACTORY_IMPL, - DynamoDBLocalClientFactory.class, DynamoDBClientFactory.class); case TEST_S3GUARD_IMPLEMENTATION_DYNAMO: implClass = S3GUARD_METASTORE_DYNAMO; break; @@ -489,6 +485,32 @@ public static E interceptClosing( }); } + /** + * Patch a configuration for testing. + * This includes possibly enabling s3guard, setting up the local + * FS temp dir and anything else needed for test runs. 
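+ *
+ * Typical use from a test base class, as AbstractS3ATestBase does above:
+ * <pre>
+ *   return S3ATestUtils.prepareTestConfiguration(super.createConfiguration());
+ * </pre>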
+ * @param conf configuration to patch + * @return the now-patched configuration + */ + public static Configuration prepareTestConfiguration(final Configuration conf) { + // patch in S3Guard options + maybeEnableS3Guard(conf); + // set hadoop temp dir to a default value + String testUniqueForkId = + System.getProperty(TEST_UNIQUE_FORK_ID); + String tmpDir = conf.get(HADOOP_TMP_DIR, "target/build/test"); + if (testUniqueForkId != null) { + // patch temp dir for the specific branch + tmpDir = tmpDir + File.pathSeparatorChar + testUniqueForkId; + conf.set(HADOOP_TMP_DIR, tmpDir); + } + conf.set(BUFFER_DIR, tmpDir); + // add this so that even on tests where the FS is shared, + // the FS is always "magic" + conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); + return conf; + } + /** * Helper class to do diffs of metrics. */ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index b8610d64cd..90e88945b3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java index 38d5156ea0..d81c747fce 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java @@ -49,6 +49,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.mockito.invocation.InvocationOnMock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -514,6 +515,21 @@ public boolean isRecover() { } } + /** + * InvocationOnMock.getArgumentAt comes and goes with Mockito versions; this + * helper method is designed to be resilient to change. + * @param invocation invocation to query + * @param index argument index + * @param clazz class of return type + * @param type of return + * @return the argument of the invocation, cast to the given type. + */ + @SuppressWarnings("unchecked") + private static T getArgumentAt(InvocationOnMock invocation, int index, + Class clazz) { + return (T)invocation.getArguments()[index]; + } + /** * Instantiate mock client with the results and errors requested. 
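 * The answers below extract their request objects with the local
 * {@code getArgumentAt(invocation, 0, RequestType.class)} helper defined
 * above (RequestType being whichever AWS request class applies), which
 * simply casts {@code invocation.getArguments()[index]} and so does not
 * depend on a particular Mockito version.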
* @param results results to accrue @@ -539,7 +555,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, "Mock Fail on init " + results.requests.size()); } String uploadId = UUID.randomUUID().toString(); - InitiateMultipartUploadRequest req = invocation.getArgumentAt( + InitiateMultipartUploadRequest req = getArgumentAt(invocation, 0, InitiateMultipartUploadRequest.class); results.requests.put(uploadId, req); results.activeUploads.put(uploadId, req.getKey()); @@ -561,7 +577,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, throw new AmazonClientException( "Mock Fail on upload " + results.parts.size()); } - UploadPartRequest req = invocation.getArgumentAt( + UploadPartRequest req = getArgumentAt(invocation, 0, UploadPartRequest.class); results.parts.add(req); String etag = UUID.randomUUID().toString(); @@ -588,7 +604,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, throw new AmazonClientException( "Mock Fail on commit " + results.commits.size()); } - CompleteMultipartUploadRequest req = invocation.getArgumentAt( + CompleteMultipartUploadRequest req = getArgumentAt(invocation, 0, CompleteMultipartUploadRequest.class); results.commits.add(req); results.activeUploads.remove(req.getUploadId()); @@ -608,7 +624,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, throw new AmazonClientException( "Mock Fail on abort " + results.aborts.size()); } - AbortMultipartUploadRequest req = invocation.getArgumentAt( + AbortMultipartUploadRequest req = getArgumentAt(invocation, 0, AbortMultipartUploadRequest.class); String id = req.getUploadId(); String p = results.activeUploads.remove(id); @@ -630,7 +646,7 @@ public static AmazonS3 newMockS3Client(final ClientResults results, doAnswer(invocation -> { LOG.debug("deleteObject for {}", mockClient); synchronized (lock) { - results.deletes.add(invocation.getArgumentAt( + results.deletes.add(getArgumentAt(invocation, 0, DeleteObjectRequest.class)); return null; } @@ -643,8 +659,8 @@ public static AmazonS3 newMockS3Client(final ClientResults results, LOG.debug("deleteObject for {}", mockClient); synchronized (lock) { results.deletes.add(new DeleteObjectRequest( - invocation.getArgumentAt(0, String.class), - invocation.getArgumentAt(1, String.class) + getArgumentAt(invocation, 0, String.class), + getArgumentAt(invocation, 1, String.class) )); return null; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java deleted file mode 100644 index 9894ac4347..0000000000 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBLocalClientFactory.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.s3a.s3guard; - -import java.io.File; -import java.io.IOException; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; -import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; -import com.amazonaws.services.dynamodbv2.local.main.ServerRunner; -import com.amazonaws.services.dynamodbv2.local.server.DynamoDBProxyServer; -import org.apache.commons.lang3.StringUtils; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.s3a.DefaultS3ClientFactory; -import org.apache.hadoop.net.ServerSocketUtil; - -import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER; -import static org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet; -import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBClientFactory.DefaultDynamoDBClientFactory.getRegion; - -/** - * A DynamoDBClientFactory implementation that creates AmazonDynamoDB clients - * against an in-memory DynamoDBLocal server instance. - * - * You won't be charged bills for issuing any DynamoDB requests. However, the - * DynamoDBLocal is considered a simulator of the DynamoDB web service, so it - * may be stale or different. For example, the throttling is not yet supported - * in DynamoDBLocal. This is for testing purpose only. - * - * To use this for creating DynamoDB client in tests: - *

- * <ol>
- * <li>As all DynamoDBClientFactory implementations, this should be configured.</li>
- * <li>The singleton DynamoDBLocal server instance is started automatically when
- * creating the AmazonDynamoDB client for the first time. It still merits to
- * launch the server before all the tests and fail fast if error happens.</li>
- * <li>The server can be stopped explicitly, which is not actually needed in
- * tests as JVM termination will do that.</li>
- * </ol>
- * - * @see DefaultDynamoDBClientFactory - */ -public class DynamoDBLocalClientFactory extends Configured - implements DynamoDBClientFactory { - - /** The DynamoDBLocal dynamoDBLocalServer instance for testing. */ - private static DynamoDBProxyServer dynamoDBLocalServer; - private static String ddbEndpoint; - - private static final String SYSPROP_SQLITE_LIB = "sqlite4java.library.path"; - - @Override - public AmazonDynamoDB createDynamoDBClient(String defaultRegion) - throws IOException { - startSingletonServer(); - - final Configuration conf = getConf(); - // use the default credential provider chain - conf.unset(AWS_CREDENTIALS_PROVIDER); - final AWSCredentialsProvider credentials = - createAWSCredentialProviderSet(null, conf); - final ClientConfiguration awsConf = - DefaultS3ClientFactory.createAwsConf(conf); - // fail fast in case of service errors - awsConf.setMaxErrorRetry(3); - - final String region = getRegion(conf, defaultRegion); - LOG.info("Creating DynamoDBLocal client using endpoint {} in region {}", - ddbEndpoint, region); - - return AmazonDynamoDBClientBuilder.standard() - .withCredentials(credentials) - .withClientConfiguration(awsConf) - .withEndpointConfiguration( - new AwsClientBuilder.EndpointConfiguration(ddbEndpoint, region)) - .build(); - } - - /** - * Start a singleton in-memory DynamoDBLocal server if not started yet. - * @throws IOException if any error occurs - */ - public synchronized static void startSingletonServer() throws IOException { - if (dynamoDBLocalServer != null) { - return; - } - - // Set this property if it has not been set elsewhere - if (StringUtils.isEmpty(System.getProperty(SYSPROP_SQLITE_LIB))) { - String projectBuildDir = System.getProperty("project.build.directory"); - if (StringUtils.isEmpty(projectBuildDir)) { - projectBuildDir = "target"; - } - // sqlite4java lib should have been copied to $projectBuildDir/native-libs - System.setProperty(SYSPROP_SQLITE_LIB, - projectBuildDir + File.separator + "native-libs"); - LOG.info("Setting {} -> {}", - SYSPROP_SQLITE_LIB, System.getProperty(SYSPROP_SQLITE_LIB)); - } - - try { - // Start an in-memory local DynamoDB instance - final String port = String.valueOf(ServerSocketUtil.getPort(0, 100)); - ddbEndpoint = "http://localhost:" + port; - dynamoDBLocalServer = ServerRunner.createServerFromCommandLineArgs( - new String[]{"-inMemory", "-port", port}); - dynamoDBLocalServer.start(); - LOG.info("DynamoDBLocal singleton server was started at {}", ddbEndpoint); - } catch (Exception t) { - String msg = "Error starting DynamoDBLocal server at " + ddbEndpoint - + " " + t; - LOG.error(msg, t); - throw new IOException(msg, t); - } - } - - /** - * Stop the in-memory DynamoDBLocal server if it is started. 
- * @throws IOException if any error occurs - */ - public synchronized static void stopSingletonServer() throws IOException { - if (dynamoDBLocalServer != null) { - LOG.info("Shutting down the in-memory DynamoDBLocal server"); - try { - dynamoDBLocalServer.stop(); - } catch (Throwable t) { - String msg = "Error stopping DynamoDBLocal server at " + ddbEndpoint; - LOG.error(msg, t); - throw new IOException(msg, t); - } - } - } - -} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 806940bb7b..5a59400849 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -28,7 +28,6 @@ import com.google.common.collect.Sets; import org.junit.After; -import org.junit.Assert; import org.junit.Assume; import org.junit.Before; import org.junit.Test; @@ -43,6 +42,7 @@ import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Tristate; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.HadoopTestBase; /** * Main test class for MetadataStore implementations. @@ -51,7 +51,7 @@ * If your implementation may return missing results for recently set paths, * override {@link MetadataStoreTestBase#allowMissing()}. */ -public abstract class MetadataStoreTestBase extends Assert { +public abstract class MetadataStoreTestBase extends HadoopTestBase { private static final Logger LOG = LoggerFactory.getLogger(MetadataStoreTestBase.class); @@ -836,7 +836,7 @@ private void assertDirectorySize(String pathStr, int size) throws IOException { DirListingMetadata dirMeta = ms.listChildren(strToPath(pathStr)); if (!allowMissing()) { - assertNotNull("Directory " + pathStr + " in cache", dirMeta); + assertNotNull("Directory " + pathStr + " is null in cache", dirMeta); } if (!allowMissing() || dirMeta != null) { dirMeta = dirMeta.withoutTombstones(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java deleted file mode 100644 index 5763b8336e..0000000000 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java +++ /dev/null @@ -1,589 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.s3a.s3guard; - -import java.io.IOException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.dynamodbv2.document.DynamoDB; -import com.amazonaws.services.dynamodbv2.document.Item; -import com.amazonaws.services.dynamodbv2.document.PrimaryKey; -import com.amazonaws.services.dynamodbv2.document.Table; -import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; -import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; -import com.amazonaws.services.dynamodbv2.model.TableDescription; - -import com.google.common.collect.Lists; -import org.apache.commons.collections.CollectionUtils; -import org.apache.hadoop.fs.s3a.Tristate; - -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.s3a.MockS3ClientFactory; -import org.apache.hadoop.fs.s3a.S3AFileStatus; -import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.S3ClientFactory; -import org.apache.hadoop.security.UserGroupInformation; - -import static org.apache.hadoop.fs.s3a.Constants.*; -import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*; -import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*; -import static org.apache.hadoop.test.LambdaTestUtils.*; - -/** - * Test that {@link DynamoDBMetadataStore} implements {@link MetadataStore}. - * - * In this unit test, we use an in-memory DynamoDBLocal server instead of real - * AWS DynamoDB. An {@link S3AFileSystem} object is created and shared for - * initializing {@link DynamoDBMetadataStore} objects. There are no real S3 - * request issued as the underlying AWS S3Client is mocked. You won't be - * charged bills for AWS S3 or DynamoDB when you run this test. - * - * According to the base class, every test case will have independent contract - * to create a new {@link DynamoDBMetadataStore} instance and initializes it. - * A table will be created for each test by the test contract, and will be - * destroyed after the test case finishes. - */ -public class TestDynamoDBMetadataStore extends MetadataStoreTestBase { - private static final Logger LOG = - LoggerFactory.getLogger(TestDynamoDBMetadataStore.class); - private static final String BUCKET = "TestDynamoDBMetadataStore"; - private static final String S3URI = - URI.create(FS_S3A + "://" + BUCKET + "/").toString(); - public static final PrimaryKey - VERSION_MARKER_PRIMARY_KEY = createVersionMarkerPrimaryKey( - DynamoDBMetadataStore.VERSION_MARKER); - - /** The DynamoDB instance that can issue requests directly to server. */ - private static DynamoDB dynamoDB; - - @Rule - public final Timeout timeout = new Timeout(60 * 1000); - - /** - * Start the in-memory DynamoDBLocal server and initializes s3 file system. 
- */ - @BeforeClass - public static void setUpBeforeClass() throws Exception { - DynamoDBLocalClientFactory.startSingletonServer(); - try { - dynamoDB = new DynamoDBMSContract().getMetadataStore().getDynamoDB(); - } catch (AmazonServiceException e) { - final String msg = "Cannot initialize a DynamoDBMetadataStore instance " - + "against the local DynamoDB server. Perhaps the DynamoDBLocal " - + "server is not configured correctly. "; - LOG.error(msg, e); - // fail fast if the DynamoDBLocal server can not work - throw e; - } - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - if (dynamoDB != null) { - dynamoDB.shutdown(); - } - DynamoDBLocalClientFactory.stopSingletonServer(); - } - - /** - * Each contract has its own S3AFileSystem and DynamoDBMetadataStore objects. - */ - private static class DynamoDBMSContract extends AbstractMSContract { - private final S3AFileSystem s3afs; - private final DynamoDBMetadataStore ms = new DynamoDBMetadataStore(); - - DynamoDBMSContract() throws IOException { - this(new Configuration()); - } - - DynamoDBMSContract(Configuration conf) throws IOException { - // using mocked S3 clients - conf.setClass(S3_CLIENT_FACTORY_IMPL, MockS3ClientFactory.class, - S3ClientFactory.class); - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, S3URI); - // setting config for creating a DynamoDBClient against local server - conf.set(ACCESS_KEY, "dummy-access-key"); - conf.set(SECRET_KEY, "dummy-secret-key"); - conf.setBoolean(S3GUARD_DDB_TABLE_CREATE_KEY, true); - conf.setClass(S3Guard.S3GUARD_DDB_CLIENT_FACTORY_IMPL, - DynamoDBLocalClientFactory.class, DynamoDBClientFactory.class); - - // always create new file system object for a test contract - s3afs = (S3AFileSystem) FileSystem.newInstance(conf); - ms.initialize(s3afs); - } - - @Override - public S3AFileSystem getFileSystem() { - return s3afs; - } - - @Override - public DynamoDBMetadataStore getMetadataStore() { - return ms; - } - } - - @Override - public DynamoDBMSContract createContract() throws IOException { - return new DynamoDBMSContract(); - } - - @Override - public DynamoDBMSContract createContract(Configuration conf) throws - IOException { - return new DynamoDBMSContract(conf); - } - - @Override - FileStatus basicFileStatus(Path path, int size, boolean isDir) - throws IOException { - String owner = UserGroupInformation.getCurrentUser().getShortUserName(); - return isDir - ? new S3AFileStatus(true, path, owner) - : new S3AFileStatus(size, getModTime(), path, BLOCK_SIZE, owner); - } - - private DynamoDBMetadataStore getDynamoMetadataStore() throws IOException { - return (DynamoDBMetadataStore) getContract().getMetadataStore(); - } - - private S3AFileSystem getFileSystem() throws IOException { - return (S3AFileSystem) getContract().getFileSystem(); - } - - /** - * This tests that after initialize() using an S3AFileSystem object, the - * instance should have been initialized successfully, and tables are ACTIVE. 
- */ - @Test - public void testInitialize() throws IOException { - final String tableName = "testInitializeWithFileSystem"; - final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(s3afs); - verifyTableInitialized(tableName); - assertNotNull(ddbms.getTable()); - assertEquals(tableName, ddbms.getTable().getTableName()); - String expectedRegion = conf.get(S3GUARD_DDB_REGION_KEY, - s3afs.getBucketLocation(tableName)); - assertEquals("DynamoDB table should be in configured region or the same" + - " region as S3 bucket", - expectedRegion, - ddbms.getRegion()); - } - } - - /** - * This tests that after initialize() using a Configuration object, the - * instance should have been initialized successfully, and tables are ACTIVE. - */ - @Test - public void testInitializeWithConfiguration() throws IOException { - final String tableName = "testInitializeWithConfiguration"; - final Configuration conf = getFileSystem().getConf(); - conf.unset(S3GUARD_DDB_TABLE_NAME_KEY); - String savedRegion = conf.get(S3GUARD_DDB_REGION_KEY, - getFileSystem().getBucketLocation()); - conf.unset(S3GUARD_DDB_REGION_KEY); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(conf); - fail("Should have failed because the table name is not set!"); - } catch (IllegalArgumentException ignored) { - } - // config table name - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(conf); - fail("Should have failed because as the region is not set!"); - } catch (IllegalArgumentException ignored) { - } - // config region - conf.set(S3GUARD_DDB_REGION_KEY, savedRegion); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(conf); - verifyTableInitialized(tableName); - assertNotNull(ddbms.getTable()); - assertEquals(tableName, ddbms.getTable().getTableName()); - assertEquals("Unexpected key schema found!", - keySchema(), - ddbms.getTable().describe().getKeySchema()); - } - } - - /** - * Test that for a large batch write request, the limit is handled correctly. - */ - @Test - public void testBatchWrite() throws IOException { - final int[] numMetasToDeleteOrPut = { - -1, // null - 0, // empty collection - 1, // one path - S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT, // exact limit of a batch request - S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT + 1 // limit + 1 - }; - for (int numOldMetas : numMetasToDeleteOrPut) { - for (int numNewMetas : numMetasToDeleteOrPut) { - doTestBatchWrite(numOldMetas, numNewMetas); - } - } - } - - private void doTestBatchWrite(int numDelete, int numPut) throws IOException { - final String root = S3URI + "/testBatchWrite_" + numDelete + '_' + numPut; - final Path oldDir = new Path(root, "oldDir"); - final Path newDir = new Path(root, "newDir"); - LOG.info("doTestBatchWrite: oldDir={}, newDir={}", oldDir, newDir); - - DynamoDBMetadataStore ms = getDynamoMetadataStore(); - ms.put(new PathMetadata(basicFileStatus(oldDir, 0, true))); - ms.put(new PathMetadata(basicFileStatus(newDir, 0, true))); - - final List oldMetas = - numDelete < 0 ? null : new ArrayList(numDelete); - for (int i = 0; i < numDelete; i++) { - oldMetas.add(new PathMetadata( - basicFileStatus(new Path(oldDir, "child" + i), i, true))); - } - final List newMetas = - numPut < 0 ? 
null : new ArrayList(numPut); - for (int i = 0; i < numPut; i++) { - newMetas.add(new PathMetadata( - basicFileStatus(new Path(newDir, "child" + i), i, false))); - } - - Collection pathsToDelete = null; - if (oldMetas != null) { - // put all metadata of old paths and verify - ms.put(new DirListingMetadata(oldDir, oldMetas, false)); - assertEquals(0, ms.listChildren(newDir).withoutTombstones().numEntries()); - assertTrue(CollectionUtils.isEqualCollection(oldMetas, - ms.listChildren(oldDir).getListing())); - - pathsToDelete = new ArrayList<>(oldMetas.size()); - for (PathMetadata meta : oldMetas) { - pathsToDelete.add(meta.getFileStatus().getPath()); - } - } - - // move the old paths to new paths and verify - ms.move(pathsToDelete, newMetas); - assertEquals(0, ms.listChildren(oldDir).withoutTombstones().numEntries()); - if (newMetas != null) { - assertTrue(CollectionUtils.isEqualCollection(newMetas, - ms.listChildren(newDir).getListing())); - } - } - - @Test - public void testInitExistingTable() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - final String tableName = ddbms.getTable().getTableName(); - verifyTableInitialized(tableName); - // create existing table - ddbms.initTable(); - verifyTableInitialized(tableName); - } - - /** - * Test the low level version check code. - */ - @Test - public void testItemVersionCompatibility() throws Throwable { - verifyVersionCompatibility("table", - createVersionMarker(VERSION_MARKER, VERSION, 0)); - } - - /** - * Test that a version marker entry without the version number field - * is rejected as incompatible with a meaningful error message. - */ - @Test - public void testItemLacksVersion() throws Throwable { - intercept(IOException.class, E_NOT_VERSION_MARKER, - new VoidCallable() { - @Override - public void call() throws Exception { - verifyVersionCompatibility("table", - new Item().withPrimaryKey( - createVersionMarkerPrimaryKey(VERSION_MARKER))); - } - }); - } - - /** - * Delete the version marker and verify that table init fails. - */ - @Test - public void testTableVersionRequired() throws Exception { - Configuration conf = getFileSystem().getConf(); - int maxRetries = conf.getInt(S3GUARD_DDB_MAX_RETRIES, - S3GUARD_DDB_MAX_RETRIES_DEFAULT); - conf.setInt(S3GUARD_DDB_MAX_RETRIES, 3); - - final DynamoDBMetadataStore ddbms = createContract(conf).getMetadataStore(); - String tableName = conf.get(S3GUARD_DDB_TABLE_NAME_KEY, BUCKET); - Table table = verifyTableInitialized(tableName); - table.deleteItem(VERSION_MARKER_PRIMARY_KEY); - - // create existing table - intercept(IOException.class, E_NO_VERSION_MARKER, - new VoidCallable() { - @Override - public void call() throws Exception { - ddbms.initTable(); - } - }); - - conf.setInt(S3GUARD_DDB_MAX_RETRIES, maxRetries); - } - - /** - * Set the version value to a different number and verify that - * table init fails. 
- */ - @Test - public void testTableVersionMismatch() throws Exception { - final DynamoDBMetadataStore ddbms = createContract().getMetadataStore(); - String tableName = getFileSystem().getConf() - .get(S3GUARD_DDB_TABLE_NAME_KEY, BUCKET); - Table table = verifyTableInitialized(tableName); - table.deleteItem(VERSION_MARKER_PRIMARY_KEY); - Item v200 = createVersionMarker(VERSION_MARKER, 200, 0); - table.putItem(v200); - - // create existing table - intercept(IOException.class, E_INCOMPATIBLE_VERSION, - new VoidCallable() { - @Override - public void call() throws Exception { - ddbms.initTable(); - } - }); - } - - /** - * Test that initTable fails with IOException when table does not exist and - * table auto-creation is disabled. - */ - @Test - public void testFailNonexistentTable() throws IOException { - final String tableName = "testFailNonexistentTable"; - final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - conf.unset(S3GUARD_DDB_TABLE_CREATE_KEY); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(s3afs); - fail("Should have failed as table does not exist and table auto-creation" - + " is disabled"); - } catch (IOException ignored) { - } - } - - /** - * Test cases about root directory as it is not in the DynamoDB table. - */ - @Test - public void testRootDirectory() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - Path rootPath = new Path(S3URI); - verifyRootDirectory(ddbms.get(rootPath), true); - - ddbms.put(new PathMetadata(new S3AFileStatus(true, - new Path(rootPath, "foo"), - UserGroupInformation.getCurrentUser().getShortUserName()))); - verifyRootDirectory(ddbms.get(new Path(S3URI)), false); - } - - private void verifyRootDirectory(PathMetadata rootMeta, boolean isEmpty) { - assertNotNull(rootMeta); - final FileStatus status = rootMeta.getFileStatus(); - assertNotNull(status); - assertTrue(status.isDirectory()); - // UNKNOWN is always a valid option, but true / false should not contradict - if (isEmpty) { - assertNotSame("Should not be marked non-empty", - Tristate.FALSE, - rootMeta.isEmptyDirectory()); - } else { - assertNotSame("Should not be marked empty", - Tristate.TRUE, - rootMeta.isEmptyDirectory()); - } - } - - /** - * Test that when moving nested paths, all its ancestors up to destination - * root will also be created. - * Here is the directory tree before move: - *
-   * testMovePopulateAncestors
-   * ├── a
-   * │   └── b
-   * │       └── src
-   * │           ├── dir1
-   * │           │   └── dir2
-   * │           └── file1.txt
-   * └── c
-   *     └── d
-   *         └── dest
-   *
- * As part of rename(a/b/src, d/c/dest), S3A will enumerate the subtree at - * a/b/src. This test verifies that after the move, the new subtree at - * 'dest' is reachable from the root (i.e. c/ and c/d exist in the table. - * DynamoDBMetadataStore depends on this property to do recursive delete - * without a full table scan. - */ - @Test - public void testMovePopulatesAncestors() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - final String testRoot = "/testMovePopulatesAncestors"; - final String srcRoot = testRoot + "/a/b/src"; - final String destRoot = testRoot + "/c/d/e/dest"; - - final Path nestedPath1 = strToPath(srcRoot + "/file1.txt"); - ddbms.put(new PathMetadata(basicFileStatus(nestedPath1, 1024, false))); - final Path nestedPath2 = strToPath(srcRoot + "/dir1/dir2"); - ddbms.put(new PathMetadata(basicFileStatus(nestedPath2, 0, true))); - - // We don't put the destRoot path here, since put() would create ancestor - // entries, and we want to ensure that move() does it, instead. - - // Build enumeration of src / dest paths and do the move() - final Collection fullSourcePaths = Lists.newArrayList( - strToPath(srcRoot), - strToPath(srcRoot + "/dir1"), - strToPath(srcRoot + "/dir1/dir2"), - strToPath(srcRoot + "/file1.txt") - ); - final Collection pathsToCreate = Lists.newArrayList( - new PathMetadata(basicFileStatus(strToPath(destRoot), - 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1"), - 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1/dir2"), - 0, true)), - new PathMetadata(basicFileStatus(strToPath(destRoot + "/file1.txt"), - 1024, false)) - ); - - ddbms.move(fullSourcePaths, pathsToCreate); - - // assert that all the ancestors should have been populated automatically - assertCached(testRoot + "/c"); - assertCached(testRoot + "/c/d"); - assertCached(testRoot + "/c/d/e"); - assertCached(destRoot /* /c/d/e/dest */); - - // Also check moved files while we're at it - assertCached(destRoot + "/dir1"); - assertCached(destRoot + "/dir1/dir2"); - assertCached(destRoot + "/file1.txt"); - } - - @Test - public void testProvisionTable() throws IOException { - final DynamoDBMetadataStore ddbms = getDynamoMetadataStore(); - final String tableName = ddbms.getTable().getTableName(); - final ProvisionedThroughputDescription oldProvision = - dynamoDB.getTable(tableName).describe().getProvisionedThroughput(); - ddbms.provisionTable(oldProvision.getReadCapacityUnits() * 2, - oldProvision.getWriteCapacityUnits() * 2); - final ProvisionedThroughputDescription newProvision = - dynamoDB.getTable(tableName).describe().getProvisionedThroughput(); - LOG.info("Old provision = {}, new provision = {}", - oldProvision, newProvision); - assertEquals(oldProvision.getReadCapacityUnits() * 2, - newProvision.getReadCapacityUnits().longValue()); - assertEquals(oldProvision.getWriteCapacityUnits() * 2, - newProvision.getWriteCapacityUnits().longValue()); - } - - @Test - public void testDeleteTable() throws Exception { - final String tableName = "testDeleteTable"; - final S3AFileSystem s3afs = getFileSystem(); - final Configuration conf = s3afs.getConf(); - conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName); - try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) { - ddbms.initialize(s3afs); - // we can list the empty table - ddbms.listChildren(new Path(S3URI)); - - ddbms.destroy(); - verifyTableNotExist(tableName); - - // delete table once more; be ResourceNotFoundException swallowed silently - ddbms.destroy(); - 
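
The deleted testMovePopulatesAncestors above documents the invariant that DynamoDBMetadataStore relies on: after a move(), every ancestor of each destination path must already exist in the table, so a recursive delete can walk down from the root without a full table scan. A minimal, self-contained sketch of that ancestor enumeration, using plain java.nio paths rather than the Hadoop or DynamoDB classes (class and method names here are illustrative only):

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class AncestorSketch {
  // Collect every ancestor (excluding the root "/") of the destination
  // paths, the way move() is expected to populate them.
  static Set<String> ancestorsToCreate(Set<String> destinations) {
    Set<String> ancestors = new TreeSet<>();
    for (String dest : destinations) {
      for (Path p = Paths.get(dest).getParent();
           p != null && p.getNameCount() > 0; p = p.getParent()) {
        ancestors.add(p.toString());
      }
    }
    return ancestors;
  }

  public static void main(String[] args) {
    Set<String> dests = new TreeSet<>(Arrays.asList(
        "/c/d/e/dest/file1.txt", "/c/d/e/dest/dir1/dir2"));
    // Prints [/c, /c/d, /c/d/e, /c/d/e/dest, /c/d/e/dest/dir1]
    System.out.println(ancestorsToCreate(dests));
  }
}
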
verifyTableNotExist(tableName); - try { - // we can no longer list the destroyed table - ddbms.listChildren(new Path(S3URI)); - fail("Should have failed after the table is destroyed!"); - } catch (IOException ignored) { - } - } - } - - /** - * This validates the table is created and ACTIVE in DynamoDB. - * - * This should not rely on the {@link DynamoDBMetadataStore} implementation. - * Return the table - */ - private static Table verifyTableInitialized(String tableName) { - final Table table = dynamoDB.getTable(tableName); - final TableDescription td = table.describe(); - assertEquals(tableName, td.getTableName()); - assertEquals("ACTIVE", td.getTableStatus()); - return table; - } - - /** - * This validates the table is not found in DynamoDB. - * - * This should not rely on the {@link DynamoDBMetadataStore} implementation. - */ - private static void verifyTableNotExist(String tableName) throws Exception{ - intercept(ResourceNotFoundException.class, - () -> dynamoDB.getTable(tableName).describe()); - } - -} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java index 074319f582..2ea20b26b0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java @@ -37,7 +37,6 @@ */ public class TestLocalMetadataStore extends MetadataStoreTestBase { - private static final String MAX_ENTRIES_STR = "16"; private final static class LocalMSContract extends AbstractMSContract { @@ -48,7 +47,6 @@ private LocalMSContract() throws IOException { } private LocalMSContract(Configuration config) throws IOException { - config.set(LocalMetadataStore.CONF_MAX_RECORDS, MAX_ENTRIES_STR); fs = FileSystem.getLocal(config); } @@ -76,8 +74,8 @@ public AbstractMSContract createContract(Configuration conf) throws } @Test - public void testClearByAncestor() { - Cache cache = CacheBuilder.newBuilder().build(); + public void testClearByAncestor() throws Exception { + Cache cache = CacheBuilder.newBuilder().build(); // 1. 
Test paths without scheme/host assertClearResult(cache, "", "/", 0); @@ -122,7 +120,7 @@ public void testCacheTimedEvictionAfterWrite() { final long ttl = t1 + 50; // between t1 and t2 - Cache cache = CacheBuilder.newBuilder() + Cache cache = CacheBuilder.newBuilder() .expireAfterWrite(ttl, TimeUnit.NANOSECONDS /* nanos to avoid conversions */) .ticker(testTicker) @@ -143,7 +141,7 @@ public void testCacheTimedEvictionAfterWrite() { assertEquals("Cache should contain 3 records before eviction", 3, cache.size()); - PathMetadata pm1 = cache.getIfPresent(path1); + LocalMetadataEntry pm1 = cache.getIfPresent(path1); assertNotNull("PathMetadata should not be null before eviction", pm1); // set the ticker to a time when timed eviction should occur @@ -159,7 +157,7 @@ public void testCacheTimedEvictionAfterWrite() { assertNull("PathMetadata should be null after eviction", pm1); } - private static void populateMap(Cache cache, + private static void populateMap(Cache cache, String prefix) { populateEntry(cache, new Path(prefix + "/dirA/dirB/")); populateEntry(cache, new Path(prefix + "/dirA/dirB/dirC")); @@ -168,23 +166,20 @@ private static void populateMap(Cache cache, populateEntry(cache, new Path(prefix + "/dirA/file1")); } - private static void populateEntry(Cache cache, + private static void populateEntry(Cache cache, Path path) { - cache.put(path, new PathMetadata(new FileStatus(0, true, 0, 0, 0, path))); + FileStatus fileStatus = new FileStatus(0, true, 0, 0, 0, path); + cache.put(path, new LocalMetadataEntry(new PathMetadata(fileStatus))); } - private static int sizeOfMap(Cache cache) { - int count = 0; - for (PathMetadata meta : cache.asMap().values()) { - if (!meta.isDeleted()) { - count++; - } - } - return count; + private static long sizeOfMap(Cache cache) { + return cache.asMap().values().stream() + .filter(entry -> !entry.getFileMeta().isDeleted()) + .count(); } - private static void assertClearResult(Cache cache, - String prefixStr, String pathStr, int leftoverSize) { + private static void assertClearResult(Cache cache, + String prefixStr, String pathStr, int leftoverSize) throws IOException { populateMap(cache, prefixStr); LocalMetadataStore.deleteEntryByAncestor(new Path(prefixStr + pathStr), cache, true); diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java index 3e149a6906..79e8a698da 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java @@ -42,7 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java index 12c2e3ffc7..a68e6ac2bb 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java @@ -25,7 +25,7 @@ import 
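
The TestLocalMetadataStore hunk above switches the Guava cache values to LocalMetadataEntry wrappers and replaces the hand-rolled counting loop with a stream. A small stand-alone sketch of that pattern; the Entry class below is a stand-in for the real LocalMetadataEntry/PathMetadata types, not the Hadoop classes themselves:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class CacheCountSketch {
  // Stand-in for the LocalMetadataEntry wrapper used in the patch.
  static class Entry {
    private final boolean deleted;
    Entry(boolean deleted) { this.deleted = deleted; }
    boolean isDeleted() { return deleted; }
  }

  // Count only live entries, mirroring the new stream-based sizeOfMap().
  static long sizeOfMap(Cache<String, Entry> cache) {
    return cache.asMap().values().stream()
        .filter(entry -> !entry.isDeleted())
        .count();
  }

  public static void main(String[] args) {
    Cache<String, Entry> cache = CacheBuilder.newBuilder().build();
    cache.put("/dirA/file1", new Entry(false));
    cache.put("/dirA/file2", new Entry(true));   // tombstoned entry
    System.out.println(sizeOfMap(cache));        // 1
  }
}
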
com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider; import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider; -import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider; import org.apache.hadoop.fs.adl.oauth2.AzureADTokenProvider; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java index 9396a51fb2..197ab22be2 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java @@ -41,7 +41,7 @@ import java.util.Map; import java.util.Set; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java index 9a8530826e..5f051effef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java @@ -44,7 +44,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.commons.codec.binary.Base64; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java index aa7ac2e1d7..850e552758 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java @@ -20,7 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index e05327e4b3..52027621ef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -48,7 +48,7 @@ import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.conf.Configuration; @@ -2886,7 +2886,7 @@ public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException // There is no metadata found for the path. LOG.debug("Did not find any metadata for path: {}", key); - throw new FileNotFoundException("File" + f + " does not exist."); + throw new FileNotFoundException(f + " is not found"); } return status.toArray(new FileStatus[0]); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java index 68ddcdf16e..6e98755e77 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java index ea77510164..76ced3b96d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azure.security.Constants; import org.apache.hadoop.io.retry.RetryPolicy; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java index a0204bef47..f4ec1721ec 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import org.apache.commons.lang.Validate; +import org.apache.commons.lang3.Validate; import org.apache.hadoop.fs.azure.security.Constants; import org.apache.hadoop.fs.azure.security.SpnegoToken; import org.apache.hadoop.fs.azure.security.WasbDelegationTokenIdentifier; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java index 20dd4706b4..9c40325e21 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/JsonUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.azure.security; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hadoop.util.JsonSerialization; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,8 +38,7 @@ private 
JsonUtils() { public static Map parse(final String jsonString) throws IOException { try { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readerFor(Map.class).readValue(jsonString); + return JsonSerialization.mapReader().readValue(jsonString); } catch (Exception e) { LOG.debug("JSON Parsing exception: {} while parsing {}", e.getMessage(), jsonString); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java index 0aa93935fb..f54a2e1787 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java @@ -29,7 +29,7 @@ import java.net.HttpURLConnection; import java.util.Arrays; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java index 29611bf24d..4e88b4551d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java @@ -22,7 +22,7 @@ import java.net.URI; import java.util.Arrays; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java index d5f6437d96..1739cff76d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java @@ -36,7 +36,7 @@ import java.util.List; import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.net.URLCodec; -import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.http.client.utils.URIBuilder; import com.microsoft.azure.storage.AccessCondition; @@ -339,7 +339,7 @@ public Iterable listBlobs(String prefix, @Override public StorageUri getStorageUri() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } } @@ -590,20 +590,20 @@ public MockCloudPageBlobWrapper(URI uri, HashMap metadata, @Override public void create(long length, BlobRequestOptions options, OperationContext opContext) throws StorageException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public void uploadPages(InputStream sourceStream, long offset, long length, BlobRequestOptions options, OperationContext opContext) throws StorageException, IOException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override public 
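
The JsonUtils change above swaps a per-call ObjectMapper for the shared reader returned by Hadoop's JsonSerialization.mapReader(). A hedged sketch of the underlying idea using plain Jackson; the helper class name below is made up for illustration and is not the Hadoop utility:

import java.io.IOException;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

public final class SharedMapReader {
  // An ObjectReader is immutable and thread-safe, so one instance can be
  // reused for every parse instead of building a new ObjectMapper each time.
  private static final ObjectReader MAP_READER =
      new ObjectMapper().readerFor(Map.class);

  static Map<String, Object> parse(String json) throws IOException {
    return MAP_READER.readValue(json);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(parse("{\"authorized\":true,\"owner\":\"hadoop\"}"));
  }
}
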
ArrayList downloadPageRanges(BlobRequestOptions options, OperationContext opContext) throws StorageException { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override @@ -622,7 +622,7 @@ public void setWriteBlockSizeInBytes(int writeBlockSizeInBytes) { @Override public StorageUri getStorageUri() { - throw new NotImplementedException(); + throw new NotImplementedException("Code is not implemented"); } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java index 820ce4f240..b8cf5ba8bf 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.azure; import com.microsoft.azure.storage.blob.BlockEntry; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java index 2fbbcd1758..8d2a104eb4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index ea99016b2c..9db0eb549c 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -20,7 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java index 668b594be6..e49feb5f69 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java @@ -28,7 +28,7 @@ import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; diff --git 
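
The MockStorageInterface changes above add a message to every NotImplementedException because, unlike commons-lang 2.x, the commons-lang3 version used here has no zero-argument constructor for it. A minimal sketch of the resulting stub pattern (StubWrapper is a made-up class for illustration):

import org.apache.commons.lang3.NotImplementedException;

public class StubWrapper {
  // Stubbed-out operation: lang3 requires an explicit message (or cause).
  public String getStorageUri() {
    throw new NotImplementedException("Code is not implemented");
  }

  public static void main(String[] args) {
    try {
      new StubWrapper().getStorageUri();
    } catch (NotImplementedException e) {
      System.out.println(e.getMessage());   // Code is not implemented
    }
  }
}
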
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java index faa4aa275a..c486bdbc23 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.EnumSet; -import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java index 9568171219..cf6da25311 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java index 8f9d434eb9..973838acb9 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.commons.lang.time.FastDateFormat; +import org.apache.commons.lang3.time.FastDateFormat; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -76,7 +76,7 @@ class ExecutionSummarizer implements StatListener { startTime = System.currentTimeMillis(); // flatten the args string and store it commandLineArgs = - org.apache.commons.lang.StringUtils.join(args, ' '); + org.apache.commons.lang3.StringUtils.join(args, ' '); } /** diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java index 427174295c..73662bf8aa 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java index 877d434e5a..494b9a11c9 100644 --- 
a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.Random; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java index afafd55a69..2accbac784 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java +++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; -import org.apache.commons.lang.CharSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.resourceestimator.common.api.RecurrenceId; import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline; diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java index 5856626818..e6d09dcb3c 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools.rumen.anonymization; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; /** * Utility class to handle commonly performed tasks in a diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java index 1c92caf987..02fd48a071 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java @@ -25,7 +25,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRJobConfig; diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java index bd50ae0542..e1f6da5276 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java @@ -25,7 +25,7 @@ import java.io.FileOutputStream; import java.io.IOException; -import org.apache.commons.lang.StringUtils; +import 
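
The string of one-line hunks above (distcp, gridmix, rumen, the azure and streaming tests) all make the same mechanical change: swap org.apache.commons.lang imports for org.apache.commons.lang3, whose utility classes keep the same names and, for the calls touched here, the same call shape. A stand-alone sketch with arbitrary sample values:

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.FastDateFormat;

public class Lang3MigrationSketch {
  public static void main(String[] args) {
    // Same call shape as the gridmix ExecutionSummarizer change;
    // only the package of StringUtils differs.
    String commandLine =
        StringUtils.join(new String[] {"-Dgridmix", "trace.json"}, ' ');
    System.out.println(commandLine);   // -Dgridmix trace.json

    // FastDateFormat moved package too; this usage is unchanged.
    System.out.println(FastDateFormat.getInstance("yyyy-MM-dd")
        .format(System.currentTimeMillis()));
  }
}
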
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 21cc7cef8c..42ce94c829 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -192,20 +192,5 @@ - - - hdds - - false - - - - org.apache.hadoop - hadoop-ozone-filesystem - compile - ${project.version} - - - diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index f421e580ba..dca59d31af 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -67,15 +67,4 @@ - - - hdds - - false - - - hadoop-ozone - - - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index eea81fe44d..a863910861 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -102,6 +102,26 @@ public static ResourceRequest newInstance(Priority priority, String hostName, .build(); } + /** + * Clone a ResourceRequest object (shallow copy). Please keep it loaded with + * all (new) fields + * + * @param rr the object to copy from + * @return the copied object + */ + @Public + @Evolving + public static ResourceRequest clone(ResourceRequest rr) { + // Please keep it loaded with all (new) fields + return ResourceRequest.newBuilder().priority(rr.getPriority()) + .resourceName(rr.getResourceName()).capability(rr.getCapability()) + .numContainers(rr.getNumContainers()) + .relaxLocality(rr.getRelaxLocality()) + .nodeLabelExpression(rr.getNodeLabelExpression()) + .executionTypeRequest(rr.getExecutionTypeRequest()) + .allocationRequestId(rr.getAllocationRequestId()).build(); + } + @Public @Unstable public static ResourceRequestBuilder newBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 5292a25053..5842d64357 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -240,7 +240,7 @@ private static void addDeprecatedKeys() { public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" + DEFAULT_RM_SCHEDULER_PORT; - /** Miniumum request grant-able by the RM scheduler. */ + /** Minimum request grant-able by the RM scheduler. 
*/ public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB = YARN_PREFIX + "scheduler.minimum-allocation-mb"; public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java index 1d2d719d32..5b3c72cae4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java @@ -28,6 +28,8 @@ public interface RestApiErrorMessages { "than 63 characters"; String ERROR_COMPONENT_NAME_INVALID = "Component name must be no more than %s characters: %s"; + String ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME = + "Component name %s must not be same as service name %s"; String ERROR_USER_NAME_INVALID = "User name must be no more than 63 characters"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java index 549927327d..705e04065c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -143,6 +143,11 @@ public static void validateAndResolveService(Service service, throw new IllegalArgumentException(String.format(RestApiErrorMessages .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName())); } + if (service.getName().equals(comp.getName())) { + throw new IllegalArgumentException(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME, + comp.getName(), service.getName())); + } if (componentNames.contains(comp.getName())) { throw new IllegalArgumentException("Component name collision: " + comp.getName()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java index 243c6b3a61..ae031d4aad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java @@ -333,6 +333,24 @@ public void testDuplicateComponents() throws IOException { } } + @Test + 
public void testComponentNameSameAsServiceName() throws IOException { + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + Service app = new Service(); + app.setName("test"); + app.setVersion("v1"); + app.addComponent(createValidComponent("test")); + + //component name same as service name + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "component name matches service name"); + } catch (IllegalArgumentException e) { + assertEquals("Component name test must not be same as service name test", + e.getMessage()); + } + } + @Test public void testExternalDuplicateComponent() throws IOException { Service ext = createValidApplication("comp1"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 36c3cf1d4e..7265d24ac0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -451,16 +451,7 @@ private List cloneAsks() { for(ResourceRequest r : ask) { // create a copy of ResourceRequest as we might change it while the // RPC layer is using it to send info across - ResourceRequest rr = - ResourceRequest.newBuilder().priority(r.getPriority()) - .resourceName(r.getResourceName()).capability(r.getCapability()) - .numContainers(r.getNumContainers()) - .relaxLocality(r.getRelaxLocality()) - .nodeLabelExpression(r.getNodeLabelExpression()) - .executionTypeRequest(r.getExecutionTypeRequest()) - .allocationRequestId(r.getAllocationRequestId()) - .build(); - askList.add(rr); + askList.add(ResourceRequest.clone(r)); } return askList; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java index 11d703d890..51048660ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java @@ -570,11 +570,7 @@ public synchronized Allocation allocate( ContainerUpdates updateRequests) { List askCopy = new ArrayList(); for (ResourceRequest req : ask) { - ResourceRequest reqCopy = - ResourceRequest.newInstance(req.getPriority(), - req.getResourceName(), req.getCapability(), - req.getNumContainers(), req.getRelaxLocality()); - askCopy.add(reqCopy); + askCopy.add(ResourceRequest.clone(req)); } lastAsk = ask; lastRelease = release; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 70ff47b746..17e43cacda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -18,41 
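
Both client-side call sites above now delegate to the new ResourceRequest.clone() factory instead of copying fields by hand, which (as in the replaced TestAMRMClientOnRMRestart code) could silently drop newer fields such as the execution type and allocation request id. A hedged sketch of the intended usage; the builder values are arbitrary examples and CloneAskSketch is not a Hadoop class:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class CloneAskSketch {
  // Copy every pending ask before handing the list to another thread or
  // the RPC layer, so later mutations do not leak into the in-flight call.
  static List<ResourceRequest> cloneAsks(List<ResourceRequest> ask) {
    List<ResourceRequest> askList = new ArrayList<>(ask.size());
    for (ResourceRequest r : ask) {
      askList.add(ResourceRequest.clone(r));   // shallow copy of all fields
    }
    return askList;
  }

  public static void main(String[] args) {
    ResourceRequest rr = ResourceRequest.newBuilder()
        .priority(Priority.newInstance(1))
        .resourceName(ResourceRequest.ANY)
        .capability(Resource.newInstance(1024, 1))
        .numContainers(2)
        .allocationRequestId(42L)
        .build();
    ResourceRequest copy = ResourceRequest.clone(rr);
    System.out.println(copy.getAllocationRequestId());   // 42
  }
}
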
+18,9 @@ package org.apache.hadoop.yarn.client.api.impl; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.lang.Thread.State; -import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.DataInputByteBuffer; -import org.apache.hadoop.io.DataOutputBuffer; -import org.apache.hadoop.io.Text; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; @@ -74,7 +42,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; -import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -92,7 +59,6 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.AHSClient; -import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -100,7 +66,6 @@ import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; @@ -115,8 +80,28 @@ import org.mockito.ArgumentCaptor; import org.slf4j.event.Level; +import java.io.IOException; +import java.lang.Thread.State; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import 
static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** - * This class is to test class {@link YarnClient) and {@link YarnClientImpl}. + * This class is to test class {@link YarnClient). */ public class TestYarnClient extends ParameterizedSchedulerTestBase { @@ -146,17 +131,6 @@ public void testClientStop() { rm.stop(); } - @Test - public void testStartWithTimelineV15() throws Exception { - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f); - YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); - client.init(conf); - client.start(); - client.stop(); - } - @Test public void testStartTimelineClientWithErrors() throws Exception { @@ -413,7 +387,7 @@ public void testApplicationType() throws Exception { RMApp app = rm.submitApp(2000); RMApp app1 = rm.submitApp(200, "name", "user", - new HashMap(), false, "default", -1, + new HashMap<>(), false, "default", -1, null, "MAPREDUCE"); Assert.assertEquals("YARN", app.getApplicationType()); Assert.assertEquals("MAPREDUCE", app1.getApplicationType()); @@ -427,7 +401,7 @@ public void testApplicationTypeLimit() throws Exception { rm.start(); RMApp app1 = rm.submitApp(200, "name", "user", - new HashMap(), false, "default", -1, + new HashMap<>(), false, "default", -1, null, "MAPREDUCE-LENGTH-IS-20"); Assert.assertEquals("MAPREDUCE-LENGTH-IS-", app1.getApplicationType()); rm.stop(); @@ -444,7 +418,7 @@ public void testGetApplications() throws YarnException, IOException { List reports = client.getApplications(); Assert.assertEquals(reports, expectedReports); - Set appTypes = new HashSet(); + Set appTypes = new HashSet<>(); appTypes.add("YARN"); appTypes.add("NON-YARN"); @@ -601,7 +575,7 @@ public void testGetLabelsToNodes() throws YarnException, IOException { Assert.assertEquals(labelsToNodes.size(), 3); // Get labels to nodes for selected labels - Set setLabels = new HashSet(Arrays.asList("x", "z")); + Set setLabels = new HashSet<>(Arrays.asList("x", "z")); expectedLabelsToNodes = ((MockYarnClient)client).getLabelsToNodesMap(setLabels); labelsToNodes = client.getLabelsToNodes(setLabels); @@ -633,12 +607,12 @@ private static class MockYarnClient extends YarnClientImpl { private ApplicationReport mockReport; private List reports; - private HashMap> attempts = - new HashMap>(); - private HashMap> containers = - new HashMap>(); + private HashMap> attempts = + new HashMap<>(); + private HashMap> containers = + new HashMap<>(); private HashMap> containersFromAHS = - new HashMap>(); + new HashMap<>(); GetApplicationsResponse mockAppResponse = mock(GetApplicationsResponse.class); @@ -739,9 +713,9 @@ private List createAppReports() { "user", "queue", "appname", "host", 124, null, YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); - List applicationReports = new ArrayList(); + List applicationReports = new ArrayList<>(); applicationReports.add(newApplicationReport); - List appAttempts = new ArrayList(); + List appAttempts = new ArrayList<>(); ApplicationAttemptReport attempt = ApplicationAttemptReport.newInstance( ApplicationAttemptId.newInstance(applicationId, 1), "host", @@ -767,7 +741,7 @@ private List createAppReports() { appAttempts.add(attempt1); attempts.put(applicationId, appAttempts); - List containerReports = new ArrayList(); + List containerReports = new ArrayList<>(); 
ContainerReport container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, @@ -785,7 +759,7 @@ private List createAppReports() { //add containers to be sent from AHS List containerReportsForAHS = - new ArrayList(); + new ArrayList<>(); container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, @@ -843,7 +817,7 @@ private List getApplicationReports( List applicationReports, Set applicationTypes, EnumSet applicationStates) { - List appReports = new ArrayList(); + List appReports = new ArrayList<>(); for (ApplicationReport appReport : applicationReports) { if (applicationTypes != null && !applicationTypes.isEmpty()) { if (!applicationTypes.contains(appReport.getApplicationType())) { @@ -878,9 +852,9 @@ public Map> getLabelsToNodes(Set labels) } public Map> getLabelsToNodesMap() { - Map> map = new HashMap>(); + Map> map = new HashMap<>(); Set setNodeIds = - new HashSet(Arrays.asList( + new HashSet<>(Arrays.asList( NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0))); map.put("x", setNodeIds); map.put("y", setNodeIds); @@ -889,8 +863,8 @@ public Map> getLabelsToNodesMap() { } public Map> getLabelsToNodesMap(Set labels) { - Map> map = new HashMap>(); - Set setNodeIds = new HashSet(Arrays.asList( + Map> map = new HashMap<>(); + Set setNodeIds = new HashSet<>(Arrays.asList( NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0))); for (String label : labels) { map.put(label, setNodeIds); @@ -907,8 +881,8 @@ public Map> getNodeToLabels() throws YarnException, } public Map> getNodeToLabelsMap() { - Map> map = new HashMap>(); - Set setNodeLabels = new HashSet(Arrays.asList("x", "y")); + Map> map = new HashMap<>(); + Set setNodeLabels = new HashSet<>(Arrays.asList("x", "y")); map.put(NodeId.newInstance("host", 0), setNodeLabels); return map; } @@ -985,7 +959,7 @@ public List getContainersReport( private ContainerReport getContainer( ContainerId containerId, HashMap> containersToAppAttemptMapping) - throws YarnException, IOException { + throws YarnException { List containersForAppAttempt = containersToAppAttemptMapping.get(containerId .getApplicationAttemptId()); @@ -1119,174 +1093,6 @@ private void waitTillAccepted(YarnClient rmClient, ApplicationId appId, Assert.assertEquals(unmanagedApplication, report.isUnmanagedApp()); } - @Test - public void testAsyncAPIPollTimeout() { - testAsyncAPIPollTimeoutHelper(null, false); - testAsyncAPIPollTimeoutHelper(0L, true); - testAsyncAPIPollTimeoutHelper(1L, true); - } - - private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout, - boolean expectedTimeoutEnforcement) { - YarnClientImpl client = new YarnClientImpl(); - try { - Configuration conf = getConf(); - if (valueForTimeout != null) { - conf.setLong( - YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, - valueForTimeout); - } - - client.init(conf); - - Assert.assertEquals( - expectedTimeoutEnforcement, client.enforceAsyncAPITimeout()); - } finally { - IOUtils.closeQuietly(client); - } - } - - @Test - public void testBestEffortTimelineDelegationToken() - throws Exception { - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); - - YarnClientImpl client = spy(new YarnClientImpl() { - - @Override - TimelineClient createTimelineClient() throws IOException, 
YarnException { - timelineClient = mock(TimelineClient.class); - when(timelineClient.getDelegationToken(any(String.class))) - .thenThrow(new RuntimeException("Best effort test exception")); - return timelineClient; - } - }); - - client.init(conf); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, - true); - client.serviceInit(conf); - client.getTimelineDelegationToken(); - - try { - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false); - client.serviceInit(conf); - client.getTimelineDelegationToken(); - Assert.fail("Get delegation token should have thrown an exception"); - } catch (IOException e) { - // Success - } - } - - @Test - public void testAutomaticTimelineDelegationTokenLoading() - throws Exception { - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); - TimelineDelegationTokenIdentifier timelineDT = - new TimelineDelegationTokenIdentifier(); - final Token dToken = - new Token( - timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text()); - // create a mock client - YarnClientImpl client = spy(new YarnClientImpl() { - - @Override - TimelineClient createTimelineClient() throws IOException, YarnException { - timelineClient = mock(TimelineClient.class); - when(timelineClient.getDelegationToken(any(String.class))) - .thenReturn(dToken); - return timelineClient; - } - - - @Override - protected void serviceStart() throws Exception { - rmClient = mock(ApplicationClientProtocol.class); - } - - @Override - protected void serviceStop() throws Exception { - } - - @Override - public ApplicationReport getApplicationReport(ApplicationId appId) { - ApplicationReport report = mock(ApplicationReport.class); - when(report.getYarnApplicationState()) - .thenReturn(YarnApplicationState.RUNNING); - return report; - } - - @Override - public boolean isSecurityEnabled() { - return true; - } - }); - client.init(conf); - client.start(); - try { - // when i == 0, timeline DT already exists, no need to get one more - // when i == 1, timeline DT doesn't exist, need to get one more - for (int i = 0; i < 2; ++i) { - ApplicationSubmissionContext context = - mock(ApplicationSubmissionContext.class); - ApplicationId applicationId = ApplicationId.newInstance(0, i + 1); - when(context.getApplicationId()).thenReturn(applicationId); - DataOutputBuffer dob = new DataOutputBuffer(); - Credentials credentials = new Credentials(); - if (i == 0) { - credentials.addToken(client.timelineService, dToken); - } - credentials.writeTokenStorageToStream(dob); - ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); - ContainerLaunchContext clc = ContainerLaunchContext.newInstance( - null, null, null, null, tokens, null); - when(context.getAMContainerSpec()).thenReturn(clc); - client.submitApplication(context); - if (i == 0) { - // GetTimelineDelegationToken shouldn't be called - verify(client, never()).getTimelineDelegationToken(); - } - // In either way, token should be there - credentials = new Credentials(); - DataInputByteBuffer dibb = new DataInputByteBuffer(); - tokens = clc.getTokens(); - if (tokens != null) { - dibb.reset(tokens); - credentials.readTokenStorageStream(dibb); - tokens.rewind(); - } - Collection> dTokens = - credentials.getAllTokens(); - Assert.assertEquals(1, dTokens.size()); - Assert.assertEquals(dToken, dTokens.iterator().next()); - } - } finally { - client.stop(); - } - } - - @Test - public void 
testParseTimelineDelegationTokenRenewer() throws Exception { - // Client side - YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); - Configuration conf = getConf(); - conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); - conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM"); - conf.set( - YarnConfiguration.RM_ADDRESS, "localhost:8188"); - try { - client.init(conf); - client.start(); - Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer); - } finally { - client.stop(); - } - } - @Test(timeout = 30000, expected = ApplicationNotFoundException.class) public void testShouldNotRetryForeverForNonNetworkExceptions() throws Exception { YarnConfiguration conf = getConf(); @@ -1353,38 +1159,35 @@ private void testCreateTimelineClientWithError( timelineClientBestEffort); conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, timelineVersion); - YarnClient client = new MockYarnClient(); - if (client instanceof YarnClientImpl) { - YarnClientImpl impl = (YarnClientImpl) client; - YarnClientImpl spyClient = spy(impl); - when(spyClient.createTimelineClient()).thenThrow(mockErr); - CreateTimelineClientErrorVerifier verifier = spy(errVerifier); - spyClient.init(conf); - spyClient.start(); + MockYarnClient client = new MockYarnClient(); + MockYarnClient spyClient = spy(client); + when(spyClient.createTimelineClient()).thenThrow(mockErr); + CreateTimelineClientErrorVerifier verifier = spy(errVerifier); + spyClient.init(conf); + spyClient.start(); - ApplicationSubmissionContext context = - mock(ApplicationSubmissionContext.class); - ContainerLaunchContext containerContext = - mock(ContainerLaunchContext.class); - ApplicationId applicationId = - ApplicationId.newInstance(System.currentTimeMillis(), 1); - when(containerContext.getTokens()).thenReturn(null); - when(context.getApplicationId()).thenReturn(applicationId); - when(spyClient.isSecurityEnabled()).thenReturn(true); - when(context.getAMContainerSpec()).thenReturn(containerContext); + ApplicationSubmissionContext context = + mock(ApplicationSubmissionContext.class); + ContainerLaunchContext containerContext = + mock(ContainerLaunchContext.class); + ApplicationId applicationId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + when(containerContext.getTokens()).thenReturn(null); + when(context.getApplicationId()).thenReturn(applicationId); + when(spyClient.isSecurityEnabled()).thenReturn(true); + when(context.getAMContainerSpec()).thenReturn(containerContext); - try { - spyClient.submitApplication(context); - } catch (Throwable e) { - verifier.verifyError(e); - } finally { - // Make sure the verifier runs with expected times - // This is required because in case throwable is swallowed - // and verifyError never gets the chance to run - verify(verifier, times(verifier.getExpectedTimes())) - .verifyError(any(Throwable.class)); - spyClient.stop(); - } + try { + spyClient.submitApplication(context); + } catch (Throwable e) { + verifier.verifyError(e); + } finally { + // Make sure the verifier runs with expected times + // This is required because in case throwable is swallowed + // and verifyError never gets the chance to run + verify(verifier, times(verifier.getExpectedTimes())) + .verifyError(any(Throwable.class)); + spyClient.stop(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java
new file mode 100644
index 0000000000..dd0aa5c1a0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager
+    .ParameterizedSchedulerTestBase;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * This class is to test class {@link YarnClientImpl}.
+ */ +public class TestYarnClientImpl extends ParameterizedSchedulerTestBase { + + public TestYarnClientImpl(SchedulerType type) throws IOException { + super(type); + } + + @Before + public void setup() { + QueueMetrics.clearQueueMetrics(); + DefaultMetricsSystem.setMiniClusterMode(true); + } + + @Test + public void testStartWithTimelineV15() { + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f); + YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); + client.init(conf); + client.start(); + client.stop(); + } + + @Test + public void testAsyncAPIPollTimeout() { + testAsyncAPIPollTimeoutHelper(null, false); + testAsyncAPIPollTimeoutHelper(0L, true); + testAsyncAPIPollTimeoutHelper(1L, true); + } + + private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout, + boolean expectedTimeoutEnforcement) { + YarnClientImpl client = new YarnClientImpl(); + try { + Configuration conf = getConf(); + if (valueForTimeout != null) { + conf.setLong( + YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, + valueForTimeout); + } + + client.init(conf); + + Assert.assertEquals( + expectedTimeoutEnforcement, client.enforceAsyncAPITimeout()); + } finally { + IOUtils.closeQuietly(client); + } + } + + @Test + public void testBestEffortTimelineDelegationToken() + throws Exception { + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf); + + YarnClientImpl client = spy(new YarnClientImpl() { + + @Override + TimelineClient createTimelineClient() throws IOException, YarnException { + timelineClient = mock(TimelineClient.class); + when(timelineClient.getDelegationToken(any(String.class))) + .thenThrow(new RuntimeException("Best effort test exception")); + return timelineClient; + } + }); + + client.init(conf); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, + true); + client.serviceInit(conf); + client.getTimelineDelegationToken(); + + try { + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false); + client.serviceInit(conf); + client.getTimelineDelegationToken(); + Assert.fail("Get delegation token should have thrown an exception"); + } catch (IOException e) { + // Success + } + } + + @Test + public void testAutomaticTimelineDelegationTokenLoading() + throws Exception { + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf); + TimelineDelegationTokenIdentifier timelineDT = + new TimelineDelegationTokenIdentifier(); + final Token dToken = + new Token<>( + timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text()); + // create a mock client + YarnClientImpl client = spy(new YarnClientImpl() { + + @Override + TimelineClient createTimelineClient() throws IOException, YarnException { + timelineClient = mock(TimelineClient.class); + when(timelineClient.getDelegationToken(any(String.class))) + .thenReturn(dToken); + return timelineClient; + } + + + @Override + protected void serviceStart() { + rmClient = mock(ApplicationClientProtocol.class); + } + + @Override + protected void serviceStop() { + } + + @Override + public ApplicationReport getApplicationReport(ApplicationId appId) { + ApplicationReport report = 
mock(ApplicationReport.class); + when(report.getYarnApplicationState()) + .thenReturn(YarnApplicationState.RUNNING); + return report; + } + + @Override + public boolean isSecurityEnabled() { + return true; + } + }); + client.init(conf); + client.start(); + try { + // when i == 0, timeline DT already exists, no need to get one more + // when i == 1, timeline DT doesn't exist, need to get one more + for (int i = 0; i < 2; ++i) { + ApplicationSubmissionContext context = + mock(ApplicationSubmissionContext.class); + ApplicationId applicationId = ApplicationId.newInstance(0, i + 1); + when(context.getApplicationId()).thenReturn(applicationId); + DataOutputBuffer dob = new DataOutputBuffer(); + Credentials credentials = new Credentials(); + if (i == 0) { + credentials.addToken(client.timelineService, dToken); + } + credentials.writeTokenStorageToStream(dob); + ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + ContainerLaunchContext clc = ContainerLaunchContext.newInstance( + null, null, null, null, tokens, null); + when(context.getAMContainerSpec()).thenReturn(clc); + client.submitApplication(context); + if (i == 0) { + // GetTimelineDelegationToken shouldn't be called + verify(client, never()).getTimelineDelegationToken(); + } + // In either way, token should be there + credentials = new Credentials(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + tokens = clc.getTokens(); + if (tokens != null) { + dibb.reset(tokens); + credentials.readTokenStorageStream(dibb); + tokens.rewind(); + } + Collection> dTokens = + credentials.getAllTokens(); + Assert.assertEquals(1, dTokens.size()); + Assert.assertEquals(dToken, dTokens.iterator().next()); + } + } finally { + client.stop(); + } + } + + @Test + public void testParseTimelineDelegationTokenRenewer() { + // Client side + YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); + Configuration conf = getConf(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM"); + conf.set( + YarnConfiguration.RM_ADDRESS, "localhost:8188"); + try { + client.init(conf); + client.start(); + Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer); + } finally { + client.stop(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index af1440a56e..eddcbaae67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -242,7 +242,7 @@ src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js src/main/resources/webapps/static/jt/jquery.jstree.js - src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js + src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css src/test/resources/application_1440536969523_0001.har/_index diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 0d045f36a9..0e9f0a77be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -83,6 +83,7 @@ static class ServletStruct { public String name; public String spec; public Map params; + public boolean loadExistingFilters = true; } final String name; @@ -151,12 +152,13 @@ public Builder withServlet(String name, String pathSpec, public Builder withServlet(String name, String pathSpec, Class servlet, - Map params) { + Map params,boolean loadExistingFilters) { ServletStruct struct = new ServletStruct(); struct.clazz = servlet; struct.name = name; struct.spec = pathSpec; struct.params = params; + struct.loadExistingFilters = loadExistingFilters; servlets.add(struct); return this; } @@ -256,9 +258,15 @@ public void setup() { pathList.add("/" + wsName + "/*"); } } + for (ServletStruct s : servlets) { if (!pathList.contains(s.spec)) { - pathList.add(s.spec); + // The servlet told us to not load-existing filters, but we still want + // to add the default authentication filter always, so add it to the + // pathList + if (!s.loadExistingFilters) { + pathList.add(s.spec); + } } } if (conf == null) { @@ -333,7 +341,7 @@ public void setup() { HttpServer2 server = builder.build(); for(ServletStruct struct: servlets) { - if (struct.params != null) { + if (!struct.loadExistingFilters) { server.addInternalServlet(struct.name, struct.spec, struct.clazz, struct.params); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java index d4fba1f241..91e5f89df7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java @@ -68,7 +68,7 @@ protected void render(Block html) { html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css")) .link(root_url("static/dt-1.9.4/css/jui-dt.css")) .script(root_url("static/jquery/jquery-3.3.1.min.js")) - .script(root_url("static/jquery/jquery-ui-1.9.1.custom.min.js")) + .script(root_url("static/jquery/jquery-ui-1.12.1.custom.min.js")) .script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js")) .script(root_url("static/yarn.dt.plugins.js")) .script(root_url("static/dt-sorting/natural.js")) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js new file mode 100644 index 0000000000..25398a1674 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js @@ -0,0 +1,13 @@ +/*! 
jQuery UI - v1.12.1 - 2016-09-14 +* http://jqueryui.com +* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js +* Copyright jQuery Foundation and other contributors; Licensed MIT */ + +(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)})(function(t){function e(t){for(var e=t.css("visibility");"inherit"===e;)t=t.parent(),e=t.css("visibility");return"hidden"!==e}function i(t){for(var e,i;t.length&&t[0]!==document;){if(e=t.css("position"),("absolute"===e||"relative"===e||"fixed"===e)&&(i=parseInt(t.css("zIndex"),10),!isNaN(i)&&0!==i))return i;t=t.parent()}return 0}function s(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},t.extend(this._defaults,this.regional[""]),this.regional.en=t.extend(!0,{},this.regional[""]),this.regional["en-US"]=t.extend(!0,{},this.regional.en),this.dpDiv=n(t("
"))}function n(e){var i="button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";return e.on("mouseout",i,function(){t(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).removeClass("ui-datepicker-next-hover")}).on("mouseover",i,o)}function o(){t.datepicker._isDisabledDatepicker(m.inline?m.dpDiv.parent()[0]:m.input[0])||(t(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),t(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).addClass("ui-datepicker-next-hover"))}function a(e,i){t.extend(e,i);for(var s in i)null==i[s]&&(e[s]=i[s]);return e}function r(t){return function(){var e=this.element.val();t.apply(this,arguments),this._refresh(),e!==this.element.val()&&this._trigger("change")}}t.ui=t.ui||{},t.ui.version="1.12.1";var h=0,l=Array.prototype.slice;t.cleanData=function(e){return function(i){var s,n,o;for(o=0;null!=(n=i[o]);o++)try{s=t._data(n,"events"),s&&s.remove&&t(n).triggerHandler("remove")}catch(a){}e(i)}}(t.cleanData),t.widget=function(e,i,s){var n,o,a,r={},h=e.split(".")[0];e=e.split(".")[1];var l=h+"-"+e;return s||(s=i,i=t.Widget),t.isArray(s)&&(s=t.extend.apply(null,[{}].concat(s))),t.expr[":"][l.toLowerCase()]=function(e){return!!t.data(e,l)},t[h]=t[h]||{},n=t[h][e],o=t[h][e]=function(t,e){return this._createWidget?(arguments.length&&this._createWidget(t,e),void 0):new o(t,e)},t.extend(o,n,{version:s.version,_proto:t.extend({},s),_childConstructors:[]}),a=new i,a.options=t.widget.extend({},a.options),t.each(s,function(e,s){return t.isFunction(s)?(r[e]=function(){function t(){return i.prototype[e].apply(this,arguments)}function n(t){return i.prototype[e].apply(this,t)}return function(){var e,i=this._super,o=this._superApply;return this._super=t,this._superApply=n,e=s.apply(this,arguments),this._super=i,this._superApply=o,e}}(),void 0):(r[e]=s,void 0)}),o.prototype=t.widget.extend(a,{widgetEventPrefix:n?a.widgetEventPrefix||e:e},r,{constructor:o,namespace:h,widgetName:e,widgetFullName:l}),n?(t.each(n._childConstructors,function(e,i){var s=i.prototype;t.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete n._childConstructors):i._childConstructors.push(o),t.widget.bridge(e,o),o},t.widget.extend=function(e){for(var i,s,n=l.call(arguments,1),o=0,a=n.length;a>o;o++)for(i in n[o])s=n[o][i],n[o].hasOwnProperty(i)&&void 0!==s&&(e[i]=t.isPlainObject(s)?t.isPlainObject(e[i])?t.widget.extend({},e[i],s):t.widget.extend({},s):s);return e},t.widget.bridge=function(e,i){var s=i.prototype.widgetFullName||e;t.fn[e]=function(n){var o="string"==typeof n,a=l.call(arguments,1),r=this;return o?this.length||"instance"!==n?this.each(function(){var i,o=t.data(this,s);return"instance"===n?(r=o,!1):o?t.isFunction(o[n])&&"_"!==n.charAt(0)?(i=o[n].apply(o,a),i!==o&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+n+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+n+"'")}):r=void 0:(a.length&&(n=t.widget.extend.apply(null,[n].concat(a))),this.each(function(){var e=t.data(this,s);e?(e.option(n||{}),e._init&&e._init()):t.data(this,s,new 
i(n,this))})),r}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"
",options:{classes:{},disabled:!1,create:null},_createWidget:function(e,i){i=t(i||this.defaultElement||this)[0],this.element=t(i),this.uuid=h++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=t(),this.hoverable=t(),this.focusable=t(),this.classesElementLookup={},i!==this&&(t.data(i,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===i&&this.destroy()}}),this.document=t(i.style?i.ownerDocument:i.document||i),this.window=t(this.document[0].defaultView||this.document[0].parentWindow)),this.options=t.widget.extend({},this.options,this._getCreateOptions(),e),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:t.noop,_create:t.noop,_init:t.noop,destroy:function(){var e=this;this._destroy(),t.each(this.classesElementLookup,function(t,i){e._removeClass(i,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:t.noop,widget:function(){return this.element},option:function(e,i){var s,n,o,a=e;if(0===arguments.length)return t.widget.extend({},this.options);if("string"==typeof e)if(a={},s=e.split("."),e=s.shift(),s.length){for(n=a[e]=t.widget.extend({},this.options[e]),o=0;s.length-1>o;o++)n[s[o]]=n[s[o]]||{},n=n[s[o]];if(e=s.pop(),1===arguments.length)return void 0===n[e]?null:n[e];n[e]=i}else{if(1===arguments.length)return void 0===this.options[e]?null:this.options[e];a[e]=i}return this._setOptions(a),this},_setOptions:function(t){var e;for(e in t)this._setOption(e,t[e]);return this},_setOption:function(t,e){return"classes"===t&&this._setOptionClasses(e),this.options[t]=e,"disabled"===t&&this._setOptionDisabled(e),this},_setOptionClasses:function(e){var i,s,n;for(i in e)n=this.classesElementLookup[i],e[i]!==this.options.classes[i]&&n&&n.length&&(s=t(n.get()),this._removeClass(n,i),s.addClass(this._classes({element:s,keys:i,classes:e,add:!0})))},_setOptionDisabled:function(t){this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,!!t),t&&(this._removeClass(this.hoverable,null,"ui-state-hover"),this._removeClass(this.focusable,null,"ui-state-focus"))},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_classes:function(e){function i(i,o){var a,r;for(r=0;i.length>r;r++)a=n.classesElementLookup[i[r]]||t(),a=e.add?t(t.unique(a.get().concat(e.element.get()))):t(a.not(e.element).get()),n.classesElementLookup[i[r]]=a,s.push(i[r]),o&&e.classes[i[r]]&&s.push(e.classes[i[r]])}var s=[],n=this;return e=t.extend({element:this.element,classes:this.options.classes||{}},e),this._on(e.element,{remove:"_untrackClassesElement"}),e.keys&&i(e.keys.match(/\S+/g)||[],!0),e.extra&&i(e.extra.match(/\S+/g)||[]),s.join(" ")},_untrackClassesElement:function(e){var i=this;t.each(i.classesElementLookup,function(s,n){-1!==t.inArray(e.target,n)&&(i.classesElementLookup[s]=t(n.not(e.target).get()))})},_removeClass:function(t,e,i){return this._toggleClass(t,e,i,!1)},_addClass:function(t,e,i){return this._toggleClass(t,e,i,!0)},_toggleClass:function(t,e,i,s){s="boolean"==typeof s?s:i;var n="string"==typeof t||null===t,o={extra:n?e:i,keys:n?t:e,element:n?this.element:t,add:s};return o.element.toggleClass(this._classes(o),s),this},_on:function(e,i,s){var n,o=this;"boolean"!=typeof 
e&&(s=i,i=e,e=!1),s?(i=n=t(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),t.each(s,function(s,a){function r(){return e||o.options.disabled!==!0&&!t(this).hasClass("ui-state-disabled")?("string"==typeof a?o[a]:a).apply(o,arguments):void 0}"string"!=typeof a&&(r.guid=a.guid=a.guid||r.guid||t.guid++);var h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+o.eventNamespace,c=h[2];c?n.on(l,c,r):i.on(l,r)})},_off:function(e,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.off(i).off(i),this.bindings=t(this.bindings.not(e).get()),this.focusable=t(this.focusable.not(e).get()),this.hoverable=t(this.hoverable.not(e).get())},_delay:function(t,e){function i(){return("string"==typeof t?s[t]:t).apply(s,arguments)}var s=this;return setTimeout(i,e||0)},_hoverable:function(e){this.hoverable=this.hoverable.add(e),this._on(e,{mouseenter:function(e){this._addClass(t(e.currentTarget),null,"ui-state-hover")},mouseleave:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-hover")}})},_focusable:function(e){this.focusable=this.focusable.add(e),this._on(e,{focusin:function(e){this._addClass(t(e.currentTarget),null,"ui-state-focus")},focusout:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-focus")}})},_trigger:function(e,i,s){var n,o,a=this.options[e];if(s=s||{},i=t.Event(i),i.type=(e===this.widgetEventPrefix?e:this.widgetEventPrefix+e).toLowerCase(),i.target=this.element[0],o=i.originalEvent)for(n in o)n in i||(i[n]=o[n]);return this.element.trigger(i,s),!(t.isFunction(a)&&a.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},t.each({show:"fadeIn",hide:"fadeOut"},function(e,i){t.Widget.prototype["_"+e]=function(s,n,o){"string"==typeof n&&(n={effect:n});var a,r=n?n===!0||"number"==typeof n?i:n.effect||i:e;n=n||{},"number"==typeof n&&(n={duration:n}),a=!t.isEmptyObject(n),n.complete=o,n.delay&&s.delay(n.delay),a&&t.effects&&t.effects.effect[r]?s[e](n):r!==e&&s[r]?s[r](n.duration,n.easing,o):s.queue(function(i){t(this)[e](),o&&o.call(s[0]),i()})}}),t.widget,function(){function e(t,e,i){return[parseFloat(t[0])*(u.test(t[0])?e/100:1),parseFloat(t[1])*(u.test(t[1])?i/100:1)]}function i(e,i){return parseInt(t.css(e,i),10)||0}function s(e){var i=e[0];return 9===i.nodeType?{width:e.width(),height:e.height(),offset:{top:0,left:0}}:t.isWindow(i)?{width:e.width(),height:e.height(),offset:{top:e.scrollTop(),left:e.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:e.outerWidth(),height:e.outerHeight(),offset:e.offset()}}var n,o=Math.max,a=Math.abs,r=/left|center|right/,h=/top|center|bottom/,l=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,u=/%$/,d=t.fn.position;t.position={scrollbarWidth:function(){if(void 0!==n)return n;var e,i,s=t("
"),o=s.children()[0];return t("body").append(s),e=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,e===i&&(i=s[0].clientWidth),s.remove(),n=e-i},getScrollInfo:function(e){var i=e.isWindow||e.isDocument?"":e.element.css("overflow-x"),s=e.isWindow||e.isDocument?"":e.element.css("overflow-y"),n="scroll"===i||"auto"===i&&e.widthi?"left":e>0?"right":"center",vertical:0>r?"top":s>0?"bottom":"middle"};l>p&&p>a(e+i)&&(u.horizontal="center"),c>f&&f>a(s+r)&&(u.vertical="middle"),u.important=o(a(e),a(i))>o(a(s),a(r))?"horizontal":"vertical",n.using.call(this,t,u)}),h.offset(t.extend(D,{using:r}))})},t.ui.position={fit:{left:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=t.left-e.collisionPosition.marginLeft,h=n-r,l=r+e.collisionWidth-a-n;e.collisionWidth>a?h>0&&0>=l?(i=t.left+h+e.collisionWidth-a-n,t.left+=h-i):t.left=l>0&&0>=h?n:h>l?n+a-e.collisionWidth:n:h>0?t.left+=h:l>0?t.left-=l:t.left=o(t.left-r,t.left)},top:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollTop:s.offset.top,a=e.within.height,r=t.top-e.collisionPosition.marginTop,h=n-r,l=r+e.collisionHeight-a-n;e.collisionHeight>a?h>0&&0>=l?(i=t.top+h+e.collisionHeight-a-n,t.top+=h-i):t.top=l>0&&0>=h?n:h>l?n+a-e.collisionHeight:n:h>0?t.top+=h:l>0?t.top-=l:t.top=o(t.top-r,t.top)}},flip:{left:function(t,e){var i,s,n=e.within,o=n.offset.left+n.scrollLeft,r=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=t.left-e.collisionPosition.marginLeft,c=l-h,u=l+e.collisionWidth-r-h,d="left"===e.my[0]?-e.elemWidth:"right"===e.my[0]?e.elemWidth:0,p="left"===e.at[0]?e.targetWidth:"right"===e.at[0]?-e.targetWidth:0,f=-2*e.offset[0];0>c?(i=t.left+d+p+f+e.collisionWidth-r-o,(0>i||a(c)>i)&&(t.left+=d+p+f)):u>0&&(s=t.left-e.collisionPosition.marginLeft+d+p+f-h,(s>0||u>a(s))&&(t.left+=d+p+f))},top:function(t,e){var i,s,n=e.within,o=n.offset.top+n.scrollTop,r=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=t.top-e.collisionPosition.marginTop,c=l-h,u=l+e.collisionHeight-r-h,d="top"===e.my[1],p=d?-e.elemHeight:"bottom"===e.my[1]?e.elemHeight:0,f="top"===e.at[1]?e.targetHeight:"bottom"===e.at[1]?-e.targetHeight:0,g=-2*e.offset[1];0>c?(s=t.top+p+f+g+e.collisionHeight-r-o,(0>s||a(c)>s)&&(t.top+=p+f+g)):u>0&&(i=t.top-e.collisionPosition.marginTop+p+f+g-h,(i>0||u>a(i))&&(t.top+=p+f+g))}},flipfit:{left:function(){t.ui.position.flip.left.apply(this,arguments),t.ui.position.fit.left.apply(this,arguments)},top:function(){t.ui.position.flip.top.apply(this,arguments),t.ui.position.fit.top.apply(this,arguments)}}}}(),t.ui.position,t.extend(t.expr[":"],{data:t.expr.createPseudo?t.expr.createPseudo(function(e){return function(i){return!!t.data(i,e)}}):function(e,i,s){return!!t.data(e,s[3])}}),t.fn.extend({disableSelection:function(){var t="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.on(t+".ui-disableSelection",function(t){t.preventDefault()})}}(),enableSelection:function(){return this.off(".ui-disableSelection")}});var c="ui-effects-",u="ui-effects-style",d="ui-effects-animated",p=t;t.effects={effect:{}},function(t,e){function i(t,e,i){var s=u[e.type]||{};return null==t?i||!e.def?null:e.def:(t=s.floor?~~t:parseFloat(t),isNaN(t)?e.def:s.mod?(t+s.mod)%s.mod:0>t?0:t>s.max?s.max:t)}function s(i){var s=l(),n=s._rgba=[];return i=i.toLowerCase(),f(h,function(t,o){var a,r=o.re.exec(i),h=r&&o.parse(r),l=o.space||"rgba";return h?(a=s[l](h),s[c[l].cache]=a[c[l].cache],n=s._rgba=a._rgba,!1):e}),n.length?("0,0,0,0"===n.join()&&t.extend(n,o.transparent),s):o[i]}function n(t,e,i){return 
i=(i+1)%1,1>6*i?t+6*(e-t)*i:1>2*i?e:2>3*i?t+6*(e-t)*(2/3-i):t}var o,a="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor textEmphasisColor",r=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[t[1],t[2],t[3],t[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[2.55*t[1],2.55*t[2],2.55*t[3],t[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(t){return[parseInt(t[1],16),parseInt(t[2],16),parseInt(t[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(t){return[parseInt(t[1]+t[1],16),parseInt(t[2]+t[2],16),parseInt(t[3]+t[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(t){return[t[1],t[2]/100,t[3]/100,t[4]]}}],l=t.Color=function(e,i,s,n){return new t.Color.fn.parse(e,i,s,n)},c={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},u={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},d=l.support={},p=t("

")[0],f=t.each;p.style.cssText="background-color:rgba(1,1,1,.5)",d.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(c,function(t,e){e.cache="_"+t,e.props.alpha={idx:3,type:"percent",def:1}}),l.fn=t.extend(l.prototype,{parse:function(n,a,r,h){if(n===e)return this._rgba=[null,null,null,null],this;(n.jquery||n.nodeType)&&(n=t(n).css(a),a=e);var u=this,d=t.type(n),p=this._rgba=[];return a!==e&&(n=[n,a,r,h],d="array"),"string"===d?this.parse(s(n)||o._default):"array"===d?(f(c.rgba.props,function(t,e){p[e.idx]=i(n[e.idx],e)}),this):"object"===d?(n instanceof l?f(c,function(t,e){n[e.cache]&&(u[e.cache]=n[e.cache].slice())}):f(c,function(e,s){var o=s.cache;f(s.props,function(t,e){if(!u[o]&&s.to){if("alpha"===t||null==n[t])return;u[o]=s.to(u._rgba)}u[o][e.idx]=i(n[t],e,!0)}),u[o]&&0>t.inArray(null,u[o].slice(0,3))&&(u[o][3]=1,s.from&&(u._rgba=s.from(u[o])))}),this):e},is:function(t){var i=l(t),s=!0,n=this;return f(c,function(t,o){var a,r=i[o.cache];return r&&(a=n[o.cache]||o.to&&o.to(n._rgba)||[],f(o.props,function(t,i){return null!=r[i.idx]?s=r[i.idx]===a[i.idx]:e})),s}),s},_space:function(){var t=[],e=this;return f(c,function(i,s){e[s.cache]&&t.push(i)}),t.pop()},transition:function(t,e){var s=l(t),n=s._space(),o=c[n],a=0===this.alpha()?l("transparent"):this,r=a[o.cache]||o.to(a._rgba),h=r.slice();return s=s[o.cache],f(o.props,function(t,n){var o=n.idx,a=r[o],l=s[o],c=u[n.type]||{};null!==l&&(null===a?h[o]=l:(c.mod&&(l-a>c.mod/2?a+=c.mod:a-l>c.mod/2&&(a-=c.mod)),h[o]=i((l-a)*e+a,n)))}),this[n](h)},blend:function(e){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),n=l(e)._rgba;return l(t.map(i,function(t,e){return(1-s)*n[e]+s*t}))},toRgbaString:function(){var e="rgba(",i=t.map(this._rgba,function(t,e){return null==t?e>2?1:0:t});return 1===i[3]&&(i.pop(),e="rgb("),e+i.join()+")"},toHslaString:function(){var e="hsla(",i=t.map(this.hsla(),function(t,e){return null==t&&(t=e>2?1:0),e&&3>e&&(t=Math.round(100*t)+"%"),t});return 1===i[3]&&(i.pop(),e="hsl("),e+i.join()+")"},toHexString:function(e){var i=this._rgba.slice(),s=i.pop();return e&&i.push(~~(255*s)),"#"+t.map(i,function(t){return t=(t||0).toString(16),1===t.length?"0"+t:t}).join("")},toString:function(){return 0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,c.hsla.to=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e,i,s=t[0]/255,n=t[1]/255,o=t[2]/255,a=t[3],r=Math.max(s,n,o),h=Math.min(s,n,o),l=r-h,c=r+h,u=.5*c;return e=h===r?0:s===r?60*(n-o)/l+360:n===r?60*(o-s)/l+120:60*(s-n)/l+240,i=0===l?0:.5>=u?l/c:l/(2-c),[Math.round(e)%360,i,u,null==a?1:a]},c.hsla.from=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e=t[0]/360,i=t[1],s=t[2],o=t[3],a=.5>=s?s*(1+i):s+i-s*i,r=2*s-a;return[Math.round(255*n(r,a,e+1/3)),Math.round(255*n(r,a,e)),Math.round(255*n(r,a,e-1/3)),o]},f(c,function(s,n){var o=n.props,a=n.cache,h=n.to,c=n.from;l.fn[s]=function(s){if(h&&!this[a]&&(this[a]=h(this._rgba)),s===e)return this[a].slice();var n,r=t.type(s),u="array"===r||"object"===r?s:arguments,d=this[a].slice();return f(o,function(t,e){var s=u["object"===r?t:e.idx];null==s&&(s=d[e.idx]),d[e.idx]=i(s,e)}),c?(n=l(c(d)),n[a]=d,n):l(d)},f(o,function(e,i){l.fn[e]||(l.fn[e]=function(n){var 
o,a=t.type(n),h="alpha"===e?this._hsla?"hsla":"rgba":s,l=this[h](),c=l[i.idx];return"undefined"===a?c:("function"===a&&(n=n.call(this,c),a=t.type(n)),null==n&&i.empty?this:("string"===a&&(o=r.exec(n),o&&(n=c+parseFloat(o[2])*("+"===o[1]?1:-1))),l[i.idx]=n,this[h](l)))})})}),l.hook=function(e){var i=e.split(" ");f(i,function(e,i){t.cssHooks[i]={set:function(e,n){var o,a,r="";if("transparent"!==n&&("string"!==t.type(n)||(o=s(n)))){if(n=l(o||n),!d.rgba&&1!==n._rgba[3]){for(a="backgroundColor"===i?e.parentNode:e;(""===r||"transparent"===r)&&a&&a.style;)try{r=t.css(a,"backgroundColor"),a=a.parentNode}catch(h){}n=n.blend(r&&"transparent"!==r?r:"_default")}n=n.toRgbaString()}try{e.style[i]=n}catch(h){}}},t.fx.step[i]=function(e){e.colorInit||(e.start=l(e.elem,i),e.end=l(e.end),e.colorInit=!0),t.cssHooks[i].set(e.elem,e.start.transition(e.end,e.pos))}})},l.hook(a),t.cssHooks.borderColor={expand:function(t){var e={};return f(["Top","Right","Bottom","Left"],function(i,s){e["border"+s+"Color"]=t}),e}},o=t.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(p),function(){function e(e){var i,s,n=e.ownerDocument.defaultView?e.ownerDocument.defaultView.getComputedStyle(e,null):e.currentStyle,o={};if(n&&n.length&&n[0]&&n[n[0]])for(s=n.length;s--;)i=n[s],"string"==typeof n[i]&&(o[t.camelCase(i)]=n[i]);else for(i in n)"string"==typeof n[i]&&(o[i]=n[i]);return o}function i(e,i){var s,o,a={};for(s in i)o=i[s],e[s]!==o&&(n[s]||(t.fx.step[s]||!isNaN(parseFloat(o)))&&(a[s]=o));return a}var s=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};t.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(e,i){t.fx.step[i]=function(t){("none"!==t.end&&!t.setAttr||1===t.pos&&!t.setAttr)&&(p.style(t.elem,i,t.end),t.setAttr=!0)}}),t.fn.addBack||(t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.effects.animateClass=function(n,o,a,r){var h=t.speed(o,a,r);return this.queue(function(){var o,a=t(this),r=a.attr("class")||"",l=h.children?a.find("*").addBack():a;l=l.map(function(){var i=t(this);return{el:i,start:e(this)}}),o=function(){t.each(s,function(t,e){n[e]&&a[e+"Class"](n[e])})},o(),l=l.map(function(){return this.end=e(this.el[0]),this.diff=i(this.start,this.end),this}),a.attr("class",r),l=l.map(function(){var e=this,i=t.Deferred(),s=t.extend({},h,{queue:!1,complete:function(){i.resolve(e)}});return this.el.animate(this.diff,s),i.promise()}),t.when.apply(t,l.get()).done(function(){o(),t.each(arguments,function(){var e=this.el;t.each(this.diff,function(t){e.css(t,"")})}),h.complete.call(a[0])})})},t.fn.extend({addClass:function(e){return function(i,s,n,o){return s?t.effects.animateClass.call(this,{add:i},s,n,o):e.apply(this,arguments)}}(t.fn.addClass),removeClass:function(e){return function(i,s,n,o){return arguments.length>1?t.effects.animateClass.call(this,{remove:i},s,n,o):e.apply(this,arguments)}}(t.fn.removeClass),toggleClass:function(e){return function(i,s,n,o,a){return"boolean"==typeof s||void 
0===s?n?t.effects.animateClass.call(this,s?{add:i}:{remove:i},n,o,a):e.apply(this,arguments):t.effects.animateClass.call(this,{toggle:i},s,n,o)}}(t.fn.toggleClass),switchClass:function(e,i,s,n,o){return t.effects.animateClass.call(this,{add:i,remove:e},s,n,o)}})}(),function(){function e(e,i,s,n){return t.isPlainObject(e)&&(i=e,e=e.effect),e={effect:e},null==i&&(i={}),t.isFunction(i)&&(n=i,s=null,i={}),("number"==typeof i||t.fx.speeds[i])&&(n=s,s=i,i={}),t.isFunction(s)&&(n=s,s=null),i&&t.extend(e,i),s=s||i.duration,e.duration=t.fx.off?0:"number"==typeof s?s:s in t.fx.speeds?t.fx.speeds[s]:t.fx.speeds._default,e.complete=n||i.complete,e}function i(e){return!e||"number"==typeof e||t.fx.speeds[e]?!0:"string"!=typeof e||t.effects.effect[e]?t.isFunction(e)?!0:"object"!=typeof e||e.effect?!1:!0:!0}function s(t,e){var i=e.outerWidth(),s=e.outerHeight(),n=/^rect\((-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto)\)$/,o=n.exec(t)||["",0,i,s,0];return{top:parseFloat(o[1])||0,right:"auto"===o[2]?i:parseFloat(o[2]),bottom:"auto"===o[3]?s:parseFloat(o[3]),left:parseFloat(o[4])||0}}t.expr&&t.expr.filters&&t.expr.filters.animated&&(t.expr.filters.animated=function(e){return function(i){return!!t(i).data(d)||e(i)}}(t.expr.filters.animated)),t.uiBackCompat!==!1&&t.extend(t.effects,{save:function(t,e){for(var i=0,s=e.length;s>i;i++)null!==e[i]&&t.data(c+e[i],t[0].style[e[i]])},restore:function(t,e){for(var i,s=0,n=e.length;n>s;s++)null!==e[s]&&(i=t.data(c+e[s]),t.css(e[s],i))},setMode:function(t,e){return"toggle"===e&&(e=t.is(":hidden")?"show":"hide"),e},createWrapper:function(e){if(e.parent().is(".ui-effects-wrapper"))return e.parent();var i={width:e.outerWidth(!0),height:e.outerHeight(!0),"float":e.css("float")},s=t("

").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),n={width:e.width(),height:e.height()},o=document.activeElement;try{o.id}catch(a){o=document.body}return e.wrap(s),(e[0]===o||t.contains(e[0],o))&&t(o).trigger("focus"),s=e.parent(),"static"===e.css("position")?(s.css({position:"relative"}),e.css({position:"relative"})):(t.extend(i,{position:e.css("position"),zIndex:e.css("z-index")}),t.each(["top","left","bottom","right"],function(t,s){i[s]=e.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),e.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),e.css(n),s.css(i).show()},removeWrapper:function(e){var i=document.activeElement;return e.parent().is(".ui-effects-wrapper")&&(e.parent().replaceWith(e),(e[0]===i||t.contains(e[0],i))&&t(i).trigger("focus")),e}}),t.extend(t.effects,{version:"1.12.1",define:function(e,i,s){return s||(s=i,i="effect"),t.effects.effect[e]=s,t.effects.effect[e].mode=i,s},scaledDimensions:function(t,e,i){if(0===e)return{height:0,width:0,outerHeight:0,outerWidth:0};var s="horizontal"!==i?(e||100)/100:1,n="vertical"!==i?(e||100)/100:1;return{height:t.height()*n,width:t.width()*s,outerHeight:t.outerHeight()*n,outerWidth:t.outerWidth()*s}},clipToBox:function(t){return{width:t.clip.right-t.clip.left,height:t.clip.bottom-t.clip.top,left:t.clip.left,top:t.clip.top}},unshift:function(t,e,i){var s=t.queue();e>1&&s.splice.apply(s,[1,0].concat(s.splice(e,i))),t.dequeue()},saveStyle:function(t){t.data(u,t[0].style.cssText)},restoreStyle:function(t){t[0].style.cssText=t.data(u)||"",t.removeData(u)},mode:function(t,e){var i=t.is(":hidden");return"toggle"===e&&(e=i?"show":"hide"),(i?"hide"===e:"show"===e)&&(e="none"),e},getBaseline:function(t,e){var i,s;switch(t[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=t[0]/e.height}switch(t[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=t[1]/e.width}return{x:s,y:i}},createPlaceholder:function(e){var i,s=e.css("position"),n=e.position();return e.css({marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()),/^(static|relative)/.test(s)&&(s="absolute",i=t("<"+e[0].nodeName+">").insertAfter(e).css({display:/^(inline|ruby)/.test(e.css("display"))?"inline-block":"block",visibility:"hidden",marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight"),"float":e.css("float")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).addClass("ui-effects-placeholder"),e.data(c+"placeholder",i)),e.css({position:s,left:n.left,top:n.top}),i},removePlaceholder:function(t){var e=c+"placeholder",i=t.data(e);i&&(i.remove(),t.removeData(e))},cleanUp:function(e){t.effects.restoreStyle(e),t.effects.removePlaceholder(e)},setTransition:function(e,i,s,n){return n=n||{},t.each(i,function(t,i){var o=e.cssUnit(i);o[0]>0&&(n[i]=o[0]*s+o[1])}),n}}),t.fn.extend({effect:function(){function i(e){function i(){r.removeData(d),t.effects.cleanUp(r),"hide"===s.mode&&r.hide(),a()}function a(){t.isFunction(h)&&h.call(r[0]),t.isFunction(e)&&e()}var r=t(this);s.mode=c.shift(),t.uiBackCompat===!1||o?"none"===s.mode?(r[l](),a()):n.call(r[0],s,i):(r.is(":hidden")?"hide"===l:"show"===l)?(r[l](),a()):n.call(r[0],s,a)}var s=e.apply(this,arguments),n=t.effects.effect[s.effect],o=n.mode,a=s.queue,r=a||"fx",h=s.complete,l=s.mode,c=[],u=function(e){var 
i=t(this),s=t.effects.mode(i,l)||o;i.data(d,!0),c.push(s),o&&("show"===s||s===o&&"hide"===s)&&i.show(),o&&"none"===s||t.effects.saveStyle(i),t.isFunction(e)&&e()};return t.fx.off||!n?l?this[l](s.duration,h):this.each(function(){h&&h.call(this)}):a===!1?this.each(u).each(i):this.queue(r,u).queue(r,i)},show:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="show",this.effect.call(this,n) +}}(t.fn.show),hide:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="hide",this.effect.call(this,n)}}(t.fn.hide),toggle:function(t){return function(s){if(i(s)||"boolean"==typeof s)return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)}}(t.fn.toggle),cssUnit:function(e){var i=this.css(e),s=[];return t.each(["em","px","%","pt"],function(t,e){i.indexOf(e)>0&&(s=[parseFloat(i),e])}),s},cssClip:function(t){return t?this.css("clip","rect("+t.top+"px "+t.right+"px "+t.bottom+"px "+t.left+"px)"):s(this.css("clip"),this)},transfer:function(e,i){var s=t(this),n=t(e.to),o="fixed"===n.css("position"),a=t("body"),r=o?a.scrollTop():0,h=o?a.scrollLeft():0,l=n.offset(),c={top:l.top-r,left:l.left-h,height:n.innerHeight(),width:n.innerWidth()},u=s.offset(),d=t("
").appendTo("body").addClass(e.className).css({top:u.top-r,left:u.left-h,height:s.innerHeight(),width:s.innerWidth(),position:o?"fixed":"absolute"}).animate(c,e.duration,e.easing,function(){d.remove(),t.isFunction(i)&&i()})}}),t.fx.step.clip=function(e){e.clipInit||(e.start=t(e.elem).cssClip(),"string"==typeof e.end&&(e.end=s(e.end,e.elem)),e.clipInit=!0),t(e.elem).cssClip({top:e.pos*(e.end.top-e.start.top)+e.start.top,right:e.pos*(e.end.right-e.start.right)+e.start.right,bottom:e.pos*(e.end.bottom-e.start.bottom)+e.start.bottom,left:e.pos*(e.end.left-e.start.left)+e.start.left})}}(),function(){var e={};t.each(["Quad","Cubic","Quart","Quint","Expo"],function(t,i){e[i]=function(e){return Math.pow(e,t+2)}}),t.extend(e,{Sine:function(t){return 1-Math.cos(t*Math.PI/2)},Circ:function(t){return 1-Math.sqrt(1-t*t)},Elastic:function(t){return 0===t||1===t?t:-Math.pow(2,8*(t-1))*Math.sin((80*(t-1)-7.5)*Math.PI/15)},Back:function(t){return t*t*(3*t-2)},Bounce:function(t){for(var e,i=4;((e=Math.pow(2,--i))-1)/11>t;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*e-2)/22-t,2)}}),t.each(e,function(e,i){t.easing["easeIn"+e]=i,t.easing["easeOut"+e]=function(t){return 1-i(1-t)},t.easing["easeInOut"+e]=function(t){return.5>t?i(2*t)/2:1-i(-2*t+2)/2}})}();var f=t.effects;t.effects.define("blind","hide",function(e,i){var s={up:["bottom","top"],vertical:["bottom","top"],down:["top","bottom"],left:["right","left"],horizontal:["right","left"],right:["left","right"]},n=t(this),o=e.direction||"up",a=n.cssClip(),r={clip:t.extend({},a)},h=t.effects.createPlaceholder(n);r.clip[s[o][0]]=r.clip[s[o][1]],"show"===e.mode&&(n.cssClip(r.clip),h&&h.css(t.effects.clipToBox(r)),r.clip=a),h&&h.animate(t.effects.clipToBox(r),e.duration,e.easing),n.animate(r,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("bounce",function(e,i){var s,n,o,a=t(this),r=e.mode,h="hide"===r,l="show"===r,c=e.direction||"up",u=e.distance,d=e.times||5,p=2*d+(l||h?1:0),f=e.duration/p,g=e.easing,m="up"===c||"down"===c?"top":"left",_="up"===c||"left"===c,v=0,b=a.queue().length;for(t.effects.createPlaceholder(a),o=a.css(m),u||(u=a["top"===m?"outerHeight":"outerWidth"]()/3),l&&(n={opacity:1},n[m]=o,a.css("opacity",0).css(m,_?2*-u:2*u).animate(n,f,g)),h&&(u/=Math.pow(2,d-1)),n={},n[m]=o;d>v;v++)s={},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g).animate(n,f,g),u=h?2*u:u/2;h&&(s={opacity:0},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g)),a.queue(i),t.effects.unshift(a,b,p+1)}),t.effects.define("clip","hide",function(e,i){var s,n={},o=t(this),a=e.direction||"vertical",r="both"===a,h=r||"horizontal"===a,l=r||"vertical"===a;s=o.cssClip(),n.clip={top:l?(s.bottom-s.top)/2:s.top,right:h?(s.right-s.left)/2:s.right,bottom:l?(s.bottom-s.top)/2:s.bottom,left:h?(s.right-s.left)/2:s.left},t.effects.createPlaceholder(o),"show"===e.mode&&(o.cssClip(n.clip),n.clip=s),o.animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("drop","hide",function(e,i){var s,n=t(this),o=e.mode,a="show"===o,r=e.direction||"left",h="up"===r||"down"===r?"top":"left",l="up"===r||"left"===r?"-=":"+=",c="+="===l?"-=":"+=",u={opacity:0};t.effects.createPlaceholder(n),s=e.distance||n["top"===h?"outerHeight":"outerWidth"](!0)/2,u[h]=l+s,a&&(n.css(u),u[h]=c+s,u.opacity=1),n.animate(u,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("explode","hide",function(e,i){function s(){b.push(this),b.length===u*d&&n()}function n(){p.css({visibility:"visible"}),t(b).remove(),i()}var 
o,a,r,h,l,c,u=e.pieces?Math.round(Math.sqrt(e.pieces)):3,d=u,p=t(this),f=e.mode,g="show"===f,m=p.show().css("visibility","hidden").offset(),_=Math.ceil(p.outerWidth()/d),v=Math.ceil(p.outerHeight()/u),b=[];for(o=0;u>o;o++)for(h=m.top+o*v,c=o-(u-1)/2,a=0;d>a;a++)r=m.left+a*_,l=a-(d-1)/2,p.clone().appendTo("body").wrap("<div></div>
").css({position:"absolute",visibility:"visible",left:-a*_,top:-o*v}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:_,height:v,left:r+(g?l*_:0),top:h+(g?c*v:0),opacity:g?0:1}).animate({left:r+(g?0:l*_),top:h+(g?0:c*v),opacity:g?1:0},e.duration||500,e.easing,s)}),t.effects.define("fade","toggle",function(e,i){var s="show"===e.mode;t(this).css("opacity",s?0:1).animate({opacity:s?1:0},{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("fold","hide",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=e.size||15,h=/([0-9]+)%/.exec(r),l=!!e.horizFirst,c=l?["right","bottom"]:["bottom","right"],u=e.duration/2,d=t.effects.createPlaceholder(s),p=s.cssClip(),f={clip:t.extend({},p)},g={clip:t.extend({},p)},m=[p[c[0]],p[c[1]]],_=s.queue().length;h&&(r=parseInt(h[1],10)/100*m[a?0:1]),f.clip[c[0]]=r,g.clip[c[0]]=r,g.clip[c[1]]=0,o&&(s.cssClip(g.clip),d&&d.css(t.effects.clipToBox(g)),g.clip=p),s.queue(function(i){d&&d.animate(t.effects.clipToBox(f),u,e.easing).animate(t.effects.clipToBox(g),u,e.easing),i()}).animate(f,u,e.easing).animate(g,u,e.easing).queue(i),t.effects.unshift(s,_,4)}),t.effects.define("highlight","show",function(e,i){var s=t(this),n={backgroundColor:s.css("backgroundColor")};"hide"===e.mode&&(n.opacity=0),t.effects.saveStyle(s),s.css({backgroundImage:"none",backgroundColor:e.color||"#ffff99"}).animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("size",function(e,i){var s,n,o,a=t(this),r=["fontSize"],h=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],l=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],c=e.mode,u="effect"!==c,d=e.scale||"both",p=e.origin||["middle","center"],f=a.css("position"),g=a.position(),m=t.effects.scaledDimensions(a),_=e.from||m,v=e.to||t.effects.scaledDimensions(a,0);t.effects.createPlaceholder(a),"show"===c&&(o=_,_=v,v=o),n={from:{y:_.height/m.height,x:_.width/m.width},to:{y:v.height/m.height,x:v.width/m.width}},("box"===d||"both"===d)&&(n.from.y!==n.to.y&&(_=t.effects.setTransition(a,h,n.from.y,_),v=t.effects.setTransition(a,h,n.to.y,v)),n.from.x!==n.to.x&&(_=t.effects.setTransition(a,l,n.from.x,_),v=t.effects.setTransition(a,l,n.to.x,v))),("content"===d||"both"===d)&&n.from.y!==n.to.y&&(_=t.effects.setTransition(a,r,n.from.y,_),v=t.effects.setTransition(a,r,n.to.y,v)),p&&(s=t.effects.getBaseline(p,m),_.top=(m.outerHeight-_.outerHeight)*s.y+g.top,_.left=(m.outerWidth-_.outerWidth)*s.x+g.left,v.top=(m.outerHeight-v.outerHeight)*s.y+g.top,v.left=(m.outerWidth-v.outerWidth)*s.x+g.left),a.css(_),("content"===d||"both"===d)&&(h=h.concat(["marginTop","marginBottom"]).concat(r),l=l.concat(["marginLeft","marginRight"]),a.find("*[width]").each(function(){var i=t(this),s=t.effects.scaledDimensions(i),o={height:s.height*n.from.y,width:s.width*n.from.x,outerHeight:s.outerHeight*n.from.y,outerWidth:s.outerWidth*n.from.x},a={height:s.height*n.to.y,width:s.width*n.to.x,outerHeight:s.height*n.to.y,outerWidth:s.width*n.to.x};n.from.y!==n.to.y&&(o=t.effects.setTransition(i,h,n.from.y,o),a=t.effects.setTransition(i,h,n.to.y,a)),n.from.x!==n.to.x&&(o=t.effects.setTransition(i,l,n.from.x,o),a=t.effects.setTransition(i,l,n.to.x,a)),u&&t.effects.saveStyle(i),i.css(o),i.animate(a,e.duration,e.easing,function(){u&&t.effects.restoreStyle(i)})})),a.animate(v,{queue:!1,duration:e.duration,easing:e.easing,complete:function(){var 
e=a.offset();0===v.opacity&&a.css("opacity",_.opacity),u||(a.css("position","static"===f?"relative":f).offset(e),t.effects.saveStyle(a)),i()}})}),t.effects.define("scale",function(e,i){var s=t(this),n=e.mode,o=parseInt(e.percent,10)||(0===parseInt(e.percent,10)?0:"effect"!==n?0:100),a=t.extend(!0,{from:t.effects.scaledDimensions(s),to:t.effects.scaledDimensions(s,o,e.direction||"both"),origin:e.origin||["middle","center"]},e);e.fade&&(a.from.opacity=1,a.to.opacity=0),t.effects.effect.size.call(this,a,i)}),t.effects.define("puff","hide",function(e,i){var s=t.extend(!0,{},e,{fade:!0,percent:parseInt(e.percent,10)||150});t.effects.effect.scale.call(this,s,i)}),t.effects.define("pulsate","show",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=o||a,h=2*(e.times||5)+(r?1:0),l=e.duration/h,c=0,u=1,d=s.queue().length;for((o||!s.is(":visible"))&&(s.css("opacity",0).show(),c=1);h>u;u++)s.animate({opacity:c},l,e.easing),c=1-c;s.animate({opacity:c},l,e.easing),s.queue(i),t.effects.unshift(s,d,h+1)}),t.effects.define("shake",function(e,i){var s=1,n=t(this),o=e.direction||"left",a=e.distance||20,r=e.times||3,h=2*r+1,l=Math.round(e.duration/h),c="up"===o||"down"===o?"top":"left",u="up"===o||"left"===o,d={},p={},f={},g=n.queue().length;for(t.effects.createPlaceholder(n),d[c]=(u?"-=":"+=")+a,p[c]=(u?"+=":"-=")+2*a,f[c]=(u?"-=":"+=")+2*a,n.animate(d,l,e.easing);r>s;s++)n.animate(p,l,e.easing).animate(f,l,e.easing);n.animate(p,l,e.easing).animate(d,l/2,e.easing).queue(i),t.effects.unshift(n,g,h+1)}),t.effects.define("slide","show",function(e,i){var s,n,o=t(this),a={up:["bottom","top"],down:["top","bottom"],left:["right","left"],right:["left","right"]},r=e.mode,h=e.direction||"left",l="up"===h||"down"===h?"top":"left",c="up"===h||"left"===h,u=e.distance||o["top"===l?"outerHeight":"outerWidth"](!0),d={};t.effects.createPlaceholder(o),s=o.cssClip(),n=o.position()[l],d[l]=(c?-1:1)*u+n,d.clip=o.cssClip(),d.clip[a[h][1]]=d.clip[a[h][0]],"show"===r&&(o.cssClip(d.clip),o.css(l,d[l]),d.clip=s,d[l]=n),o.animate(d,{queue:!1,duration:e.duration,easing:e.easing,complete:i})});var f;t.uiBackCompat!==!1&&(f=t.effects.define("transfer",function(e,i){t(this).transfer(e,i)})),t.ui.focusable=function(i,s){var n,o,a,r,h,l=i.nodeName.toLowerCase();return"area"===l?(n=i.parentNode,o=n.name,i.href&&o&&"map"===n.nodeName.toLowerCase()?(a=t("img[usemap='#"+o+"']"),a.length>0&&a.is(":visible")):!1):(/^(input|select|textarea|button|object)$/.test(l)?(r=!i.disabled,r&&(h=t(i).closest("fieldset")[0],h&&(r=!h.disabled))):r="a"===l?i.href||s:s,r&&t(i).is(":visible")&&e(t(i)))},t.extend(t.expr[":"],{focusable:function(e){return t.ui.focusable(e,null!=t.attr(e,"tabindex"))}}),t.ui.focusable,t.fn.form=function(){return"string"==typeof this[0].form?this.closest("form"):t(this[0].form)},t.ui.formResetMixin={_formResetHandler:function(){var e=t(this);setTimeout(function(){var i=e.data("ui-form-reset-instances");t.each(i,function(){this.refresh()})})},_bindFormResetHandler:function(){if(this.form=this.element.form(),this.form.length){var t=this.form.data("ui-form-reset-instances")||[];t.length||this.form.on("reset.ui-form-reset",this._formResetHandler),t.push(this),this.form.data("ui-form-reset-instances",t)}},_unbindFormResetHandler:function(){if(this.form.length){var 
e=this.form.data("ui-form-reset-instances");e.splice(t.inArray(this,e),1),e.length?this.form.data("ui-form-reset-instances",e):this.form.removeData("ui-form-reset-instances").off("reset.ui-form-reset")}}},"1.7"===t.fn.jquery.substring(0,3)&&(t.each(["Width","Height"],function(e,i){function s(e,i,s,o){return t.each(n,function(){i-=parseFloat(t.css(e,"padding"+this))||0,s&&(i-=parseFloat(t.css(e,"border"+this+"Width"))||0),o&&(i-=parseFloat(t.css(e,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],o=i.toLowerCase(),a={innerWidth:t.fn.innerWidth,innerHeight:t.fn.innerHeight,outerWidth:t.fn.outerWidth,outerHeight:t.fn.outerHeight};t.fn["inner"+i]=function(e){return void 0===e?a["inner"+i].call(this):this.each(function(){t(this).css(o,s(this,e)+"px")})},t.fn["outer"+i]=function(e,n){return"number"!=typeof e?a["outer"+i].call(this,e):this.each(function(){t(this).css(o,s(this,e,!0,n)+"px")})}}),t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.ui.keyCode={BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38},t.ui.escapeSelector=function(){var t=/([!"#$%&'()*+,.\/:;<=>?@[\]^`{|}~])/g;return function(e){return e.replace(t,"\\$1")}}(),t.fn.labels=function(){var e,i,s,n,o;return this[0].labels&&this[0].labels.length?this.pushStack(this[0].labels):(n=this.eq(0).parents("label"),s=this.attr("id"),s&&(e=this.eq(0).parents().last(),o=e.add(e.length?e.siblings():this.siblings()),i="label[for='"+t.ui.escapeSelector(s)+"']",n=n.add(o.find(i).addBack(i))),this.pushStack(n))},t.fn.scrollParent=function(e){var i=this.css("position"),s="absolute"===i,n=e?/(auto|scroll|hidden)/:/(auto|scroll)/,o=this.parents().filter(function(){var e=t(this);return s&&"static"===e.css("position")?!1:n.test(e.css("overflow")+e.css("overflow-y")+e.css("overflow-x"))}).eq(0);return"fixed"!==i&&o.length?o:t(this[0].ownerDocument||document)},t.extend(t.expr[":"],{tabbable:function(e){var i=t.attr(e,"tabindex"),s=null!=i;return(!s||i>=0)&&t.ui.focusable(e,s)}}),t.fn.extend({uniqueId:function(){var t=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++t)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&t(this).removeAttr("id")})}}),t.widget("ui.accordion",{version:"1.12.1",options:{active:0,animate:{},classes:{"ui-accordion-header":"ui-corner-top","ui-accordion-header-collapsed":"ui-corner-all","ui-accordion-content":"ui-corner-bottom"},collapsible:!1,event:"click",header:"> li > :first-child, > :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var e=this.options;this.prevShow=this.prevHide=t(),this._addClass("ui-accordion","ui-widget ui-helper-reset"),this.element.attr("role","tablist"),e.collapsible||e.active!==!1&&null!=e.active||(e.active=0),this._processPanels(),0>e.active&&(e.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():t()}},_createIcons:function(){var e,i,s=this.options.icons;s&&(e=t(""),this._addClass(e,"ui-accordion-header-icon","ui-icon 
"+s.header),e.prependTo(this.headers),i=this.active.children(".ui-accordion-header-icon"),this._removeClass(i,s.header)._addClass(i,null,s.activeHeader)._addClass(this.headers,"ui-accordion-icons"))},_destroyIcons:function(){this._removeClass(this.headers,"ui-accordion-icons"),this.headers.children(".ui-accordion-header-icon").remove()},_destroy:function(){var t;this.element.removeAttr("role"),this.headers.removeAttr("role aria-expanded aria-selected aria-controls tabIndex").removeUniqueId(),this._destroyIcons(),t=this.headers.next().css("display","").removeAttr("role aria-hidden aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&t.css("height","")},_setOption:function(t,e){return"active"===t?(this._activate(e),void 0):("event"===t&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(e)),this._super(t,e),"collapsible"!==t||e||this.options.active!==!1||this._activate(0),"icons"===t&&(this._destroyIcons(),e&&this._createIcons()),void 0)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t),this._toggleClass(this.headers.add(this.headers.next()),null,"ui-state-disabled",!!t)},_keydown:function(e){if(!e.altKey&&!e.ctrlKey){var i=t.ui.keyCode,s=this.headers.length,n=this.headers.index(e.target),o=!1;switch(e.keyCode){case i.RIGHT:case i.DOWN:o=this.headers[(n+1)%s];break;case i.LEFT:case i.UP:o=this.headers[(n-1+s)%s];break;case i.SPACE:case i.ENTER:this._eventHandler(e);break;case i.HOME:o=this.headers[0];break;case i.END:o=this.headers[s-1]}o&&(t(e.target).attr("tabIndex",-1),t(o).attr("tabIndex",0),t(o).trigger("focus"),e.preventDefault())}},_panelKeyDown:function(e){e.keyCode===t.ui.keyCode.UP&&e.ctrlKey&&t(e.currentTarget).prev().trigger("focus")},refresh:function(){var e=this.options;this._processPanels(),e.active===!1&&e.collapsible===!0||!this.headers.length?(e.active=!1,this.active=t()):e.active===!1?this._activate(0):this.active.length&&!t.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(e.active=!1,this.active=t()):this._activate(Math.max(0,e.active-1)):e.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var t=this.headers,e=this.panels;this.headers=this.element.find(this.options.header),this._addClass(this.headers,"ui-accordion-header ui-accordion-header-collapsed","ui-state-default"),this.panels=this.headers.next().filter(":not(.ui-accordion-content-active)").hide(),this._addClass(this.panels,"ui-accordion-content","ui-helper-reset ui-widget-content"),e&&(this._off(t.not(this.headers)),this._off(e.not(this.panels)))},_refresh:function(){var e,i=this.options,s=i.heightStyle,n=this.element.parent();this.active=this._findActive(i.active),this._addClass(this.active,"ui-accordion-header-active","ui-state-active")._removeClass(this.active,"ui-accordion-header-collapsed"),this._addClass(this.active.next(),"ui-accordion-content-active"),this.active.next().show(),this.headers.attr("role","tab").each(function(){var 
e=t(this),i=e.uniqueId().attr("id"),s=e.next(),n=s.uniqueId().attr("id");e.attr("aria-controls",n),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(i.event),"fill"===s?(e=n.height(),this.element.siblings(":visible").each(function(){var i=t(this),s=i.css("position");"absolute"!==s&&"fixed"!==s&&(e-=i.outerHeight(!0))}),this.headers.each(function(){e-=t(this).outerHeight(!0)}),this.headers.next().each(function(){t(this).height(Math.max(0,e-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===s&&(e=0,this.headers.next().each(function(){var i=t(this).is(":visible");i||t(this).show(),e=Math.max(e,t(this).css("height","").height()),i||t(this).hide()}).height(e))},_activate:function(e){var i=this._findActive(e)[0];i!==this.active[0]&&(i=i||this.active[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return"number"==typeof e?this.headers.eq(e):t()},_setupEvents:function(e){var i={keydown:"_keydown"};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(e){var i,s,n=this.options,o=this.active,a=t(e.currentTarget),r=a[0]===o[0],h=r&&n.collapsible,l=h?t():a.next(),c=o.next(),u={oldHeader:o,oldPanel:c,newHeader:h?t():a,newPanel:l};e.preventDefault(),r&&!n.collapsible||this._trigger("beforeActivate",e,u)===!1||(n.active=h?!1:this.headers.index(a),this.active=r?t():a,this._toggle(u),this._removeClass(o,"ui-accordion-header-active","ui-state-active"),n.icons&&(i=o.children(".ui-accordion-header-icon"),this._removeClass(i,null,n.icons.activeHeader)._addClass(i,null,n.icons.header)),r||(this._removeClass(a,"ui-accordion-header-collapsed")._addClass(a,"ui-accordion-header-active","ui-state-active"),n.icons&&(s=a.children(".ui-accordion-header-icon"),this._removeClass(s,null,n.icons.header)._addClass(s,null,n.icons.activeHeader)),this._addClass(a.next(),"ui-accordion-content-active")))},_toggle:function(e){var i=e.newPanel,s=this.prevShow.length?this.prevShow:e.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=i,this.prevHide=s,this.options.animate?this._animate(i,s,e):(s.hide(),i.show(),this._toggleComplete(e)),s.attr({"aria-hidden":"true"}),s.prev().attr({"aria-selected":"false","aria-expanded":"false"}),i.length&&s.length?s.prev().attr({tabIndex:-1,"aria-expanded":"false"}):i.length&&this.headers.filter(function(){return 0===parseInt(t(this).attr("tabIndex"),10)}).attr("tabIndex",-1),i.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(t,e,i){var s,n,o,a=this,r=0,h=t.css("box-sizing"),l=t.length&&(!e.length||t.index()",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget 
ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault()},"click .ui-menu-item":function(e){var i=t(e.target),s=t(t.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&i.not(".ui-state-disabled").length&&(this.select(e),e.isPropagationStopped()||(this.mouseHandled=!0),i.has(".ui-menu").length?this.expand(e):!this.element.is(":focus")&&s.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(e){if(!this.previousFilter){var i=t(e.target).closest(".ui-menu-item"),s=t(e.currentTarget);i[0]===s[0]&&(this._removeClass(s.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(e,s))}},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var i=this.active||this.element.find(this.options.items).eq(0);e||this.focus(t,i)},blur:function(e){this._delay(function(){var i=!t.contains(this.element[0],t.ui.safeActiveElement(this.document[0]));i&&this.collapseAll(e)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t),this.mouseHandled=!1}})},_destroy:function(){var e=this.element.find(".ui-menu-item").removeAttr("role aria-disabled"),i=e.children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),i.children().each(function(){var e=t(this);e.data("ui-menu-submenu-caret")&&e.remove()})},_keydown:function(e){var i,s,n,o,a=!0;switch(e.keyCode){case t.ui.keyCode.PAGE_UP:this.previousPage(e);break;case t.ui.keyCode.PAGE_DOWN:this.nextPage(e);break;case t.ui.keyCode.HOME:this._move("first","first",e);break;case t.ui.keyCode.END:this._move("last","last",e);break;case t.ui.keyCode.UP:this.previous(e);break;case t.ui.keyCode.DOWN:this.next(e);break;case t.ui.keyCode.LEFT:this.collapse(e);break;case t.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(e);break;case t.ui.keyCode.ENTER:case t.ui.keyCode.SPACE:this._activate(e);break;case t.ui.keyCode.ESCAPE:this.collapse(e);break;default:a=!1,s=this.previousFilter||"",o=!1,n=e.keyCode>=96&&105>=e.keyCode?""+(e.keyCode-96):String.fromCharCode(e.keyCode),clearTimeout(this.filterTimer),n===s?o=!0:n=s+n,i=this._filterMenuItems(n),i=o&&-1!==i.index(this.active.next())?this.active.nextAll(".ui-menu-item"):i,i.length||(n=String.fromCharCode(e.keyCode),i=this._filterMenuItems(n)),i.length?(this.focus(e,i),this.previousFilter=n,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}a&&e.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var e,i,s,n,o,a=this,r=this.options.icons.submenu,h=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),s=h.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var e=t(this),i=e.prev(),s=t("").data("ui-menu-submenu-caret",!0);a._addClass(s,"ui-menu-icon","ui-icon "+r),i.attr("aria-haspopup","true").prepend(s),e.attr("aria-labelledby",i.attr("id"))}),this._addClass(s,"ui-menu","ui-widget 
ui-widget-content ui-front"),e=h.add(this.element),i=e.find(this.options.items),i.not(".ui-menu-item").each(function(){var e=t(this);a._isDivider(e)&&a._addClass(e,"ui-menu-divider","ui-widget-content")}),n=i.not(".ui-menu-item, .ui-menu-divider"),o=n.children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(n,"ui-menu-item")._addClass(o,"ui-menu-item-wrapper"),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!t.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){if("icons"===t){var i=this.element.find(".ui-menu-icon");this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)}this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t+""),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var i,s,n;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),s=this.active.children(".ui-menu-item-wrapper"),this._addClass(s,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),n=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(n,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=e.children(".ui-menu"),i.length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(e){var i,s,n,o,a,r;this._hasScroll()&&(i=parseFloat(t.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(t.css(this.activeMenu[0],"paddingTop"))||0,n=e.offset().top-this.activeMenu.offset().top-i-s,o=this.activeMenu.scrollTop(),a=this.activeMenu.height(),r=e.outerHeight(),0>n?this.activeMenu.scrollTop(o+n):n+r>a&&this.activeMenu.scrollTop(o+n-a+r))},blur:function(t,e){e||clearTimeout(this.timer),this.active&&(this._removeClass(this.active.children(".ui-menu-item-wrapper"),null,"ui-state-active"),this._trigger("blur",t,{item:this.active}),this.active=null)},_startOpening:function(t){clearTimeout(this.timer),"true"===t.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(t)},this.delay))},_open:function(e){var i=t.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(e.parents(".ui-menu")).hide().attr("aria-hidden","true"),e.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(e,i){clearTimeout(this.timer),this.timer=this._delay(function(){var s=i?this.element:t(e&&e.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(e),this._removeClass(s.find(".ui-state-active"),null,"ui-state-active"),this.activeMenu=s},this.delay)},_close:function(t){t||(t=this.active?this.active.parent():this.element),t.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false")},_closeOnDocumentClick:function(e){return!t(e.target).closest(".ui-menu").length},_isDivider:function(t){return!/[^\-\u2014\u2013\s]/.test(t.text())},collapse:function(t){var e=this.active&&this.active.parent().closest(".ui-menu-item",this.element);e&&e.length&&(this._close(),this.focus(t,e))},expand:function(t){var e=this.active&&this.active.children(".ui-menu 
").find(this.options.items).first();e&&e.length&&(this._open(e.parent()),this._delay(function(){this.focus(t,e)}))},next:function(t){this._move("next","first",t)},previous:function(t){this._move("prev","last",t)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(t,e,i){var s;this.active&&(s="first"===t||"last"===t?this.active["first"===t?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[t+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[e]()),this.focus(i,s)},nextPage:function(e){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return i=t(this),0>i.offset().top-s-n}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(e),void 0)},previousPage:function(e){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return i=t(this),i.offset().top-s+n>0}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items).first())),void 0):(this.next(e),void 0)},_hasScroll:function(){return this.element.outerHeight()",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var e,i,s,n=this.element[0].nodeName.toLowerCase(),o="textarea"===n,a="input"===n; +this.isMultiLine=o||!a&&this._isContentEditable(this.element),this.valueMethod=this.element[o||a?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return e=!0,s=!0,i=!0,void 0;e=!1,s=!1,i=!1;var o=t.ui.keyCode;switch(n.keyCode){case o.PAGE_UP:e=!0,this._move("previousPage",n);break;case o.PAGE_DOWN:e=!0,this._move("nextPage",n);break;case o.UP:e=!0,this._keyEvent("previous",n);break;case o.DOWN:e=!0,this._keyEvent("next",n);break;case o.ENTER:this.menu.active&&(e=!0,n.preventDefault(),this.menu.select(n));break;case o.TAB:this.menu.active&&this.menu.select(n);break;case o.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(e)return e=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=t.ui.keyCode;switch(s.keyCode){case n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(t){return s?(s=!1,t.preventDefault(),void 0):(this._searchTimeout(t),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(clearTimeout(this.searching),this.close(t),this._change(t),void 0)}}),this._initSource(),this.menu=t("