Changed \"" +
- StringEscapeUtils.escapeHtml(param) + "\" from \"" +
- StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) +
+ StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
+ StringEscapeUtils.escapeHtml4(oldConf.getRaw(param)) +
"\" to default
");
reconf.reconfigureProperty(param, null);
} else if (!value.equals("default") && !value.equals("null") &&
@@ -168,16 +168,16 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
// change from default or value to different value
if (oldConf.getRaw(param) == null) {
out.println("
Changed \"" +
- StringEscapeUtils.escapeHtml(param) +
+ StringEscapeUtils.escapeHtml4(param) +
"\" from default to \"" +
- StringEscapeUtils.escapeHtml(value) + "\"</p>");
+ out.println("<p>\"" + StringEscapeUtils.escapeHtml4(param) +
"\" not changed because value has changed from \"" +
- StringEscapeUtils.escapeHtml(value) + "\" to \"" +
- StringEscapeUtils.escapeHtml(newConf.getRaw(param)) +
+ StringEscapeUtils.escapeHtml4(value) + "\" to \"" +
+ StringEscapeUtils.escapeHtml4(newConf.getRaw(param)) +
"\" since approval
");
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index 050540b4cb..286312ce5e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -33,8 +33,8 @@
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
index 9fdf242fd5..fa84c47d26 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
@@ -27,7 +27,7 @@
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 08787a51bd..7b4607507b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -32,7 +32,9 @@
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -40,6 +42,7 @@
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.KMSUtil;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
@@ -77,7 +80,6 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
@@ -130,9 +132,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
private final ValueQueue encKeyVersionQueue;
- private static final ObjectWriter WRITER =
- new ObjectMapper().writerWithDefaultPrettyPrinter();
-
private final Text dtService;
// Allow fallback to default kms server port 9600 for certain tests that do
@@ -235,7 +234,7 @@ public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
private static void writeJson(Object obj, OutputStream os)
throws IOException {
Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
- WRITER.writeValue(writer, obj);
+ JsonSerialization.writer().writeValue(writer, obj);
}
/**
@@ -543,7 +542,9 @@ private T call(HttpURLConnection conn, Object jsonOutput,
String requestMethod = conn.getRequestMethod();
URL url = conn.getURL();
conn = createConnection(url, requestMethod);
- conn.setRequestProperty(CONTENT_TYPE, contentType);
+ if (contentType != null && !contentType.isEmpty()) {
+ conn.setRequestProperty(CONTENT_TYPE, contentType);
+ }
return call(conn, jsonOutput, expectedResponse, klass,
authRetryCount - 1);
}
@@ -1087,8 +1088,7 @@ private UserGroupInformation getActualUgi() throws IOException {
actualUgi = currentUgi.getRealUser();
}
if (UserGroupInformation.isSecurityEnabled() &&
- !containsKmsDt(actualUgi) &&
- !actualUgi.hasKerberosCredentials()) {
+ !containsKmsDt(actualUgi) && !actualUgi.shouldRelogin()) {
// Use login user is only necessary when Kerberos is enabled
// but the actual user does not have either
// Kerberos credential or KMS delegation token for KMS operations
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
new file mode 100644
index 0000000000..e1336b8085
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Byte array backed part handle.
+ */
+public final class BBPartHandle implements PartHandle {
+
+ private static final long serialVersionUID = 0x23ce3eb1;
+
+ private final byte[] bytes;
+
+ private BBPartHandle(ByteBuffer byteBuffer){
+ this.bytes = byteBuffer.array();
+ }
+
+ public static PartHandle from(ByteBuffer byteBuffer) {
+ return new BBPartHandle(byteBuffer);
+ }
+
+ @Override
+ public ByteBuffer bytes() {
+ return ByteBuffer.wrap(bytes);
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(bytes);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof PartHandle)) {
+ return false;
+ }
+ PartHandle o = (PartHandle) other;
+ return bytes().equals(o.bytes());
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
new file mode 100644
index 0000000000..6430c145e2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Byte array backed upload handle.
+ */
+public final class BBUploadHandle implements UploadHandle {
+
+ private static final long serialVersionUID = 0x69d5509b;
+
+ private final byte[] bytes;
+
+ private BBUploadHandle(ByteBuffer byteBuffer){
+ this.bytes = byteBuffer.array();
+ }
+
+ public static UploadHandle from(ByteBuffer byteBuffer) {
+ return new BBUploadHandle(byteBuffer);
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(bytes);
+ }
+
+ @Override
+ public ByteBuffer bytes() {
+ return ByteBuffer.wrap(bytes);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof UploadHandle)) {
+ return false;
+ }
+ UploadHandle o = (UploadHandle) other;
+ return bytes().equals(o.bytes());
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 9e0ba20c28..c7f32f92a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -542,7 +542,7 @@ public class CommonConfigurationKeysPublic {
*
* core-default.xml
*/
- public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+ public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY =
"hadoop.security.groups.shell.command.timeout";
/**
* @see
@@ -550,7 +550,7 @@ public class CommonConfigurationKeysPublic {
* core-default.xml
*/
public static final long
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+ HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT =
0L;
/**
* @see
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 86c284a9e8..d43129388b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -115,6 +115,27 @@ public abstract class FSDataOutputStreamBuilder
*/
protected abstract B getThisBuilder();
+ /**
+ * Construct from a {@link FileContext}.
+ *
+ * @param fc FileContext
+ * @param p path.
+ * @throws IOException
+ */
+ FSDataOutputStreamBuilder(@Nonnull FileContext fc,
+ @Nonnull Path p) throws IOException {
+ Preconditions.checkNotNull(fc);
+ Preconditions.checkNotNull(p);
+ this.fs = null;
+ this.path = p;
+
+ AbstractFileSystem afs = fc.getFSofPath(p);
+ FsServerDefaults defaults = afs.getServerDefaults(p);
+ bufferSize = defaults.getFileBufferSize();
+ replication = defaults.getReplication();
+ blockSize = defaults.getBlockSize();
+ }
+
/**
* Constructor.
*/
@@ -131,6 +152,7 @@ protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem,
}
protected FileSystem getFS() {
+ Preconditions.checkNotNull(fs);
return fs;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 6ea69d01b1..5215c3cdee 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -24,6 +24,7 @@
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
@@ -35,6 +36,8 @@
import java.util.TreeSet;
import java.util.Map.Entry;
+import javax.annotation.Nonnull;
+
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -694,6 +697,69 @@ public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
}.resolve(this, absF);
}
+ /**
+ * {@link FSDataOutputStreamBuilder} for {@link FileContext}.
+ */
+ private static final class FCDataOutputStreamBuilder extends
+ FSDataOutputStreamBuilder<
+ FSDataOutputStream, FCDataOutputStreamBuilder> {
+ private final FileContext fc;
+
+ private FCDataOutputStreamBuilder(
+ @Nonnull FileContext fc, @Nonnull Path p) throws IOException {
+ super(fc, p);
+ this.fc = fc;
+ Preconditions.checkNotNull(fc);
+ }
+
+ @Override
+ protected FCDataOutputStreamBuilder getThisBuilder() {
+ return this;
+ }
+
+ @Override
+ public FSDataOutputStream build() throws IOException {
+ final EnumSet<CreateFlag> flags = getFlags();
+ List<CreateOpts> createOpts = new ArrayList<>(Arrays.asList(
+ CreateOpts.blockSize(getBlockSize()),
+ CreateOpts.bufferSize(getBufferSize()),
+ CreateOpts.repFac(getReplication()),
+ CreateOpts.perms(getPermission())
+ ));
+ if (getChecksumOpt() != null) {
+ createOpts.add(CreateOpts.checksumParam(getChecksumOpt()));
+ }
+ if (getProgress() != null) {
+ createOpts.add(CreateOpts.progress(getProgress()));
+ }
+ if (isRecursive()) {
+ createOpts.add(CreateOpts.createParent());
+ }
+ return fc.create(getPath(), flags,
+ createOpts.toArray(new CreateOpts[0]));
+ }
+ }
+
+ /**
+ * Create a {@link FSDataOutputStreamBuilder} for creating or overwriting
+ * a file on indicated path.
+ *
+ * @param f the file path to create builder for.
+ * @return {@link FSDataOutputStreamBuilder} to build a
+ * {@link FSDataOutputStream}.
+ *
+ * Upon {@link FSDataOutputStreamBuilder#build()} being invoked,
+ * builder parameters will be verified by {@link FileContext} and
+ * {@link AbstractFileSystem#create}. And filesystem states will be modified.
+ *
+ * Client should expect {@link FSDataOutputStreamBuilder#build()} throw the
+ * same exceptions as create(Path, EnumSet, CreateOpts...).
+ */
+ public FSDataOutputStreamBuilder<FSDataOutputStream, ?> create(final Path f)
+ throws IOException {
+ return new FCDataOutputStreamBuilder(this, f).create();
+ }
+
/**
* Make(create) a directory and all the non-existent parents.
*
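A minimal usage sketch (not part of the patch) of the new FileContext#create(Path) builder entry point, assuming the parameterized return type above; the path and configuration are illustrative:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextCreateBuilderSketch {
  public static void main(String[] args) throws IOException {
    // Local FileContext; any AbstractFileSystem-backed context works the same.
    FileContext fc = FileContext.getLocalFSFileContext(new Configuration());
    Path file = new Path("/tmp/fcontext-builder-demo.txt");  // illustrative

    // create(Path) returns a builder preloaded with the server defaults
    // (buffer size, replication, block size); nothing touches the file
    // system until build() is invoked.
    try (FSDataOutputStream out = fc.create(file)
        .overwrite(true)   // CREATE + OVERWRITE flags
        .recursive()       // create missing parent directories
        .build()) {
      out.writeBytes("hello, builder\n");
    }
  }
}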
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
new file mode 100644
index 0000000000..b57ff3dc3a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.common.base.Charsets;
+import org.apache.commons.compress.utils.IOUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A MultipartUploader that uses the basic FileSystem commands.
+ * This is done in three stages:
+ * Init - create a temp _multipart directory.
+ * PutPart - copying the individual parts of the file to the temp directory.
+ * Complete - use {@link FileSystem#concat} to merge the files; and then delete
+ * the temp directory.
+ */
+public class FileSystemMultipartUploader extends MultipartUploader {
+
+ private final FileSystem fs;
+
+ public FileSystemMultipartUploader(FileSystem fs) {
+ this.fs = fs;
+ }
+
+ @Override
+ public UploadHandle initialize(Path filePath) throws IOException {
+ Path collectorPath = createCollectorPath(filePath);
+ fs.mkdirs(collectorPath, FsPermission.getDirDefault());
+
+ ByteBuffer byteBuffer = ByteBuffer.wrap(
+ collectorPath.toString().getBytes(Charsets.UTF_8));
+ return BBUploadHandle.from(byteBuffer);
+ }
+
+ @Override
+ public PartHandle putPart(Path filePath, InputStream inputStream,
+ int partNumber, UploadHandle uploadId, long lengthInBytes)
+ throws IOException {
+
+ byte[] uploadIdByteArray = uploadId.toByteArray();
+ Path collectorPath = new Path(new String(uploadIdByteArray, 0,
+ uploadIdByteArray.length, Charsets.UTF_8));
+ Path partPath =
+ Path.mergePaths(collectorPath, Path.mergePaths(new Path(Path.SEPARATOR),
+ new Path(Integer.toString(partNumber) + ".part")));
+ FSDataOutputStreamBuilder outputStream = fs.createFile(partPath);
+ FSDataOutputStream fsDataOutputStream = outputStream.build();
+ IOUtils.copy(inputStream, fsDataOutputStream, 4096);
+ fsDataOutputStream.close();
+ return BBPartHandle.from(ByteBuffer.wrap(
+ partPath.toString().getBytes(Charsets.UTF_8)));
+ }
+
+ private Path createCollectorPath(Path filePath) {
+ return Path.mergePaths(filePath.getParent(),
+ Path.mergePaths(new Path(filePath.getName().split("\\.")[0]),
+ Path.mergePaths(new Path("_multipart"),
+ new Path(Path.SEPARATOR))));
+ }
+
+ @Override
+ @SuppressWarnings("deprecation") // rename w/ OVERWRITE
+ public PathHandle complete(Path filePath,
+ List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
+ throws IOException {
+ handles.sort(Comparator.comparing(Pair::getKey));
+ List<Path> partHandles = handles
+ .stream()
+ .map(pair -> {
+ byte[] byteArray = pair.getValue().toByteArray();
+ return new Path(new String(byteArray, 0, byteArray.length,
+ Charsets.UTF_8));
+ })
+ .collect(Collectors.toList());
+
+ Path collectorPath = createCollectorPath(filePath);
+ Path filePathInsideCollector = Path.mergePaths(collectorPath,
+ new Path(Path.SEPARATOR + filePath.getName()));
+ fs.create(filePathInsideCollector).close();
+ fs.concat(filePathInsideCollector,
+ partHandles.toArray(new Path[handles.size()]));
+ fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+ fs.delete(collectorPath, true);
+ FileStatus status = fs.getFileStatus(filePath);
+ return fs.getPathHandle(status);
+ }
+
+ @Override
+ public void abort(Path filePath, UploadHandle uploadId) throws IOException {
+ byte[] uploadIdByteArray = uploadId.toByteArray();
+ Path collectorPath = new Path(new String(uploadIdByteArray, 0,
+ uploadIdByteArray.length, Charsets.UTF_8));
+ fs.delete(collectorPath, true);
+ }
+
+ /**
+ * Factory for creating MultipartUploaderFactory objects for file://
+ * filesystems.
+ */
+ public static class Factory extends MultipartUploaderFactory {
+ protected MultipartUploader createMultipartUploader(FileSystem fs,
+ Configuration conf) {
+ if (fs.getScheme().equals("file")) {
+ return new FileSystemMultipartUploader(fs);
+ }
+ return null;
+ }
+ }
+}
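A minimal sketch (not part of the patch) of the three stages described in the class javadoc, written against the new API; it assumes the supplied FileSystem supports concat() and getPathHandle(), as the raw local implementation now does, and the part contents are illustrative:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemMultipartUploader;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public final class MultipartUploadSketch {

  /** Upload two in-memory parts to dest and materialize the final file. */
  static PathHandle uploadTwoParts(FileSystem fs, Path dest,
      byte[] part1, byte[] part2) throws IOException {
    MultipartUploader mpu = new FileSystemMultipartUploader(fs);

    // 1. Initialize: creates the temporary _multipart collector directory.
    UploadHandle upload = mpu.initialize(dest);

    // 2. Put parts; any order (and any node holding the UploadHandle) works.
    PartHandle h2 = mpu.putPart(dest, new ByteArrayInputStream(part2),
        2, upload, part2.length);
    PartHandle h1 = mpu.putPart(dest, new ByteArrayInputStream(part1),
        1, upload, part1.length);

    // 3. Complete: concatenates the parts in part-number order, renames the
    //    result to dest and removes the collector directory.
    List<Pair<Integer, PartHandle>> handles =
        Arrays.asList(Pair.of(2, h2), Pair.of(1, h1));
    return mpu.complete(dest, handles, upload);
  }

  private MultipartUploadSketch() {
  }
}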
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 94d3389408..5be6e5f829 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -23,7 +23,6 @@
import java.util.Arrays;
import java.util.LinkedList;
-import org.apache.commons.lang.WordUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -275,7 +274,7 @@ private void printInstanceHelp(PrintStream out, Command instance) {
listing = null;
}
- for (String descLine : WordUtils.wrap(
+ for (String descLine : StringUtils.wrap(
line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
out.println(prefix + descLine);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java
new file mode 100644
index 0000000000..a6b37b32bb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+import java.util.Optional;
+
+/**
+ * Opaque handle to an entity in a FileSystem.
+ */
+public class LocalFileSystemPathHandle implements PathHandle {
+
+ private final String path;
+ private final Long mtime;
+
+ public LocalFileSystemPathHandle(String path, Optional<Long> mtime) {
+ this.path = path;
+ this.mtime = mtime.orElse(null);
+ }
+
+ public LocalFileSystemPathHandle(ByteBuffer bytes) throws IOException {
+ if (null == bytes) {
+ throw new IOException("Missing PathHandle");
+ }
+ LocalFileSystemPathHandleProto p =
+ LocalFileSystemPathHandleProto.parseFrom(ByteString.copyFrom(bytes));
+ path = p.hasPath() ? p.getPath() : null;
+ mtime = p.hasMtime() ? p.getMtime() : null;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ public void verify(FileStatus stat) throws InvalidPathHandleException {
+ if (null == stat) {
+ throw new InvalidPathHandleException("Could not resolve handle");
+ }
+ if (mtime != null && mtime != stat.getModificationTime()) {
+ throw new InvalidPathHandleException("Content changed");
+ }
+ }
+
+ @Override
+ public ByteBuffer bytes() {
+ LocalFileSystemPathHandleProto.Builder b =
+ LocalFileSystemPathHandleProto.newBuilder();
+ b.setPath(path);
+ if (mtime != null) {
+ b.setMtime(mtime);
+ }
+ return b.build().toByteString().asReadOnlyByteBuffer();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ LocalFileSystemPathHandle that = (LocalFileSystemPathHandle) o;
+ return Objects.equals(path, that.path) &&
+ Objects.equals(mtime, that.mtime);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(path, mtime);
+ }
+
+ @Override
+ public String toString() {
+ return "LocalFileSystemPathHandle{" +
+ "path='" + path + '\'' +
+ ", mtime=" + mtime +
+ '}';
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
new file mode 100644
index 0000000000..24a92169a2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * MultipartUploader is an interface for uploading a file in multiple parts,
+ * possibly across multiple nodes. Users should:
+ * 1. Initialize an upload
+ * 2. Upload parts in any order
+ * 3. Complete the upload in order to have it materialize in the destination FS.
+ *
+ * Implementers should make sure that 'complete' will reorder parts if the
+ * destination FS doesn't already do it for them.
+ */
+public abstract class MultipartUploader {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(MultipartUploader.class);
+
+ /**
+ * Initialize a multipart upload.
+ * @param filePath Target path for upload.
+ * @return unique identifier associating part uploads.
+ * @throws IOException
+ */
+ public abstract UploadHandle initialize(Path filePath) throws IOException;
+
+ /**
+ * Put part as part of a multipart upload. It should be possible to have
+ * parts uploaded in any order (or in parallel).
+ * @param filePath Target path for upload (same as {@link #initialize(Path)}).
+ * @param inputStream Data for this part.
+ * @param partNumber Index of the part relative to others.
+ * @param uploadId Identifier from {@link #initialize(Path)}.
+ * @param lengthInBytes Target length to read from the stream.
+ * @return unique PartHandle identifier for the uploaded part.
+ * @throws IOException
+ */
+ public abstract PartHandle putPart(Path filePath, InputStream inputStream,
+ int partNumber, UploadHandle uploadId, long lengthInBytes)
+ throws IOException;
+
+ /**
+ * Complete a multipart upload.
+ * @param filePath Target path for upload (same as {@link #initialize(Path)}.
+ * @param handles Identifiers with associated part numbers from
+ * {@link #putPart(Path, InputStream, int, UploadHandle, long)}.
+ * Depending on the backend, the list order may be significant.
+ * @param multipartUploadId Identifier from {@link #initialize(Path)}.
+ * @return unique PathHandle identifier for the uploaded file.
+ * @throws IOException
+ */
+ public abstract PathHandle complete(Path filePath,
+ List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
+ throws IOException;
+
+ /**
+ * Aborts a multipart upload.
+ * @param filePath Target path for upload (same as {@link #initialize(Path)}.
+ * @param multipartuploadId Identifier from {@link #initialize(Path)}.
+ * @throws IOException
+ */
+ public abstract void abort(Path filePath, UploadHandle multipartuploadId)
+ throws IOException;
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java
new file mode 100644
index 0000000000..b0fa798ee2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.ServiceLoader;
+
+/**
+ * {@link ServiceLoader}-driven uploader API for storage services supporting
+ * multipart uploads.
+ */
+public abstract class MultipartUploaderFactory {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(MultipartUploaderFactory.class);
+
+ /**
+ * Multipart Uploaders listed as services.
+ */
+ private static ServiceLoader<MultipartUploaderFactory> serviceLoader =
+ ServiceLoader.load(MultipartUploaderFactory.class,
+ MultipartUploaderFactory.class.getClassLoader());
+
+ // Iterate through the serviceLoader to avoid lazy loading.
+ // Lazy loading would require synchronization in concurrent use cases.
+ static {
+ Iterator<MultipartUploaderFactory> iterServices = serviceLoader.iterator();
+ while (iterServices.hasNext()) {
+ iterServices.next();
+ }
+ }
+
+ public static MultipartUploader get(FileSystem fs, Configuration conf)
+ throws IOException {
+ MultipartUploader mpu = null;
+ for (MultipartUploaderFactory factory : serviceLoader) {
+ mpu = factory.createMultipartUploader(fs, conf);
+ if (mpu != null) {
+ break;
+ }
+ }
+ return mpu;
+ }
+
+ protected abstract MultipartUploader createMultipartUploader(FileSystem fs,
+ Configuration conf) throws IOException;
+}
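A short sketch (not part of the patch) of the intended lookup path; the helper name is hypothetical, and the scheme check itself lives inside each registered factory, e.g. the file:// factory above:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.MultipartUploaderFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedMultipartUploaderException;

public final class UploaderLookupSketch {
  /** Resolve a MultipartUploader for the FileSystem that owns the path. */
  static MultipartUploader uploaderFor(Path path, Configuration conf)
      throws IOException {
    FileSystem fs = path.getFileSystem(conf);
    // get() walks the ServiceLoader-registered factories; each factory
    // returns null unless it recognizes the FileSystem's scheme.
    MultipartUploader mpu = MultipartUploaderFactory.get(fs, conf);
    if (mpu == null) {
      throw new UnsupportedMultipartUploaderException(
          "No multipart uploader for scheme " + fs.getScheme());
    }
    return mpu;
  }

  private UploaderLookupSketch() {
  }
}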
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index 126e754731..5e932864c8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -55,6 +55,9 @@ public static ChecksumParam checksumParam(
ChecksumOpt csumOpt) {
return new ChecksumParam(csumOpt);
}
+ public static Progress progress(Progressable prog) {
+ return new Progress(prog);
+ }
public static Perms perms(FsPermission perm) {
return new Perms(perm);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java
new file mode 100644
index 0000000000..df70b746cc
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+
+/**
+ * Opaque, serializable reference to a part id for multipart uploads.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface PartHandle extends Serializable {
+ /**
+ * @return Serialized form in bytes.
+ */
+ default byte[] toByteArray() {
+ ByteBuffer bb = bytes();
+ byte[] ret = new byte[bb.remaining()];
+ bb.get(ret);
+ return ret;
+ }
+
+ ByteBuffer bytes();
+
+ @Override
+ boolean equals(Object other);
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 252b3cca79..b6244d6a36 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -27,7 +27,7 @@
import java.util.regex.Pattern;
import org.apache.avro.reflect.Stringable;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index c0f81997b8..bd003ae90a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -40,6 +40,7 @@
import java.nio.file.attribute.FileTime;
import java.util.Arrays;
import java.util.EnumSet;
+import java.util.Optional;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -212,7 +213,19 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return new FSDataInputStream(new BufferedFSInputStream(
new LocalFSFileInputStream(f), bufferSize));
}
-
+
+ @Override
+ public FSDataInputStream open(PathHandle fd, int bufferSize)
+ throws IOException {
+ if (!(fd instanceof LocalFileSystemPathHandle)) {
+ fd = new LocalFileSystemPathHandle(fd.bytes());
+ }
+ LocalFileSystemPathHandle id = (LocalFileSystemPathHandle) fd;
+ id.verify(getFileStatus(new Path(id.getPath())));
+ return new FSDataInputStream(new BufferedFSInputStream(
+ new LocalFSFileInputStream(new Path(id.getPath())), bufferSize));
+ }
+
/*********************************************************
* For create()'s FSOutputStream.
*********************************************************/
@@ -246,7 +259,7 @@ private LocalFSFileOutputStream(Path f, boolean append,
}
}
}
-
+
/*
* Just forward to the fos
*/
@@ -350,6 +363,18 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
return out;
}
+ @Override
+ public void concat(final Path trg, final Path [] psrcs) throws IOException {
+ final int bufferSize = 4096;
+ try(FSDataOutputStream out = create(trg)) {
+ for (Path src : psrcs) {
+ try(FSDataInputStream in = open(src)) {
+ IOUtils.copyBytes(in, out, bufferSize, false);
+ }
+ }
+ }
+ }
+
@Override
public boolean rename(Path src, Path dst) throws IOException {
// Attempt rename using Java API.
@@ -863,6 +888,38 @@ public void setTimes(Path p, long mtime, long atime) throws IOException {
}
}
+ /**
+ * Hook to implement support for {@link PathHandle} operations.
+ * @param stat Referent in the target FileSystem
+ * @param opts Constraints that determine the validity of the
+ * {@link PathHandle} reference.
+ */
+ protected PathHandle createPathHandle(FileStatus stat,
+ Options.HandleOpt... opts) {
+ if (stat.isDirectory() || stat.isSymlink()) {
+ throw new IllegalArgumentException("PathHandle only available for files");
+ }
+ String authority = stat.getPath().toUri().getAuthority();
+ if (authority != null && !authority.equals("file://")) {
+ throw new IllegalArgumentException("Wrong FileSystem: " + stat.getPath());
+ }
+ Options.HandleOpt.Data data =
+ Options.HandleOpt.getOpt(Options.HandleOpt.Data.class, opts)
+ .orElse(Options.HandleOpt.changed(false));
+ Options.HandleOpt.Location loc =
+ Options.HandleOpt.getOpt(Options.HandleOpt.Location.class, opts)
+ .orElse(Options.HandleOpt.moved(false));
+ if (loc.allowChange()) {
+ throw new UnsupportedOperationException("Tracking file movement in " +
+ "basic FileSystem is not supported");
+ }
+ final Path p = stat.getPath();
+ final Optional<Long> mtime = !data.allowChange()
+ ? Optional.of(stat.getModificationTime())
+ : Optional.empty();
+ return new LocalFileSystemPathHandle(p.toString(), mtime);
+ }
+
@Override
public boolean supportsSymlinks() {
return true;
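To illustrate the new hooks, a small sketch (not part of the patch) of a path-handle round trip on the raw local file system; the path is illustrative and must already exist:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class LocalPathHandleSketch {
  public static void main(String[] args) throws IOException {
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.initialize(URI.create("file:///"), new Configuration());

    Path file = new Path("/tmp/handle-demo.txt");  // illustrative, must exist
    FileStatus stat = fs.getFileStatus(file);

    // Ask for a handle that is invalidated if the content (mtime) changes;
    // createPathHandle above rejects moved(true) for the local FS.
    PathHandle handle = fs.getPathHandle(stat,
        Options.HandleOpt.changed(false), Options.HandleOpt.moved(false));

    // open(PathHandle, bufferSize) re-parses the handle and calls verify()
    // against the current FileStatus before returning a stream.
    try (FSDataInputStream in = fs.open(handle, 4096)) {
      System.out.println("first byte: " + in.read());
    }
  }
}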
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java
new file mode 100644
index 0000000000..5606a80dec
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedMultipartUploaderException.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * MultipartUploader for a given file system name/scheme is not supported.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class UnsupportedMultipartUploaderException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Constructs exception with the specified detail message.
+ *
+ * @param message exception message.
+ */
+ public UnsupportedMultipartUploaderException(final String message) {
+ super(message);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java
new file mode 100644
index 0000000000..143b4d1584
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UploadHandle.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+
+/**
+ * Opaque, serializable reference to an uploadId for multipart uploads.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface UploadHandle extends Serializable {
+
+ /**
+ * @return Serialized form in bytes.
+ */
+ default byte[] toByteArray() {
+ ByteBuffer bb = bytes();
+ byte[] ret = new byte[bb.remaining()];
+ bb.get(ret);
+ return ret;
+ }
+
+ ByteBuffer bytes();
+
+ @Override
+ boolean equals(Object other);
+
+}
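A short sketch (not part of the patch) of the serialization round trip that toByteArray() enables, using the byte-buffer-backed handle added earlier; the identifier string is illustrative:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.BBUploadHandle;
import org.apache.hadoop.fs.UploadHandle;

public class UploadHandleRoundTripSketch {
  public static void main(String[] args) {
    // Wrap an opaque identifier (an illustrative string here) in a handle.
    UploadHandle original = BBUploadHandle.from(
        ByteBuffer.wrap("upload-0001".getBytes(StandardCharsets.UTF_8)));

    // toByteArray() copies the buffer's remaining bytes, so the handle can
    // be persisted or shipped to another node...
    byte[] persisted = original.toByteArray();

    // ...and rebuilt later; equality is defined on the underlying bytes.
    UploadHandle restored = BBUploadHandle.from(ByteBuffer.wrap(persisted));
    System.out.println(original.equals(restored));  // prints true
  }
}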
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
index 8f6fc4d570..011e489df2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
@@ -23,7 +23,7 @@
import java.util.LinkedList;
import java.util.List;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
index 9dd7771fd5..bbedf2a2dc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.io;
import com.google.common.collect.ComparisonChain;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import java.nio.ByteBuffer;
import java.util.Map;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
index a2903f89b9..5af6602b87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
@@ -26,7 +26,6 @@
import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
-import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
/**
@@ -195,52 +194,43 @@ public int compareTo(byte[] buffer1, int offset1, int length1,
length1 == length2) {
return 0;
}
+ final int stride = 8;
int minLength = Math.min(length1, length2);
- int minWords = minLength / Longs.BYTES;
+ int strideLimit = minLength & ~(stride - 1);
int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
+ int i;
/*
* Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
* time is no slower than comparing 4 bytes at a time even on 32-bit.
* On the other hand, it is substantially faster on 64-bit.
*/
- for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
+ for (i = 0; i < strideLimit; i += stride) {
long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
- long diff = lw ^ rw;
- if (diff != 0) {
+ if (lw != rw) {
if (!littleEndian) {
return lessThanUnsigned(lw, rw) ? -1 : 1;
}
- // Use binary search
- int n = 0;
- int y;
- int x = (int) diff;
- if (x == 0) {
- x = (int) (diff >>> 32);
- n = 32;
- }
-
- y = x << 16;
- if (y == 0) {
- n += 16;
- } else {
- x = y;
- }
-
- y = x << 8;
- if (y == 0) {
- n += 8;
- }
- return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
+ /*
+ * We want to compare only the first index where left[index] !=
+ * right[index]. This corresponds to the least significant nonzero
+ * byte in lw ^ rw, since lw and rw are little-endian.
+ * Long.numberOfTrailingZeros(diff) tells us the least significant
+ * nonzero bit, and zeroing out the first three bits of L.nTZ gives
+ * us the shift to get that least significant nonzero byte. This
+ * comparison logic is based on UnsignedBytes from Guava v21
+ */
+ int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
+ return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
}
}
// The epilogue to cover the last (minLength % 8) elements.
- for (int i = minWords * Longs.BYTES; i < minLength; i++) {
+ for (; i < minLength; i++) {
int result = UnsignedBytes.compare(
buffer1[offset1 + i],
buffer2[offset2 + i]);
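A standalone illustration (not part of the patch) of the trick described in the comment above: Long.numberOfTrailingZeros on the XOR locates the least-significant differing byte of two little-endian words. The sample values are arbitrary:

public final class LexCompareSketch {
  /** Compare two longs that were read little-endian, as the patched loop does. */
  static int compareLittleEndianWords(long lw, long rw) {
    if (lw == rw) {
      return 0;
    }
    // The least-significant set bit of lw ^ rw sits inside the first
    // differing byte; clearing the low three bits of its index yields a
    // byte-aligned shift for extracting that byte from each word.
    int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
    return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
  }

  public static void main(String[] args) {
    // Byte sequences {01,02,03,...} and {01,7F,03,...} first differ at
    // byte index 1, so the result is negative (0x02 < 0x7F unsigned).
    long lw = 0x0807060504030201L;
    long rw = 0x0807060504037F01L;
    System.out.println(compareLittleEndianWords(lw, rw));  // prints -125
  }
}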
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index f008e24d2f..0f95058afc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -22,8 +22,8 @@
import java.util.HashMap;
import java.util.Map;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
index 3d6867aec4..ec317eee4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
@@ -46,7 +46,7 @@ public final class ErasureCodeNative {
loadLibrary();
} catch (Throwable t) {
problem = "Loading ISA-L failed: " + t.getMessage();
- LOG.error("Loading ISA-L failed", t);
+ LOG.warn(problem);
}
LOADING_FAILURE_REASON = problem;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
index 412634462a..4d820c271a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
@@ -21,7 +21,7 @@
import java.io.IOException;
import java.io.FileDescriptor;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index bdfa471f53..b156d1fe64 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.ipc;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index 0a00ca73d9..f12ecb6462 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -39,7 +39,7 @@
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AtomicDoubleArray;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.metrics2.MetricsCollector;
@@ -429,7 +429,7 @@ private void decayCurrentCounts() {
updateAverageResponseTime(true);
} catch (Exception ex) {
LOG.error("decayCurrentCounts exception: " +
- ExceptionUtils.getFullStackTrace(ex));
+ ExceptionUtils.getStackTrace(ex));
throw ex;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 6d9ea3e72e..3a8c83dea7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -32,7 +32,7 @@
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
import org.apache.hadoop.metrics2.util.MBeans;
@@ -286,7 +286,7 @@ public int size() {
*/
@Override
public Iterator<E> iterator() {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
index ce6fbe1d82..1d62c0a29f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index 027450cb65..976f16bedd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -37,10 +37,8 @@
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.configuration2.builder.fluent.Configurations;
-import org.apache.commons.configuration2.builder.fluent.Parameters;
-import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
import org.apache.commons.configuration2.ex.ConfigurationException;
+import org.apache.commons.configuration2.io.FileHandler;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsPlugin;
import org.apache.hadoop.metrics2.filter.GlobFilter;
@@ -112,12 +110,11 @@ static MetricsConfig create(String prefix, String... fileNames) {
static MetricsConfig loadFirst(String prefix, String... fileNames) {
for (String fname : fileNames) {
try {
- Configuration cf = new Configurations().propertiesBuilder(fname)
- .configure(new Parameters().properties()
- .setFileName(fname)
- .setListDelimiterHandler(new DefaultListDelimiterHandler(',')))
- .getConfiguration()
- .interpolatedConfiguration();
+ PropertiesConfiguration pcf = new PropertiesConfiguration();
+ FileHandler fh = new FileHandler(pcf);
+ fh.setFileName(fname);
+ fh.load();
+ Configuration cf = pcf.interpolatedConfiguration();
LOG.info("Loaded properties from {}", fname);
if (LOG.isDebugEnabled()) {
LOG.debug("Properties: {}", toString(cf));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
index 3d7a90e7ee..9b54adcb43 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
@@ -21,7 +21,7 @@
import java.lang.reflect.Method;
import static com.google.common.base.Preconditions.*;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index b2042e7a12..a3ca98d040 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -21,7 +21,7 @@
import java.lang.reflect.Field;
import java.lang.reflect.Method;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
index cc32975513..6b30618475 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
@@ -26,7 +26,7 @@
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
index 053cb5535c..22c288a3b1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
@@ -32,7 +32,7 @@
import java.util.function.Function;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index 92fe3d1496..5ef31785a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
index 0f6e9a9172..92ac9529be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
@@ -37,7 +37,7 @@
import java.util.regex.Pattern;
import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
index ead9a7430b..45759df6ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
@@ -28,7 +28,7 @@
import java.util.List;
import java.util.Map;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
index ac118c0517..9693220438 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
@@ -28,7 +28,7 @@
import java.nio.channels.ReadableByteChannel;
import java.nio.ByteBuffer;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.CloseableReferenceCount;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index c7af97f60a..e36399ff96 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -32,7 +32,7 @@
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 94698d8446..31f4398055 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.security;
import java.io.IOException;
-import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.StringTokenizer;
@@ -26,7 +25,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -52,7 +51,8 @@ public class ShellBasedUnixGroupsMapping extends Configured
protected static final Logger LOG =
LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);
- private long timeout = 0L;
+ private long timeout = CommonConfigurationKeys.
+ HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;
  private static final List<String> EMPTY_GROUPS = new LinkedList<>();
@Override
@@ -61,10 +61,10 @@ public void setConf(Configuration conf) {
if (conf != null) {
timeout = conf.getTimeDuration(
CommonConfigurationKeys.
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+ HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
CommonConfigurationKeys.
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT,
- TimeUnit.SECONDS);
+ HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
}
}
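With the hunk above, the group-shell timeout is read through Configuration.getTimeDuration in milliseconds, so suffixed values such as "5s" keep working while bare numbers are interpreted as milliseconds. A small sketch of that resolution, using a hypothetical key:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class TimeDurationSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false); // skip default resources
        conf.set("my.test.timeout", "5s");             // hypothetical key
        // A unit suffix is honoured: "5s" resolves to 5000 when asked for milliseconds.
        System.out.println(conf.getTimeDuration("my.test.timeout", 0L, TimeUnit.MILLISECONDS));
        conf.set("my.test.timeout", "500");
        // A bare number is taken in the requested unit: 500 ms here.
        System.out.println(conf.getTimeDuration("my.test.timeout", 0L, TimeUnit.MILLISECONDS));
      }
    }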
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 3872810748..29b9fea424 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -831,7 +831,9 @@ private long getRefreshTime(KerberosTicket tgt) {
return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
}
- private boolean shouldRelogin() {
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public boolean shouldRelogin() {
return hasKerberosCredentials() && isHadoopLogin();
}
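Since shouldRelogin() becomes public (though marked Private/Unstable) in the hunk above, an external caller holding a keytab login could, in principle, guard a relogin attempt as in the hedged sketch below; checkTGTAndReloginFromKeytab() is a no-op unless renewal is actually due, so the check stays cheap.

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ReloginSketch {
      public static void main(String[] args) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        if (ugi.shouldRelogin()) {              // Kerberos credentials from a Hadoop login
          ugi.checkTGTAndReloginFromKeytab();   // relogins only if the renewal window requires it
        }
      }
    }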
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index 608512155b..0a00d79104 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -27,7 +27,7 @@
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.CommandShell;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
index 34d9fe2b70..02c168f7b6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
@@ -34,7 +34,7 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import java.util.stream.Collectors;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
index d36ad9bf67..f154f2d816 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
@@ -26,7 +26,7 @@
import java.util.Date;
import java.util.ServiceLoader;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 617773b34d..0ae2af35bf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.security.token.delegation.web;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.net.NetUtils;
@@ -31,6 +29,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -56,9 +55,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
- private static final ObjectReader READER =
- new ObjectMapper().readerFor(Map.class);
-
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
@@ -328,7 +324,7 @@ private Map doDelegationTokenOperation(URL url,
if (contentType != null &&
contentType.contains(APPLICATION_JSON_MIME)) {
try {
- ret = READER.readValue(conn.getInputStream());
+ ret = JsonSerialization.mapReader().readValue(conn.getInputStream());
} catch (Exception ex) {
throw new AuthenticationException(String.format(
"'%s' did not handle the '%s' delegation token operation: %s",
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
index bc2e2d49d7..85015fbe30 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
@@ -20,8 +20,7 @@
import java.util.ArrayList;
import java.util.LinkedList;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.WordUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
/**
@@ -103,7 +102,8 @@ String[] getRow(int idx) {
// Line-wrap if it's too long
String[] lines = new String[] {raw};
if (wrap) {
- lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n");
+ lines = org.apache.hadoop.util.StringUtils.wrap(lines[0], wrapWidth,
+ "\n", true).split("\n");
}
      for (int i=0; i<lines.length; i++) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java
    Map<String, Object> jsonResponse = new LinkedHashMap();
jsonResponse.put(ERROR_JSON, json);
Writer writer = response.getWriter();
- WRITER.writeValue(writer, jsonResponse);
+ JsonSerialization.writer().writeValue(writer, jsonResponse);
writer.flush();
}
@@ -150,7 +142,7 @@ public static void validateResponse(HttpURLConnection conn,
InputStream es = null;
try {
es = conn.getErrorStream();
- Map json = READER.readValue(es);
+ Map json = JsonSerialization.mapReader().readValue(es);
json = (Map) json.get(ERROR_JSON);
String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
String exMsg = (String) json.get(ERROR_MESSAGE_JSON);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
index 86c4df666e..cbc8560a40 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
@@ -25,14 +25,18 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.Map;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.google.common.base.Preconditions;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -65,6 +69,26 @@ public class JsonSerialization<T> {
   private final Class<T> classType;
private final ObjectMapper mapper;
+ private static final ObjectWriter WRITER =
+ new ObjectMapper().writerWithDefaultPrettyPrinter();
+
+ private static final ObjectReader MAP_READER =
+ new ObjectMapper().readerFor(Map.class);
+
+ /**
+ * @return an ObjectWriter which pretty-prints its output
+ */
+ public static ObjectWriter writer() {
+ return WRITER;
+ }
+
+ /**
+ * @return an ObjectReader which returns simple Maps.
+ */
+ public static ObjectReader mapReader() {
+ return MAP_READER;
+ }
+
/**
* Create an instance bound to a specific type.
* @param classType class to marshall
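The two static helpers added above let callers share one pre-built Jackson ObjectWriter/ObjectReader instead of constructing an ObjectMapper per call. A small usage sketch with hypothetical map contents:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.util.JsonSerialization;

    public class JsonHelpersSketch {
      public static void main(String[] args) throws Exception {
        Map<String, Object> data = new LinkedHashMap<>();
        data.put("status", "ok");                                           // hypothetical payload
        String json = JsonSerialization.writer().writeValueAsString(data);  // pretty-printed JSON
        Map<?, ?> parsed = JsonSerialization.mapReader().readValue(json);   // back to a Map
        System.out.println(json);
        System.out.println(parsed.get("status")); // ok
      }
    }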
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 0b76f0df2a..46a0fccd41 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -1191,7 +1191,7 @@ public ShellCommandExecutor(String[] execString, File dir,
/**
* Returns the timeout value set for the executor's sub-commands.
- * @return The timeout value in seconds
+ * @return The timeout value in milliseconds
*/
@VisibleForTesting
public long getTimeoutInterval() {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 33a2010d6f..3db805fa4f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -35,7 +35,7 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -987,7 +987,7 @@ public static String camelize(String s) {
String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR, '_');
for (String word : words)
- sb.append(org.apache.commons.lang.StringUtils.capitalize(word));
+ sb.append(org.apache.commons.lang3.StringUtils.capitalize(word));
return sb.toString();
}
@@ -1183,4 +1183,64 @@ public static boolean isAlpha(String str) {
return true;
}
+ /**
+ * Same as WordUtils#wrap in commons-lang 2.6. Unlike commons-lang3, leading
+ * spaces on the first line are NOT stripped.
+ *
+ * @param str the String to be word wrapped, may be null
+ * @param wrapLength the column to wrap the words at, less than 1 is treated
+ * as 1
+ * @param newLineStr the string to insert for a new line,
+ * null uses the system property line separator
+ * @param wrapLongWords true if long words (such as URLs) should be wrapped
+ * @return a line with newlines inserted, null if null input
+ */
+ public static String wrap(String str, int wrapLength, String newLineStr,
+ boolean wrapLongWords) {
+ if(str == null) {
+ return null;
+ } else {
+ if(newLineStr == null) {
+ newLineStr = System.lineSeparator();
+ }
+
+ if(wrapLength < 1) {
+ wrapLength = 1;
+ }
+
+ int inputLineLength = str.length();
+ int offset = 0;
+ StringBuffer wrappedLine = new StringBuffer(inputLineLength + 32);
+
+ while(inputLineLength - offset > wrapLength) {
+ if(str.charAt(offset) == 32) {
+ ++offset;
+ } else {
+ int spaceToWrapAt = str.lastIndexOf(32, wrapLength + offset);
+ if(spaceToWrapAt >= offset) {
+ wrappedLine.append(str.substring(offset, spaceToWrapAt));
+ wrappedLine.append(newLineStr);
+ offset = spaceToWrapAt + 1;
+ } else if(wrapLongWords) {
+ wrappedLine.append(str.substring(offset, wrapLength + offset));
+ wrappedLine.append(newLineStr);
+ offset += wrapLength;
+ } else {
+ spaceToWrapAt = str.indexOf(32, wrapLength + offset);
+ if(spaceToWrapAt >= 0) {
+ wrappedLine.append(str.substring(offset, spaceToWrapAt));
+ wrappedLine.append(newLineStr);
+ offset = spaceToWrapAt + 1;
+ } else {
+ wrappedLine.append(str.substring(offset));
+ offset = inputLineLength;
+ }
+ }
+ }
+ }
+
+ wrappedLine.append(str.substring(offset));
+ return wrappedLine.toString();
+ }
+ }
}
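A usage sketch for the wrap() method added above, wrapping at column 10 with long-word breaking enabled; the commented lines show the expected output for this input:

    public class WrapSketch {
      public static void main(String[] args) {
        String text = "The quick brown fox jumps over the lazy dog";
        String wrapped = org.apache.hadoop.util.StringUtils.wrap(text, 10, "\n", true);
        System.out.println(wrapped);
        // The quick
        // brown fox
        // jumps over
        // the lazy
        // dog
      }
    }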
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
index 7fd19907fd..2c2aca3a6b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -216,6 +216,21 @@ private void readProcMemInfoFile() {
readProcMemInfoFile(false);
}
+ /**
+ *
+ * Wrapper for Long.parseLong() that returns zero if the value is
+ * invalid. Under some circumstances, swapFree in /proc/meminfo can
+ * go negative, reported as a very large decimal value.
+ */
+ private long safeParseLong(String strVal) {
+ long parsedVal;
+ try {
+ parsedVal = Long.parseLong(strVal);
+ } catch (NumberFormatException nfe) {
+ parsedVal = 0;
+ }
+ return parsedVal;
+ }
/**
* Read /proc/meminfo, parse and compute memory information.
* @param readAgain if false, read only on the first time
@@ -252,9 +267,9 @@ private void readProcMemInfoFile(boolean readAgain) {
} else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
swapSize = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(MEMFREE_STRING)) {
- ramSizeFree = Long.parseLong(mat.group(2));
+ ramSizeFree = safeParseLong(mat.group(2));
} else if (mat.group(1).equals(SWAPFREE_STRING)) {
- swapSizeFree = Long.parseLong(mat.group(2));
+ swapSizeFree = safeParseLong(mat.group(2));
} else if (mat.group(1).equals(INACTIVE_STRING)) {
inactiveSize = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(INACTIVEFILE_STRING)) {
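The motivation for safeParseLong() above is that an underflowed kernel counter shows up in /proc/meminfo as an unsigned 64-bit value, which overflows Java's signed long. A tiny sketch of the failure and the fallback, using the same value as the test added later in this patch:

    public class ParseFallbackSketch {
      public static void main(String[] args) {
        String badFreeValue = "18446744073709551596"; // larger than Long.MAX_VALUE
        long parsed;
        try {
          parsed = Long.parseLong(badFreeValue);      // throws NumberFormatException
        } catch (NumberFormatException nfe) {
          parsed = 0;                                 // same fallback as safeParseLong()
        }
        System.out.println(parsed);                   // 0
      }
    }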
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
index 5b8c45d0ad..c3b768ab67 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
@@ -68,3 +68,11 @@ message FileStatusProto {
optional bytes ec_data = 17;
optional uint32 flags = 18 [default = 0];
}
+
+/**
+ * Placeholder type for consistent basic FileSystem operations.
+ */
+message LocalFileSystemPathHandleProto {
+ optional uint64 mtime = 1;
+ optional string path = 2;
+}
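A hedged sketch of round-tripping the new message, assuming the generated Java classes land in org.apache.hadoop.fs.FSProtos alongside the existing messages in this .proto file; the path and mtime values are made up for illustration:

    import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto;

    public class PathHandleProtoSketch {
      public static void main(String[] args) throws Exception {
        LocalFileSystemPathHandleProto proto = LocalFileSystemPathHandleProto.newBuilder()
            .setPath("/tmp/example.txt")   // hypothetical path
            .setMtime(1530000000000L)      // hypothetical modification time
            .build();
        byte[] bytes = proto.toByteArray();                   // serialize
        LocalFileSystemPathHandleProto copy =
            LocalFileSystemPathHandleProto.parseFrom(bytes);  // parse back
        System.out.println(copy.getPath() + " @ " + copy.getMtime());
      }
    }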
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
new file mode 100644
index 0000000000..f0054fedb8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.FileSystemMultipartUploader$Factory
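The new services file registers FileSystemMultipartUploader.Factory for java.util.ServiceLoader discovery, which is presumably how MultipartUploaderFactory.get() locates implementations at runtime. A generic lookup sketch:

    import java.util.ServiceLoader;
    import org.apache.hadoop.fs.MultipartUploaderFactory;

    public class ListUploaderFactoriesSketch {
      public static void main(String[] args) {
        // Iterates every factory named in a META-INF/services entry on the classpath.
        for (MultipartUploaderFactory factory
            : ServiceLoader.load(MultipartUploaderFactory.class)) {
          System.out.println(factory.getClass().getName());
        }
      }
    }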
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index e865bf1d93..2361626c3f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -62,7 +62,7 @@
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 7f27d7d51e..152159b3f3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.conf;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
index 6c2e5b88bc..2ea45231a1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import java.util.Arrays;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.junit.Assume;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java
new file mode 100644
index 0000000000..f132089a9e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/AbstractSystemMultipartUploaderTest.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.junit.Test;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public abstract class AbstractSystemMultipartUploaderTest {
+
+ abstract FileSystem getFS() throws IOException;
+
+ abstract Path getBaseTestPath();
+
+ @Test
+ public void testMultipartUpload() throws Exception {
+ FileSystem fs = getFS();
+ Path file = new Path(getBaseTestPath(), "some-file");
+ MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+ UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+ StringBuilder sb = new StringBuilder();
+ for (int i = 1; i <= 100; ++i) {
+ String contents = "ThisIsPart" + i + "\n";
+ sb.append(contents);
+ int len = contents.getBytes().length;
+ InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+ PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+ partHandles.add(Pair.of(i, partHandle));
+ }
+ PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
+ byte[] fdData = IOUtils.toByteArray(fs.open(fd));
+ byte[] fileData = IOUtils.toByteArray(fs.open(file));
+ String readString = new String(fdData);
+ assertEquals(sb.toString(), readString);
+ assertArrayEquals(fdData, fileData);
+ }
+
+ @Test
+ public void testMultipartUploadReverseOrder() throws Exception {
+ FileSystem fs = getFS();
+ Path file = new Path(getBaseTestPath(), "some-file");
+ MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+ UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+ StringBuilder sb = new StringBuilder();
+ for (int i = 1; i <= 100; ++i) {
+ String contents = "ThisIsPart" + i + "\n";
+ sb.append(contents);
+ }
+ for (int i = 100; i > 0; --i) {
+ String contents = "ThisIsPart" + i + "\n";
+ int len = contents.getBytes().length;
+ InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+ PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+ partHandles.add(Pair.of(i, partHandle));
+ }
+ PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
+ byte[] fdData = IOUtils.toByteArray(fs.open(fd));
+ byte[] fileData = IOUtils.toByteArray(fs.open(file));
+ String readString = new String(fdData);
+ assertEquals(sb.toString(), readString);
+ assertArrayEquals(fdData, fileData);
+ }
+
+ @Test
+  public void testMultipartUploadReverseOrderNonContiguousPartNumbers()
+ throws Exception {
+ FileSystem fs = getFS();
+ Path file = new Path(getBaseTestPath(), "some-file");
+ MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+ UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+ StringBuilder sb = new StringBuilder();
+ for (int i = 2; i <= 200; i += 2) {
+ String contents = "ThisIsPart" + i + "\n";
+ sb.append(contents);
+ }
+ for (int i = 200; i > 0; i -= 2) {
+ String contents = "ThisIsPart" + i + "\n";
+ int len = contents.getBytes().length;
+ InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+ PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+ partHandles.add(Pair.of(i, partHandle));
+ }
+ PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
+ byte[] fdData = IOUtils.toByteArray(fs.open(fd));
+ byte[] fileData = IOUtils.toByteArray(fs.open(file));
+ String readString = new String(fdData);
+ assertEquals(sb.toString(), readString);
+ assertArrayEquals(fdData, fileData);
+ }
+
+ @Test
+ public void testMultipartUploadAbort() throws Exception {
+ FileSystem fs = getFS();
+ Path file = new Path(getBaseTestPath(), "some-file");
+ MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+ UploadHandle uploadHandle = mpu.initialize(file);
+ for (int i = 100; i >= 50; --i) {
+ String contents = "ThisIsPart" + i + "\n";
+ int len = contents.getBytes().length;
+ InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+ PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+ }
+ mpu.abort(file, uploadHandle);
+
+ String contents = "ThisIsPart49\n";
+ int len = contents.getBytes().length;
+ InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+
+ try {
+ mpu.putPart(file, is, 49, uploadHandle, len);
+ fail("putPart should have thrown an exception");
+ } catch (IOException ok) {
+ // ignore
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
index da071050eb..8cbca8e815 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
@@ -19,7 +19,7 @@
import java.io.IOException;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.test.GenericTestUtils;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 35ec4ff6b6..62ecd9f13a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -810,7 +810,49 @@ public void testCreateFlagAppendCreateOverwrite() throws IOException {
fc.create(p, EnumSet.of(CREATE, APPEND, OVERWRITE));
Assert.fail("Excepted exception not thrown");
}
-
+
+ @Test
+ public void testBuilderCreateNonExistingFile() throws IOException {
+ Path p = getTestRootPath(fc, "test/testBuilderCreateNonExistingFile");
+ FSDataOutputStream out = fc.create(p).build();
+ writeData(fc, p, out, data, data.length);
+ }
+
+ @Test
+ public void testBuilderCreateExistingFile() throws IOException {
+ Path p = getTestRootPath(fc, "test/testBuilderCreateExistingFile");
+ createFile(p);
+ FSDataOutputStream out = fc.create(p).overwrite(true).build();
+ writeData(fc, p, out, data, data.length);
+ }
+
+ @Test
+ public void testBuilderCreateAppendNonExistingFile() throws IOException {
+ Path p = getTestRootPath(fc, "test/testBuilderCreateAppendNonExistingFile");
+ FSDataOutputStream out = fc.create(p).append().build();
+ writeData(fc, p, out, data, data.length);
+ }
+
+ @Test
+ public void testBuilderCreateAppendExistingFile() throws IOException {
+ Path p = getTestRootPath(fc, "test/testBuilderCreateAppendExistingFile");
+ createFile(p);
+ FSDataOutputStream out = fc.create(p).append().build();
+ writeData(fc, p, out, data, 2 * data.length);
+ }
+
+ @Test
+ public void testBuilderCreateRecursive() throws IOException {
+ Path p = getTestRootPath(fc, "test/parent/no/exist/file1");
+ try (FSDataOutputStream out = fc.create(p).build()) {
+ fail("Should throw FileNotFoundException on non-exist directory");
+ } catch (FileNotFoundException e) {
+ }
+
+ FSDataOutputStream out = fc.create(p).recursive().build();
+ writeData(fc, p, out, data, data.length);
+ }
+
private static void writeData(FileContext fc, Path p, FSDataOutputStream out,
byte[] data, long expectedLen) throws IOException {
out.write(data, 0, data.length);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
index 1f37f74e71..b5307a4e27 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
@@ -22,7 +22,6 @@
import java.io.FileNotFoundException;
import java.util.EnumSet;
-import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
import org.apache.hadoop.io.IOUtils;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
index 3def5d5388..6b9a34c3b3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.fs;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
index 597eb93b58..fa682649a0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs;
-import org.apache.commons.lang.math.RandomUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
import org.junit.Before;
@@ -67,15 +67,15 @@ public class TestFileSystemStorageStatistics {
@Before
public void setup() {
- statistics.incrementBytesRead(RandomUtils.nextInt(100));
- statistics.incrementBytesWritten(RandomUtils.nextInt(100));
- statistics.incrementLargeReadOps(RandomUtils.nextInt(100));
- statistics.incrementWriteOps(RandomUtils.nextInt(100));
+ statistics.incrementBytesRead(RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesWritten(RandomUtils.nextInt(0, 100));
+ statistics.incrementLargeReadOps(RandomUtils.nextInt(0, 100));
+ statistics.incrementWriteOps(RandomUtils.nextInt(0, 100));
- statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100));
- statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100));
- statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100));
- statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100));
+ statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100));
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 0e337b4736..d5622af085 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -689,17 +689,18 @@ public void testFSOutputStreamBuilder() throws Exception {
// and permission
FSDataOutputStreamBuilder builder =
fileSys.createFile(path);
- builder.build();
- Assert.assertEquals("Should be default block size",
- builder.getBlockSize(), fileSys.getDefaultBlockSize());
- Assert.assertEquals("Should be default replication factor",
- builder.getReplication(), fileSys.getDefaultReplication());
- Assert.assertEquals("Should be default buffer size",
- builder.getBufferSize(),
- fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
- IO_FILE_BUFFER_SIZE_DEFAULT));
- Assert.assertEquals("Should be default permission",
- builder.getPermission(), FsPermission.getFileDefault());
+ try (FSDataOutputStream stream = builder.build()) {
+ Assert.assertEquals("Should be default block size",
+ builder.getBlockSize(), fileSys.getDefaultBlockSize());
+ Assert.assertEquals("Should be default replication factor",
+ builder.getReplication(), fileSys.getDefaultReplication());
+ Assert.assertEquals("Should be default buffer size",
+ builder.getBufferSize(),
+ fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT));
+ Assert.assertEquals("Should be default permission",
+ builder.getPermission(), FsPermission.getFileDefault());
+ }
// Test set 0 to replication, block size and buffer size
builder = fileSys.createFile(path);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java
new file mode 100644
index 0000000000..21d01b6cdb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemMultipartUploader.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.test.GenericTestUtils.getRandomizedTestDir;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Test the FileSystemMultipartUploader on local file system.
+ */
+public class TestLocalFileSystemMultipartUploader
+ extends AbstractSystemMultipartUploaderTest {
+
+ private static FileSystem fs;
+ private File tmp;
+
+ @BeforeClass
+ public static void init() throws IOException {
+ fs = LocalFileSystem.getLocal(new Configuration());
+ }
+
+ @Before
+ public void setup() throws IOException {
+ tmp = getRandomizedTestDir();
+ tmp.mkdirs();
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ tmp.delete();
+ }
+
+ @Override
+ public FileSystem getFS() {
+ return fs;
+ }
+
+ @Override
+ public Path getBaseTestPath() {
+ return new Path(tmp.getAbsolutePath());
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
index fbe28c3c24..36cfa6ccda 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
@@ -123,6 +123,12 @@ public void testChanged() throws IOException {
HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts)
.orElseThrow(IllegalArgumentException::new);
FileStatus stat = testFile(B1);
+ try {
+ // Temporary workaround while RawLocalFS supports only second precision
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ throw new IOException(e);
+ }
// modify the file by appending data
appendFile(getFileSystem(), stat.getPath(), B2);
byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java
new file mode 100644
index 0000000000..3c088d278e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.contract.rawlocal;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
+import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract;
+
+public class TestRawlocalContractPathHandle
+ extends AbstractContractPathHandleTest {
+
+ public TestRawlocalContractPathHandle(String testname,
+ Options.HandleOpt[] opts, boolean serialized) {
+ super(testname, opts, serialized);
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new RawlocalFSContract(conf);
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
index 8e60540126..e7f36fc850 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.fs.shell;
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.lang.math.RandomUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
@@ -56,11 +56,11 @@ public static int initialize(Path dir) throws Exception {
fs.mkdirs(toDirPath);
int numTotalFiles = 0;
- int numDirs = RandomUtils.nextInt(5);
+ int numDirs = RandomUtils.nextInt(0, 5);
for (int dirCount = 0; dirCount < numDirs; ++dirCount) {
Path subDirPath = new Path(fromDirPath, "subdir" + dirCount);
fs.mkdirs(subDirPath);
- int numFiles = RandomUtils.nextInt(10);
+ int numFiles = RandomUtils.nextInt(0, 10);
for (int fileCount = 0; fileCount < numFiles; ++fileCount) {
numTotalFiles++;
Path subFile = new Path(subDirPath, "file" + fileCount);
@@ -115,7 +115,7 @@ public void testCopyFromLocalWithThreads() throws Exception {
Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
int numFiles = TestCopyFromLocal.initialize(dir);
int maxThreads = Runtime.getRuntime().availableProcessors() * 2;
- int randThreads = RandomUtils.nextInt(maxThreads - 1) + 1;
+ int randThreads = RandomUtils.nextInt(0, maxThreads - 1) + 1;
String numThreads = Integer.toString(randThreads);
run(new TestMultiThreadedCopy(randThreads,
randThreads == 1 ? 0 : numFiles), "-t", numThreads,
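The RandomUtils changes in TestFileSystemStorageStatistics and TestCopyFromLocal above reflect an API difference rather than a behaviour change: commons-lang3's RandomUtils.nextInt takes an inclusive start and an exclusive end, where the old commons-lang nextInt(n) took only an exclusive bound. A short sketch:

    import org.apache.commons.lang3.RandomUtils;

    public class RandomBoundsSketch {
      public static void main(String[] args) {
        int n = RandomUtils.nextInt(0, 100);         // 0 <= n < 100, like the old nextInt(100)
        int threads = RandomUtils.nextInt(0, 7) + 1; // 1..7, mirroring the thread-count pattern above
        System.out.println(n + " " + threads);
      }
    }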
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
index fbe3fb8118..17be5874c5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
@@ -26,7 +26,7 @@
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
index 41ae910cba..5fbd957312 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
@@ -20,7 +20,7 @@
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.metrics.RpcMetrics;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index 0b463a5130..520042017d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ipc;
import com.google.protobuf.ServiceException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index 30176f202c..62bd1b142e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -38,7 +38,7 @@
import java.util.List;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.KerberosAuthException;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
index 28b3cbe3fa..c0d204f86a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
@@ -43,7 +43,7 @@
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket.DomainChannel;
import org.apache.hadoop.test.GenericTestUtils;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
index d3c9538641..8c1339d38d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
@@ -173,6 +173,37 @@ public void testGetNumericGroupsResolvable() throws Exception {
assertTrue(groups.contains("zzz"));
}
+ public long getTimeoutInterval(String timeout) {
+ Configuration conf = new Configuration();
+ String userName = "foobarnonexistinguser";
+ conf.set(
+ CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
+ timeout);
+ TestDelayedGroupCommand mapping = ReflectionUtils
+ .newInstance(TestDelayedGroupCommand.class, conf);
+ ShellCommandExecutor executor = mapping.createGroupExecutor(userName);
+ return executor.getTimeoutInterval();
+ }
+
+ @Test
+ public void testShellTimeOutConf() {
+
+ // Test a 1 second max-runtime timeout
+ assertEquals(
+ "Expected the group names executor to carry the configured timeout",
+ 1000L, getTimeoutInterval("1s"));
+
+ // Test a 1 minute max-runtime timeout
+ assertEquals(
+ "Expected the group names executor to carry the configured timeout",
+ 60000L, getTimeoutInterval("1m"));
+
+ // Test a 1 millisecond max-runtime timeout
+ assertEquals(
+ "Expected the group names executor to carry the configured timeout",
+ 1L, getTimeoutInterval("1"));
+ }
+
private class TestGroupResolvable
extends ShellBasedUnixGroupsMapping {
/**
@@ -222,7 +253,7 @@ public void testGetGroupsResolvable() throws Exception {
private static class TestDelayedGroupCommand
extends ShellBasedUnixGroupsMapping {
- private Long timeoutSecs = 2L;
+ private Long timeoutSecs = 1L;
TestDelayedGroupCommand() {
super();
@@ -249,12 +280,12 @@ public void testFiniteGroupResolutionTime() throws Exception {
String userName = "foobarnonexistinguser";
String commandTimeoutMessage =
"ran longer than the configured timeout limit";
- long testTimeout = 1L;
+ long testTimeout = 500L;
// Test a 1 second max-runtime timeout
conf.setLong(
CommonConfigurationKeys.
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+ HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
testTimeout);
TestDelayedGroupCommand mapping =
@@ -306,7 +337,7 @@ public void testFiniteGroupResolutionTime() throws Exception {
conf = new Configuration();
long defaultTimeout =
CommonConfigurationKeys.
- HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT;
+ HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;
mapping =
ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
index 9357f48df3..0f8f1e45c9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
@@ -21,7 +21,7 @@
import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
index 127b0b3827..d7c86316ef 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.service.launcher;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 53eb2be3bb..3e9da1b45f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -40,7 +40,7 @@
import java.util.regex.Pattern;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileUtil;
@@ -344,7 +344,7 @@ public static void assertExceptionContains(String expectedText,
throw new AssertionError(E_NULL_THROWABLE_STRING, t);
}
if (expectedText != null && !msg.contains(expectedText)) {
- String prefix = org.apache.commons.lang.StringUtils.isEmpty(message)
+ String prefix = org.apache.commons.lang3.StringUtils.isEmpty(message)
? "" : (message + ": ");
throw new AssertionError(
String.format("%s Expected to find '%s' %s: %s",
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
index 2aa5e95b04..d53982363d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.util;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.LoggerFactory;
import org.junit.Assert;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
index a9fa4c64e9..b61cebc0a6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.util;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
index a646a41271..0ae5d3ce8c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
@@ -161,6 +161,36 @@ int readDiskBlockInformation(String diskName, int defSector) {
"DirectMap2M: 2027520 kB\n" +
"DirectMap1G: 132120576 kB\n";
+ static final String MEMINFO_FORMAT3 =
+ "MemTotal: %d kB\n" +
+ "MemFree: %s kB\n" +
+ "Buffers: 138244 kB\n" +
+ "Cached: 947780 kB\n" +
+ "SwapCached: 142880 kB\n" +
+ "Active: 3229888 kB\n" +
+ "Inactive: %d kB\n" +
+ "SwapTotal: %d kB\n" +
+ "SwapFree: %s kB\n" +
+ "Dirty: 122012 kB\n" +
+ "Writeback: 0 kB\n" +
+ "AnonPages: 2710792 kB\n" +
+ "Mapped: 24740 kB\n" +
+ "Slab: 132528 kB\n" +
+ "SReclaimable: 105096 kB\n" +
+ "SUnreclaim: 27432 kB\n" +
+ "PageTables: 11448 kB\n" +
+ "NFS_Unstable: 0 kB\n" +
+ "Bounce: 0 kB\n" +
+ "CommitLimit: 4125904 kB\n" +
+ "Committed_AS: 4143556 kB\n" +
+ "VmallocTotal: 34359738367 kB\n" +
+ "VmallocUsed: 1632 kB\n" +
+ "VmallocChunk: 34359736375 kB\n" +
+ "HugePages_Total: %d\n" +
+ "HugePages_Free: 0\n" +
+ "HugePages_Rsvd: 0\n" +
+ "Hugepagesize: 2048 kB";
+
static final String CPUINFO_FORMAT =
"processor : %s\n" +
"vendor_id : AuthenticAMD\n" +
@@ -384,6 +414,36 @@ public void parsingProcMemFile2() throws IOException {
(nrHugePages * 2048) + swapTotal));
}
+ /**
+ * Test parsing /proc/meminfo with out-of-range values.
+ * @throws IOException
+ */
+ @Test
+ public void parsingProcMemFileWithBadValues() throws IOException {
+ long memTotal = 4058864L;
+ long memFree = 0L; // bad value should return 0
+ long inactive = 567732L;
+ long swapTotal = 2096472L;
+ long swapFree = 0L; // bad value should return 0
+ int nrHugePages = 10;
+ String badFreeValue = "18446744073709551596";
+ File tempFile = new File(FAKE_MEMFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+ fWriter.write(String.format(MEMINFO_FORMAT3,
+ memTotal, badFreeValue, inactive, swapTotal, badFreeValue, nrHugePages));
+
+ fWriter.close();
+ assertEquals(plugin.getAvailablePhysicalMemorySize(),
+ 1024L * (memFree + inactive));
+ assertEquals(plugin.getAvailableVirtualMemorySize(),
+ 1024L * (memFree + inactive + swapFree));
+ assertEquals(plugin.getPhysicalMemorySize(),
+ 1024L * (memTotal - (nrHugePages * 2048)));
+ assertEquals(plugin.getVirtualMemorySize(),
+ 1024L * (memTotal - (nrHugePages * 2048) + swapTotal));
+ }
+
@Test
public void testCoreCounts() throws IOException {
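The format above deliberately injects 18446744073709551596 kB for MemFree and SwapFree, a value that does not fit in a signed 64-bit long; the assertions expect the plugin to treat such fields as 0 rather than propagate an overflowed number. A minimal, self-contained sketch of that clamping contract (illustrative only, not the actual SysInfoLinux parser):

```java
// Hypothetical helper illustrating the "bad value becomes 0" contract the
// test above asserts; the real parsing lives in SysInfoLinux.
final class MemInfoValues {
  private MemInfoValues() { }

  /**
   * Parse a /proc/meminfo kB field, returning 0 for anything that is not a
   * valid non-negative signed 64-bit value.
   */
  static long parseKiloBytes(String raw) {
    try {
      long value = Long.parseLong(raw.trim());
      return value < 0 ? 0L : value;
    } catch (NumberFormatException e) {
      // e.g. 18446744073709551596 overflows long and lands here.
      return 0L;
    }
  }

  public static void main(String[] args) {
    System.out.println(parseKiloBytes("4058864"));              // 4058864
    System.out.println(parseKiloBytes("18446744073709551596")); // 0
  }
}
```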
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml
index a0d1d21a94..8cbd4a0abc 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml
@@ -122,4 +122,9 @@
true
+  <property>
+    <name>fs.contract.supports-content-check</name>
+    <value>true</value>
+  </property>
+
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
new file mode 100644
index 0000000000..d7c7427b70
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/process_with_sigterm_trap.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+trap "echo SIGTERM trapped!" SIGTERM
+trap "echo SIGINT trapped!" SIGINT
+
+echo "$$" > "$1"
+
+while true; do
+ sleep 1.3
+done
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
index f8265729d8..b9b8d9cee6 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
@@ -17,10 +17,9 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
-import com.fasterxml.jackson.databind.ObjectMapper;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.http.JettyUtils;
+import org.apache.hadoop.util.JsonSerialization;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
@@ -67,8 +66,7 @@ public void writeTo(Object obj, Class<?> aClass, Type type,
OutputStream outputStream) throws IOException, WebApplicationException {
Writer writer = new OutputStreamWriter(outputStream, Charset
.forName("UTF-8"));
- ObjectMapper jsonMapper = new ObjectMapper();
- jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
+ JsonSerialization.writer().writeValue(writer, obj);
}
}
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index dfbf8184f8..5de6759ce9 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -265,6 +265,11 @@
      <artifactId>hadoop-ozone-docs</artifactId>
      <scope>provided</scope>
    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem</artifactId>
+      <scope>provided</scope>
+    </dependency>
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index faf420c7f5..512c649e21 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -16,18 +16,6 @@
version: "3"
services:
- namenode:
- image: apache/hadoop-runner
- hostname: namenode
- volumes:
- - ../../ozone:/opt/hadoop
- ports:
- - 9870:9870
- environment:
- ENSURE_NAMENODE_DIR: /data/namenode
- env_file:
- - ./docker-config
- command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index c693db0428..632f8701d2 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
index fb7873bf88..3233c11641 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
@@ -16,19 +16,6 @@
version: "3"
services:
- namenode:
- image: apache/hadoop-runner
- hostname: namenode
- volumes:
- - ../../ozone:/opt/hadoop
- - ./jmxpromo.jar:/opt/jmxpromo.jar
- ports:
- - 9870:9870
- environment:
- ENSURE_NAMENODE_DIR: /data/namenode
- env_file:
- - ./docker-config
- command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config
index e4f5485ac5..2be22a7792 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index b6b95eba06..438615fec5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -244,32 +244,6 @@ public final class ScmConfigKeys {
public static final String
OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
- /**
- * Don't start processing a pool if we have not had a minimum number of
- * seconds from the last processing.
- */
- public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
- "ozone.scm.container.report.processing.interval";
- public static final String
- OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
-
- /**
- * This determines the total number of pools to be processed in parallel.
- */
- public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
- "ozone.scm.max.nodepool.processing.threads";
- public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
- /**
- * These 2 settings control the number of threads in executor pool and time
- * outs for thw container reports from all nodes.
- */
- public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
- "ozone.scm.max.container.report.threads";
- public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
- public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
- "ozone.scm.container.reports.wait.timeout";
- public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
- "5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 2c38d45728..ee05c8768a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
@@ -30,6 +31,7 @@
import org.apache.hadoop.util.Time;
import java.io.IOException;
+import java.util.Arrays;
import java.util.Comparator;
import static java.lang.Math.max;
@@ -63,6 +65,13 @@ public class ContainerInfo
private String owner;
private long containerID;
private long deleteTransactionId;
+ /**
+ * Allows you to maintain private data on ContainerInfo. This is not
+ * serialized via protobuf, just allows us to maintain some private data.
+ */
+ @JsonIgnore
+ private byte[] data;
+
ContainerInfo(
long containerID,
HddsProtos.LifeCycleState state,
@@ -295,6 +304,29 @@ public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
+ /**
+ * Returns private data that is set on this containerInfo.
+ *
+ * @return blob, the user can interpret it any way they like.
+ */
+ public byte[] getData() {
+ if (this.data != null) {
+ return Arrays.copyOf(this.data, this.data.length);
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Set private data on ContainerInfo object.
+ *
+ * @param data -- private data.
+ */
+ public void setData(byte[] data) {
+ if (data != null) {
+ this.data = Arrays.copyOf(data, data.length);
+ }
+ }
/**
* Builder class for ContainerInfo.
*/
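The new data field on ContainerInfo is excluded from JSON output via @JsonIgnore and guarded with defensive copies: both setData and getData copy the array so callers cannot mutate the stored blob through a shared reference. A standalone illustration of why the Arrays.copyOf calls matter (generic Java, not ContainerInfo itself):

```java
import java.util.Arrays;

// Minimal illustration of the defensive-copy pattern used by
// ContainerInfo#setData/getData above.
class PrivateBlobHolder {
  private byte[] data;

  void setData(byte[] data) {
    // Copy on the way in, so later changes to the caller's array are not seen here.
    this.data = (data == null) ? null : Arrays.copyOf(data, data.length);
  }

  byte[] getData() {
    // Copy on the way out, so callers cannot rewrite the stored blob.
    return (data == null) ? null : Arrays.copyOf(data, data.length);
  }
}

class DefensiveCopyDemo {
  public static void main(String[] args) {
    byte[] original = {1, 2, 3};
    PrivateBlobHolder holder = new PrivateBlobHolder();
    holder.setData(original);
    original[0] = 42;                        // caller mutates its own array
    System.out.println(holder.getData()[0]); // still 1
  }
}
```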
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index 87408385ec..c5794f4c03 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -27,14 +27,14 @@
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Map;
+import java.util.TreeMap;
import java.util.List;
/**
@@ -46,7 +46,7 @@ public class Pipeline {
static {
ObjectMapper mapper = new ObjectMapper();
- String[] ignorableFieldNames = {"data"};
+ String[] ignorableFieldNames = {"leaderID", "datanodes"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
@@ -57,38 +57,66 @@ public class Pipeline {
WRITER = mapper.writer(filters);
}
- private PipelineChannel pipelineChannel;
- /**
- * Allows you to maintain private data on pipelines. This is not serialized
- * via protobuf, just allows us to maintain some private data.
- */
@JsonIgnore
- private byte[] data;
+ private String leaderID;
+ @JsonIgnore
+ private Map<String, DatanodeDetails> datanodes;
+ private HddsProtos.LifeCycleState lifeCycleState;
+ private HddsProtos.ReplicationType type;
+ private HddsProtos.ReplicationFactor factor;
+ private String name;
+ // TODO: change to long based id
+ //private long id;
+
/**
* Constructs a new pipeline data structure.
*
- * @param pipelineChannel - transport information for this container
+ * @param leaderID - Leader datanode id
+ * @param lifeCycleState - Pipeline State
+ * @param replicationType - Replication protocol
+ * @param replicationFactor - replication count on datanodes
+ * @param name - pipelineName
*/
- public Pipeline(PipelineChannel pipelineChannel) {
- this.pipelineChannel = pipelineChannel;
- data = null;
+ public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState,
+ HddsProtos.ReplicationType replicationType,
+ HddsProtos.ReplicationFactor replicationFactor, String name) {
+ this.leaderID = leaderID;
+ this.lifeCycleState = lifeCycleState;
+ this.type = replicationType;
+ this.factor = replicationFactor;
+ this.name = name;
+ datanodes = new TreeMap<>();
}
/**
* Gets pipeline object from protobuf.
*
- * @param pipeline - ProtoBuf definition for the pipeline.
+ * @param pipelineProto - ProtoBuf definition for the pipeline.
* @return Pipeline Object
*/
- public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
- Preconditions.checkNotNull(pipeline);
- PipelineChannel pipelineChannel =
- PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
- return new Pipeline(pipelineChannel);
+ public static Pipeline getFromProtoBuf(
+ HddsProtos.Pipeline pipelineProto) {
+ Preconditions.checkNotNull(pipelineProto);
+ Pipeline pipeline =
+ new Pipeline(pipelineProto.getLeaderID(),
+ pipelineProto.getState(),
+ pipelineProto.getType(),
+ pipelineProto.getFactor(),
+ pipelineProto.getName());
+
+ for (HddsProtos.DatanodeDetailsProto dataID :
+ pipelineProto.getMembersList()) {
+ pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID));
+ }
+ return pipeline;
}
+ /**
+ * returns the replication count.
+ * @return Replication Factor
+ */
public HddsProtos.ReplicationFactor getFactor() {
- return pipelineChannel.getFactor();
+ return factor;
}
/**
@@ -98,19 +126,34 @@ public HddsProtos.ReplicationFactor getFactor() {
*/
@JsonIgnore
public DatanodeDetails getLeader() {
- return pipelineChannel.getDatanodes().get(pipelineChannel.getLeaderID());
+ return getDatanodes().get(leaderID);
}
+ public void addMember(DatanodeDetails datanodeDetails) {
+ datanodes.put(datanodeDetails.getUuid().toString(),
+ datanodeDetails);
+ }
+
+ public Map<String, DatanodeDetails> getDatanodes() {
+ return datanodes;
+ }
/**
* Returns the leader host.
*
* @return First Machine.
*/
public String getLeaderHost() {
- return pipelineChannel.getDatanodes()
- .get(pipelineChannel.getLeaderID()).getHostName();
+ return getDatanodes()
+ .get(leaderID).getHostName();
}
+ /**
+ * Returns the leader datanode id.
+ * @return the leader datanode id
+ */
+ public String getLeaderID() {
+ return leaderID;
+ }
/**
* Returns all machines that make up this pipeline.
*
@@ -118,7 +161,7 @@ public String getLeaderHost() {
*/
@JsonIgnore
public List<DatanodeDetails> getMachines() {
- return new ArrayList<>(pipelineChannel.getDatanodes().values());
+ return new ArrayList<>(getDatanodes().values());
}
/**
@@ -128,7 +171,7 @@ public List getMachines() {
*/
public List<String> getDatanodeHosts() {
List<String> dataHosts = new ArrayList<>();
- for (DatanodeDetails id : pipelineChannel.getDatanodes().values()) {
+ for (DatanodeDetails id : getDatanodes().values()) {
dataHosts.add(id.getHostName());
}
return dataHosts;
@@ -143,46 +186,31 @@ public List getDatanodeHosts() {
public HddsProtos.Pipeline getProtobufMessage() {
HddsProtos.Pipeline.Builder builder =
HddsProtos.Pipeline.newBuilder();
- builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
+ for (DatanodeDetails datanode : datanodes.values()) {
+ builder.addMembers(datanode.getProtoBufMessage());
+ }
+ builder.setLeaderID(leaderID);
+
+ if (this.getLifeCycleState() != null) {
+ builder.setState(this.getLifeCycleState());
+ }
+ if (this.getType() != null) {
+ builder.setType(this.getType());
+ }
+
+ if (this.getFactor() != null) {
+ builder.setFactor(this.getFactor());
+ }
return builder.build();
}
- /**
- * Returns private data that is set on this pipeline.
- *
- * @return blob, the user can interpret it any way they like.
- */
- public byte[] getData() {
- if (this.data != null) {
- return Arrays.copyOf(this.data, this.data.length);
- } else {
- return null;
- }
- }
-
- @VisibleForTesting
- public PipelineChannel getPipelineChannel() {
- return pipelineChannel;
- }
-
- /**
- * Set private data on pipeline.
- *
- * @param data -- private data.
- */
- public void setData(byte[] data) {
- if (data != null) {
- this.data = Arrays.copyOf(data, data.length);
- }
- }
-
/**
* Gets the State of the pipeline.
*
* @return - LifeCycleStates.
*/
public HddsProtos.LifeCycleState getLifeCycleState() {
- return pipelineChannel.getLifeCycleState();
+ return lifeCycleState;
}
/**
@@ -191,7 +219,7 @@ public HddsProtos.LifeCycleState getLifeCycleState() {
* @return - Name of the pipeline
*/
public String getPipelineName() {
- return pipelineChannel.getName();
+ return name;
}
/**
@@ -200,16 +228,16 @@ public String getPipelineName() {
* @return type - Standalone, Ratis, Chained.
*/
public HddsProtos.ReplicationType getType() {
- return pipelineChannel.getType();
+ return type;
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append("[");
- pipelineChannel.getDatanodes().keySet().stream()
+ getDatanodes().keySet().stream()
.forEach(id -> b.
- append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id));
+ append(id.endsWith(getLeaderID()) ? "*" + id : id));
b.append(" name:").append(getPipelineName());
if (getType() != null) {
b.append(" type:").append(getType().toString());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
deleted file mode 100644
index 655751d737..0000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.ozone.container.common.statemachine
+ .SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Command handler to copy containers from sources.
+ */
+public class ReplicateContainerCommandHandler implements CommandHandler {
+ static final Logger LOG =
+ LoggerFactory.getLogger(ReplicateContainerCommandHandler.class);
+
+ private int invocationCount;
+
+ private long totalTime;
+
+ @Override
+ public void handle(SCMCommand command, OzoneContainer container,
+ StateContext context, SCMConnectionManager connectionManager) {
+ LOG.warn("Replicate command is not yet handled");
+
+ }
+
+ @Override
+ public SCMCommandProto.Type getCommandType() {
+ return Type.replicateContainerCommand;
+ }
+
+ @Override
+ public int getInvocationCount() {
+ return this.invocationCount;
+ }
+
+ @Override
+ public long getAverageRunTime() {
+ if (invocationCount > 0) {
+ return totalTime / invocationCount;
+ }
+ return 0;
+ }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 1ee6375a56..260a245ceb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -39,6 +39,8 @@
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
+import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -196,6 +198,16 @@ private void processResponse(SCMHeartbeatResponseProto response,
}
this.context.addCommand(closeContainer);
break;
+ case replicateContainerCommand:
+ ReplicateContainerCommand replicateContainerCommand =
+ ReplicateContainerCommand.getFromProtobuf(
+ commandResponseProto.getReplicateContainerCommandProto());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Received SCM container replicate request for container {}",
+ replicateContainerCommand.getContainerID());
+ }
+ this.context.addCommand(replicateContainerCommand);
+ break;
default:
throw new IllegalArgumentException("Unknown response : "
+ commandResponseProto.getCommandType().name());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 6809d57042..5d6fc0aa3f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.container.common.utils;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
new file mode 100644
index 0000000000..e0a235122e
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.Queue;
+
+/**
+ * Priority queue to handle under-replicated and over-replicated containers
+ * in Ozone. ReplicationManager will consume these messages and decide
+ * accordingly.
+ */
+public class ReplicationQueue {
+
+ private final Queue<ReplicationRequest> queue;
+
+ ReplicationQueue() {
+ queue = new PriorityQueue<>();
+ }
+
+ public synchronized boolean add(ReplicationRequest repObj) {
+ if (this.queue.contains(repObj)) {
+ // Remove the earlier message and insert this one
+ this.queue.remove(repObj);
+ }
+ return this.queue.add(repObj);
+ }
+
+ public synchronized boolean remove(ReplicationRequest repObj) {
+ return queue.remove(repObj);
+ }
+
+ /**
+ * Retrieves, but does not remove, the head of this queue,
+ * or returns {@code null} if this queue is empty.
+ *
+ * @return the head of this queue, or {@code null} if this queue is empty
+ */
+ public synchronized ReplicationRequest peek() {
+ return queue.peek();
+ }
+
+ /**
+ * Retrieves and removes the head of this queue,
+ * or returns {@code null} if this queue is empty.
+ *
+ * @return the head of this queue, or {@code null} if this queue is empty
+ */
+ public synchronized ReplicationRequest poll() {
+ return queue.poll();
+ }
+
+ public synchronized boolean removeAll(List<ReplicationRequest> repObjs) {
+ return queue.removeAll(repObjs);
+ }
+
+ public int size() {
+ return queue.size();
+ }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
new file mode 100644
index 0000000000..a6ccce13e0
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.io.Serializable;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+
+/**
+ * Wrapper class for hdds replication queue. Implements its natural
+ * ordering for priority queue.
+ */
+public class ReplicationRequest implements Comparable<ReplicationRequest>,
+ Serializable {
+ private final long containerId;
+ private final short replicationCount;
+ private final short expecReplicationCount;
+ private final long timestamp;
+
+ public ReplicationRequest(long containerId, short replicationCount,
+ long timestamp, short expecReplicationCount) {
+ this.containerId = containerId;
+ this.replicationCount = replicationCount;
+ this.timestamp = timestamp;
+ this.expecReplicationCount = expecReplicationCount;
+ }
+
+ /**
+ * Compares this object with the specified object for order. Returns a
+ * negative integer, zero, or a positive integer as this object is less
+ * than, equal to, or greater than the specified object.
+ * @param o the object to be compared.
+ * @return a negative integer, zero, or a positive integer as this object
+ * is less than, equal to, or greater than the specified object.
+ * @throws NullPointerException if the specified object is null
+ * @throws ClassCastException if the specified object's type prevents it
+ * from being compared to this object.
+ */
+ @Override
+ public int compareTo(ReplicationRequest o) {
+ if (o == null) {
+ return 1;
+ }
+ if (this == o) {
+ return 0;
+ }
+ int retVal = Integer
+ .compare(getReplicationCount() - getExpecReplicationCount(),
+ o.getReplicationCount() - o.getExpecReplicationCount());
+ if (retVal != 0) {
+ return retVal;
+ }
+ return Long.compare(getTimestamp(), o.getTimestamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return new HashCodeBuilder(91, 1011)
+ .append(getContainerId())
+ .toHashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ ReplicationRequest that = (ReplicationRequest) o;
+ return new EqualsBuilder().append(getContainerId(), that.getContainerId())
+ .isEquals();
+ }
+
+ public long getContainerId() {
+ return containerId;
+ }
+
+ public short getReplicationCount() {
+ return replicationCount;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ public short getExpecReplicationCount() {
+ return expecReplicationCount;
+ }
+}
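ReplicationQueue and ReplicationRequest together form a priority queue keyed by container id: equals/hashCode look only at containerId, so re-adding a request for the same container replaces the stale entry, while compareTo orders entries by replicationCount minus expecReplicationCount (most under-replicated first) and then by timestamp. A small usage sketch, placed in the same package because the ReplicationQueue constructor is package-private:

```java
package org.apache.hadoop.ozone.container.replication;

// Sketch of the intended queue semantics: duplicate adds for a container
// collapse to the latest request, and the most under-replicated container
// is polled first.
final class ReplicationQueueExample {
  private ReplicationQueueExample() { }

  public static void main(String[] args) {
    ReplicationQueue queue = new ReplicationQueue();
    long now = System.currentTimeMillis();

    // Container 1 and container 2 each report 2 of 3 expected replicas.
    queue.add(new ReplicationRequest(1L, (short) 2, now, (short) 3));
    queue.add(new ReplicationRequest(2L, (short) 2, now, (short) 3));
    // A fresher report says container 1 is down to 1 replica; it replaces the
    // earlier entry for the same container id, so the queue still holds two.
    queue.add(new ReplicationRequest(1L, (short) 1, now + 1, (short) 3));

    System.out.println(queue.size());                   // 2
    System.out.println(queue.poll().getContainerId());  // 1 (deficit of 2)
    System.out.println(queue.poll().getContainerId());  // 2 (deficit of 1)
  }
}
```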
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000000..7f335e37c1
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.replication;
+
+/**
+ * Ozone Container replication related classes.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
new file mode 100644
index 0000000000..0c4964ac4c
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocol.commands;
+
+import java.util.UUID;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * Command for the datanode with the destination address.
+ */
+public class CommandForDatanode<T extends GeneratedMessage> {
+
+ private final UUID datanodeId;
+
+ private final SCMCommand<T> command;
+
+ public CommandForDatanode(UUID datanodeId, SCMCommand<T> command) {
+ this.datanodeId = datanodeId;
+ this.command = command;
+ }
+
+ public UUID getDatanodeId() {
+ return datanodeId;
+ }
+
+ public SCMCommand<T> getCommand() {
+ return command;
+ }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
new file mode 100644
index 0000000000..834318b145
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocol.commands;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto
+ .Builder;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * SCM command to request replication of a container.
+ */
+public class ReplicateContainerCommand
+ extends SCMCommand<ReplicateContainerCommandProto> {
+
+ private final long containerID;
+
+ private final List<DatanodeDetails> sourceDatanodes;
+
+ public ReplicateContainerCommand(long containerID,
+ List<DatanodeDetails> sourceDatanodes) {
+ this.containerID = containerID;
+ this.sourceDatanodes = sourceDatanodes;
+ }
+
+ @Override
+ public Type getType() {
+ return SCMCommandProto.Type.replicateContainerCommand;
+ }
+
+ @Override
+ public byte[] getProtoBufMessage() {
+ return getProto().toByteArray();
+ }
+
+ public ReplicateContainerCommandProto getProto() {
+ Builder builder = ReplicateContainerCommandProto.newBuilder()
+ .setContainerID(containerID);
+ for (DatanodeDetails dd : sourceDatanodes) {
+ builder.addSources(dd.getProtoBufMessage());
+ }
+ return builder.build();
+ }
+
+ public static ReplicateContainerCommand getFromProtobuf(
+ ReplicateContainerCommandProto protoMessage) {
+ Preconditions.checkNotNull(protoMessage);
+
+ List<DatanodeDetails> datanodeDetails =
+ protoMessage.getSourcesList()
+ .stream()
+ .map(DatanodeDetails::getFromProtoBuf)
+ .collect(Collectors.toList());
+
+ return new ReplicateContainerCommand(protoMessage.getContainerID(),
+ datanodeDetails);
+
+ }
+
+ public long getContainerID() {
+ return containerID;
+ }
+
+ public List<DatanodeDetails> getSourceDatanodes() {
+ return sourceDatanodes;
+ }
+}
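ReplicateContainerCommand wraps the container id and the candidate source datanodes, serializing to ReplicateContainerCommandProto via getProto and parsing back with getFromProtobuf. A short round-trip sketch, assuming the caller already has the source DatanodeDetails list:

```java
import java.util.List;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;

// Sketch: build the command, serialize it to its proto form and parse it back.
final class ReplicateCommandExample {
  private ReplicateCommandExample() { }

  static ReplicateContainerCommand roundTrip(long containerId,
      List<DatanodeDetails> sources) {
    ReplicateContainerCommand command =
        new ReplicateContainerCommand(containerId, sources);
    ReplicateContainerCommandProto proto = command.getProto();
    ReplicateContainerCommand parsed =
        ReplicateContainerCommand.getFromProtobuf(proto);
    assert parsed.getContainerID() == containerId;
    assert parsed.getSourceDatanodes().size() == sources.size();
    return parsed;
  }
}
```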
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index f6aba05636..54230c1e9f 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -172,6 +172,7 @@ message SCMCommandProto {
deleteBlocksCommand = 2;
closeContainerCommand = 3;
deleteContainerCommand = 4;
+ replicateContainerCommand = 5;
}
// TODO: once we start using protoc 3.x, refactor this message using "oneof"
required Type commandType = 1;
@@ -179,6 +180,7 @@ message SCMCommandProto {
optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3;
optional CloseContainerCommandProto closeContainerCommandProto = 4;
optional DeleteContainerCommandProto deleteContainerCommandProto = 5;
+ optional ReplicateContainerCommandProto replicateContainerCommandProto = 6;
}
/**
@@ -227,12 +229,20 @@ message CloseContainerCommandProto {
}
/**
-This command asks the datanode to close a specific container.
+This command asks the datanode to delete a specific container.
*/
message DeleteContainerCommandProto {
required int64 containerID = 1;
}
+/**
+This command asks the datanode to replicate a container from specific sources.
+*/
+message ReplicateContainerCommandProto {
+ required int64 containerID = 1;
+ repeated DatanodeDetailsProto sources = 2;
+}
+
/**
* Protocol used from a datanode to StorageContainerManager.
*
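On the wire the new command is just another optional field of SCMCommandProto: the SCM sets commandType to replicateContainerCommand and attaches a ReplicateContainerCommandProto, which is exactly what the new case in HeartbeatEndpointTask unpacks. A sketch using the setters protoc generates for the fields declared above (builder method names follow the standard protobuf-java convention):

```java
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;

// Sketch: wrap a replicate-container request into the heartbeat command envelope.
final class ScmCommandEnvelopeExample {
  private ScmCommandEnvelopeExample() { }

  static SCMCommandProto wrap(long containerId) {
    ReplicateContainerCommandProto replicate =
        ReplicateContainerCommandProto.newBuilder()
            .setContainerID(containerId)  // sources can be appended with addSources(...)
            .build();
    return SCMCommandProto.newBuilder()
        .setCommandType(SCMCommandProto.Type.replicateContainerCommand)
        .setReplicateContainerCommandProto(replicate)
        .build();
  }
}
```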
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
new file mode 100644
index 0000000000..6d74c683ee
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.Random;
+import java.util.UUID;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for ReplicationQueue.
+ */
+public class TestReplicationQueue {
+
+ private ReplicationQueue replicationQueue;
+ private Random random;
+
+ @Before
+ public void setUp() {
+ replicationQueue = new ReplicationQueue();
+ random = new Random();
+ }
+
+ @Test
+ public void testDuplicateAddOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationRequest obj1, obj2, obj3;
+ long time = Time.monotonicNow();
+ obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
+ obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3);
+ obj3 = new ReplicationRequest(contId, (short) 1, time+2, (short) 3);
+
+ replicationQueue.add(obj1);
+ replicationQueue.add(obj2);
+ replicationQueue.add(obj3);
+ Assert.assertEquals("Should add only 1 msg as second one is duplicate",
+ 1, replicationQueue.size());
+ ReplicationRequest temp = replicationQueue.poll();
+ Assert.assertEquals(temp, obj3);
+ }
+
+ @Test
+ public void testPollOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationRequest msg1, msg2, msg3, msg4, msg5;
+ msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
+ (short) 3);
+ long time = Time.monotonicNow();
+ msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3);
+ msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3);
+ msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
+ // Replication message for same container but different nodeId
+ msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3);
+
+ replicationQueue.add(msg1);
+ replicationQueue.add(msg2);
+ replicationQueue.add(msg3);
+ replicationQueue.add(msg4);
+ replicationQueue.add(msg5);
+ Assert.assertEquals("Should have 3 objects",
+ 3, replicationQueue.size());
+
+ // Since Priority queue orders messages according to replication count,
+ // message with lowest replication should be first
+ ReplicationRequest temp;
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 2 objects",
+ 2, replicationQueue.size());
+ Assert.assertEquals(temp, msg3);
+
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 1 objects",
+ 1, replicationQueue.size());
+ Assert.assertEquals(temp, msg5);
+
+ // msg4 is the only remaining request: msg1 and msg2 were replaced by the
+ // later requests for the same containers (msg4 and msg5).
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 0 objects",
+ replicationQueue.size(), 0);
+ Assert.assertEquals(temp, msg4);
+ }
+
+ @Test
+ public void testRemoveOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationRequest obj1, obj2, obj3;
+ obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
+ (short) 3);
+ obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(),
+ (short) 3);
+ obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(),
+ (short) 3);
+
+ replicationQueue.add(obj1);
+ replicationQueue.add(obj2);
+ replicationQueue.add(obj3);
+ Assert.assertEquals("Should have 3 objects",
+ 3, replicationQueue.size());
+
+ replicationQueue.remove(obj3);
+ Assert.assertEquals("Should have 2 objects",
+ 2, replicationQueue.size());
+
+ replicationQueue.remove(obj2);
+ Assert.assertEquals("Should have 1 objects",
+ 1, replicationQueue.size());
+
+ replicationQueue.remove(obj1);
+ Assert.assertEquals("Should have 0 objects",
+ 0, replicationQueue.size());
+ }
+
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000000..5b1fd0f43a
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * SCM Testing and Mocking Utils.
+ */
+package org.apache.hadoop.ozone.container.replication;
+// Test classes for Replication functionality.
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
index 86888aa790..7c129457fd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -44,8 +44,8 @@ public class BlockDeletingServiceTestImpl
public BlockDeletingServiceTestImpl(ContainerManager containerManager,
int serviceInterval, Configuration conf) {
- super(containerManager, serviceInterval,
- SERVICE_TIMEOUT_IN_MILLISECONDS, conf);
+ super(containerManager, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
+ TimeUnit.MILLISECONDS, conf);
}
@VisibleForTesting
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
new file mode 100644
index 0000000000..19fddde9b4
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.ozone.lease.Lease;
+import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException;
+import org.apache.hadoop.ozone.lease.LeaseExpiredException;
+import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Event watcher that (re)sends a message after a timeout.
+ *
+ * The watcher sends the tracked payload/event after a timeout period
+ * unless a confirmation (the completion event) arrives first.
+ *
+ * @param <TIMEOUT_PAYLOAD> The type of the events which are tracked.
+ * @param <COMPLETION_PAYLOAD> The type of event which could cancel the tracking.
+ */
+@SuppressWarnings("CheckStyle")
+public abstract class EventWatcher<TIMEOUT_PAYLOAD extends IdentifiableEventPayload,
+ COMPLETION_PAYLOAD extends IdentifiableEventPayload> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(EventWatcher.class);
+
+ private final Event<TIMEOUT_PAYLOAD> startEvent;
+
+ private final Event<COMPLETION_PAYLOAD> completionEvent;
+
+ private final LeaseManager<UUID> leaseManager;
+
+ protected final Map<UUID, TIMEOUT_PAYLOAD> trackedEventsByUUID =
+ new ConcurrentHashMap<>();
+
+ protected final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
+
+ public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent,
+ Event<COMPLETION_PAYLOAD> completionEvent,
+ LeaseManager<UUID> leaseManager) {
+ this.startEvent = startEvent;
+ this.completionEvent = completionEvent;
+ this.leaseManager = leaseManager;
+
+ }
+
+ public void start(EventQueue queue) {
+
+ queue.addHandler(startEvent, this::handleStartEvent);
+
+ queue.addHandler(completionEvent, (completionPayload, publisher) -> {
+ UUID uuid = completionPayload.getUUID();
+ try {
+ handleCompletion(uuid, publisher);
+ } catch (LeaseNotFoundException e) {
+ //It's already done. Too late, we already retried it.
+ //Not a real problem.
+ LOG.warn("Completion event without active lease. UUID={}", uuid);
+ }
+ });
+
+ }
+
+ private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
+ EventPublisher publisher) {
+ UUID identifier = payload.getUUID();
+ trackedEventsByUUID.put(identifier, payload);
+ trackedEvents.add(payload);
+ try {
+ Lease<UUID> lease = leaseManager.acquire(identifier);
+ try {
+ lease.registerCallBack(() -> {
+ handleTimeout(publisher, identifier);
+ return null;
+ });
+
+ } catch (LeaseExpiredException e) {
+ handleTimeout(publisher, identifier);
+ }
+ } catch (LeaseAlreadyExistException e) {
+ //No problem at all. But timer is not reset.
+ }
+ }
+
+ private synchronized void handleCompletion(UUID uuid,
+ EventPublisher publisher) throws LeaseNotFoundException {
+ leaseManager.release(uuid);
+ TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(uuid);
+ trackedEvents.remove(payload);
+ onFinished(publisher, payload);
+ }
+
+ private synchronized void handleTimeout(EventPublisher publisher,
+ UUID identifier) {
+ TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(identifier);
+ trackedEvents.remove(payload);
+ onTimeout(publisher, payload);
+ }
+
+
+ /**
+ * Check if a specific payload is in-progress.
+ */
+ public synchronized boolean contains(TIMEOUT_PAYLOAD payload) {
+ return trackedEvents.contains(payload);
+ }
+
+ public synchronized boolean remove(TIMEOUT_PAYLOAD payload) {
+ try {
+ leaseManager.release(payload.getUUID());
+ } catch (LeaseNotFoundException e) {
+ LOG.warn("Completion event without active lease. UUID={}",
+ payload.getUUID());
+ }
+ trackedEventsByUUID.remove(payload.getUUID());
+ return trackedEvents.remove(payload);
+
+ }
+
+ abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+
+ abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+
+ public List<TIMEOUT_PAYLOAD> getTimeoutEvents(
+ Predicate<? super TIMEOUT_PAYLOAD> predicate) {
+ return trackedEventsByUUID.values().stream().filter(predicate)
+ .collect(Collectors.toList());
+ }
+}
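EventWatcher itself is abstract: a concrete watcher is wired to a start event and a completion event, leases every tracked payload by UUID, and supplies onTimeout/onFinished. Because those two hooks are package-private in this patch, a subclass has to sit in org.apache.hadoop.hdds.server.events. A heavily simplified, hypothetical sketch; the payload class, the extra retry event and the fireEvent call are assumptions for illustration, not part of this change:

```java
package org.apache.hadoop.hdds.server.events;

import java.util.UUID;

import org.apache.hadoop.ozone.lease.LeaseManager;

// Hypothetical payload: anything with a stable UUID can be tracked.
class RetryPayload implements IdentifiableEventPayload {
  private final UUID uuid = UUID.randomUUID();

  @Override
  public UUID getUUID() {
    return uuid;
  }
}

// Hypothetical watcher: resend the payload on timeout, do nothing when the
// completion event arrives before the lease expires.
class RetryingWatcher extends EventWatcher<RetryPayload, RetryPayload> {

  private final Event<RetryPayload> retryEvent;

  RetryingWatcher(Event<RetryPayload> startEvent,
      Event<RetryPayload> completionEvent,
      Event<RetryPayload> retryEvent,
      LeaseManager<UUID> leaseManager) {
    super(startEvent, completionEvent, leaseManager);
    this.retryEvent = retryEvent;
  }

  @Override
  void onTimeout(EventPublisher publisher, RetryPayload payload) {
    // No confirmation in time: publish the payload again on the retry event.
    publisher.fireEvent(retryEvent, payload);
  }

  @Override
  void onFinished(EventPublisher publisher, RetryPayload payload) {
    // Confirmed before the lease expired; nothing left to do.
  }
}
```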
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
new file mode 100644
index 0000000000..e73e30fcde
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.UUID;
+
+/**
+ * Event with an additional unique identifier.
+ *
+ */
+public interface IdentifiableEventPayload {
+
+ UUID getUUID();
+
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
new file mode 100644
index 0000000000..3f34a70e6e
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Dummy class for testing to collect all the received events.
+ */
+public class EventHandlerStub<PAYLOAD> implements EventHandler<PAYLOAD> {
+
+ private List<PAYLOAD> receivedEvents = new ArrayList<>();
+
+ @Override
+ public void onMessage(PAYLOAD payload, EventPublisher publisher) {
+ receivedEvents.add(payload);
+ }
+
+ public List<PAYLOAD> getReceivedEvents() {
+ return receivedEvents;
+ }
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
new file mode 100644
index 0000000000..1731350cfe
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.PriorityQueue;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static com.google.common.util.concurrent.Uninterruptibles
- .sleepUninterruptibly;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT;
-
-/**
- * This class takes a set of container reports that belong to a pool and then
- * computes the replication levels for each container.
- */
-public class ContainerSupervisor implements Closeable {
- public static final Logger LOG =
- LoggerFactory.getLogger(ContainerSupervisor.class);
-
- private final NodePoolManager poolManager;
- private final HashSet<String> poolNames;
- private final PriorityQueue<PeriodicPool> poolQueue;
- private final NodeManager nodeManager;
- private final long containerProcessingLag;
- private final AtomicBoolean runnable;
- private final ExecutorService executorService;
- private final long maxPoolWait;
- private long poolProcessCount;
- private final List<InProgressPool> inProgressPoolList;
- private final AtomicInteger threadFaultCount;
- private final int inProgressPoolMaxCount;
-
- private final ReadWriteLock inProgressPoolListLock;
-
- /**
- * Returns the number of times we have processed pools.
- * @return long
- */
- public long getPoolProcessCount() {
- return poolProcessCount;
- }
-
-
- /**
- * Constructs a class that computes Replication Levels.
- *
- * @param conf - OzoneConfiguration
- * @param nodeManager - Node Manager
- * @param poolManager - Pool Manager
- */
- public ContainerSupervisor(Configuration conf, NodeManager nodeManager,
- NodePoolManager poolManager) {
- Preconditions.checkNotNull(poolManager);
- Preconditions.checkNotNull(nodeManager);
- this.containerProcessingLag =
- conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,
- OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,
- TimeUnit.SECONDS
- ) * 1000;
- int maxContainerReportThreads =
- conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS,
- OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT
- );
- this.maxPoolWait =
- conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT,
- OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT,
- TimeUnit.MILLISECONDS);
- this.inProgressPoolMaxCount = conf.getInt(
- OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS,
- OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT);
- this.poolManager = poolManager;
- this.nodeManager = nodeManager;
- this.poolNames = new HashSet<>();
- this.poolQueue = new PriorityQueue<>();
- this.runnable = new AtomicBoolean(true);
- this.threadFaultCount = new AtomicInteger(0);
- this.executorService = newCachedThreadPool(
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("Container Reports Processing Thread - %d")
- .build(), maxContainerReportThreads);
- this.inProgressPoolList = new LinkedList<>();
- this.inProgressPoolListLock = new ReentrantReadWriteLock();
-
- initPoolProcessThread();
- }
-
- private ExecutorService newCachedThreadPool(ThreadFactory threadFactory,
- int maxThreads) {
- return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
- new LinkedBlockingQueue<>(), threadFactory);
- }
-
- /**
- * Returns the number of pools that are under process right now.
- * @return int - Number of pools that are in process.
- */
- public int getInProgressPoolCount() {
- return inProgressPoolList.size();
- }
-
- /**
- * Exits the background thread.
- */
- public void setExit() {
- this.runnable.set(false);
- }
-
- /**
- * Adds or removes pools from names that we need to process.
- *
- * There are two different cases that we need to process.
- * The case where some pools are being added and some times we have to
- * handle cases where pools are removed.
- */
- private void refreshPools() {
- List<String> pools = this.poolManager.getNodePools();
- if (pools != null) {
-
- HashSet<String> removedPools =
- computePoolDifference(this.poolNames, new HashSet<>(pools));
-
- HashSet<String> addedPools =
- computePoolDifference(new HashSet<>(pools), this.poolNames);
- // TODO: Support remove pool API in pool manager so that this code
- // path can be tested. This never happens in the current code base.
- for (String poolName : removedPools) {
- for (PeriodicPool periodicPool : poolQueue) {
- if (periodicPool.getPoolName().compareTo(poolName) == 0) {
- poolQueue.remove(periodicPool);
- }
- }
- }
- // Remove the pool names that we have in the list.
- this.poolNames.removeAll(removedPools);
-
- for (String poolName : addedPools) {
- poolQueue.add(new PeriodicPool(poolName));
- }
-
- // Add to the pool names we are tracking.
- poolNames.addAll(addedPools);
- }
-
- }
-
- /**
- * Handle the case where pools are added.
- *
- * @param newPools - New Pools list
- * @param oldPool - oldPool List.
- */
- private HashSet<String> computePoolDifference(HashSet<String> newPools,
- Set<String> oldPool) {
- Preconditions.checkNotNull(newPools);
- Preconditions.checkNotNull(oldPool);
- HashSet newSet = new HashSet<>(newPools);
- newSet.removeAll(oldPool);
- return newSet;
- }
-
- private void initPoolProcessThread() {
-
- /*
- * Task that runs to check if we need to start a pool processing job.
- * if so we create a pool reconciliation job and find out of all the
- * expected containers are on the nodes.
- */
- Runnable processPools = () -> {
- while (runnable.get()) {
- // Make sure that we don't have any new pools.
- refreshPools();
- while (inProgressPoolList.size() < inProgressPoolMaxCount) {
- PeriodicPool pool = poolQueue.poll();
- if (pool != null) {
- if (pool.getLastProcessedTime() + this.containerProcessingLag >
- Time.monotonicNow()) {
- LOG.debug("Not within the time window for processing: {}",
- pool.getPoolName());
- // we might over sleep here, not a big deal.
- sleepUninterruptibly(this.containerProcessingLag,
- TimeUnit.MILLISECONDS);
- }
- LOG.debug("Adding pool {} to container processing queue",
- pool.getPoolName());
- InProgressPool inProgressPool = new InProgressPool(maxPoolWait,
- pool, this.nodeManager, this.poolManager, this.executorService);
- inProgressPool.startReconciliation();
- inProgressPoolListLock.writeLock().lock();
- try {
- inProgressPoolList.add(inProgressPool);
- } finally {
- inProgressPoolListLock.writeLock().unlock();
- }
- poolProcessCount++;
- } else {
- break;
- }
- }
- sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS);
- inProgressPoolListLock.readLock().lock();
- try {
- for (InProgressPool inProgressPool : inProgressPoolList) {
- inProgressPool.finalizeReconciliation();
- poolQueue.add(inProgressPool.getPool());
- }
- } finally {
- inProgressPoolListLock.readLock().unlock();
- }
- inProgressPoolListLock.writeLock().lock();
- try {
- inProgressPoolList.clear();
- } finally {
- inProgressPoolListLock.writeLock().unlock();
- }
- }
- };
-
- // We will have only one thread for pool processing.
- Thread poolProcessThread = new Thread(processPools);
- poolProcessThread.setDaemon(true);
- poolProcessThread.setName("Pool replica thread");
- poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> {
- // Let us just restart this thread after logging a critical error.
- // if this thread is not running we cannot handle commands from SCM.
- LOG.error("Critical Error : Pool replica thread encountered an " +
- "error. Thread: {} Error Count : {}", t.toString(), e,
- threadFaultCount.incrementAndGet());
- poolProcessThread.start();
- // TODO : Add a config to restrict how many times we will restart this
- // thread in a single session.
- });
- poolProcessThread.start();
- }
-
- /**
- * Adds a container report to appropriate inProgress Pool.
- * @param containerReport -- Container report for a specific container from
- * a datanode.
- */
- public void handleContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto containerReport) {
- inProgressPoolListLock.readLock().lock();
- try {
- String poolName = poolManager.getNodePool(datanodeDetails);
- for (InProgressPool ppool : inProgressPoolList) {
- if (ppool.getPoolName().equalsIgnoreCase(poolName)) {
- ppool.handleContainerReport(datanodeDetails, containerReport);
- return;
- }
- }
- // TODO: Decide if we can do anything else with this report.
- LOG.debug("Discarding the container report for pool {}. " +
- "That pool is not currently in the pool reconciliation process." +
- " Container Name: {}", poolName, datanodeDetails);
- } catch (SCMException e) {
- LOG.warn("Skipping processing container report from datanode {}, "
- + "cause: failed to get the corresponding node pool",
- datanodeDetails.toString(), e);
- } finally {
- inProgressPoolListLock.readLock().unlock();
- }
- }
-
- /**
- * Get in process pool list, used for testing.
- * @return List of InProgressPool
- */
- @VisibleForTesting
- public List<InProgressPool> getInProcessPoolList() {
- return inProgressPoolList;
- }
-
- /**
- * Shutdown the Container Replication Manager.
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
- setExit();
- HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS);
- }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
deleted file mode 100644
index 4b547311da..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-/**
- * These are pools that are actively checking for replication status of the
- * containers.
- */
-public final class InProgressPool {
- public static final Logger LOG =
- LoggerFactory.getLogger(InProgressPool.class);
-
- private final PeriodicPool pool;
- private final NodeManager nodeManager;
- private final NodePoolManager poolManager;
- private final ExecutorService executorService;
- private final Map<Long, Integer> containerCountMap;
- private final Map<UUID, Boolean> processedNodeSet;
- private final long startTime;
- private ProgressStatus status;
- private AtomicInteger nodeCount;
- private AtomicInteger nodeProcessed;
- private AtomicInteger containerProcessedCount;
- private long maxWaitTime;
- /**
- * Constructs an pool that is being processed.
- * @param maxWaitTime - Maximum wait time in milliseconds.
- * @param pool - Pool that we are working against
- * @param nodeManager - Nodemanager
- * @param poolManager - pool manager
- * @param executorService - Shared Executor service.
- */
- InProgressPool(long maxWaitTime, PeriodicPool pool,
- NodeManager nodeManager, NodePoolManager poolManager,
- ExecutorService executorService) {
- Preconditions.checkNotNull(pool);
- Preconditions.checkNotNull(nodeManager);
- Preconditions.checkNotNull(poolManager);
- Preconditions.checkNotNull(executorService);
- Preconditions.checkArgument(maxWaitTime > 0);
- this.pool = pool;
- this.nodeManager = nodeManager;
- this.poolManager = poolManager;
- this.executorService = executorService;
- this.containerCountMap = new ConcurrentHashMap<>();
- this.processedNodeSet = new ConcurrentHashMap<>();
- this.maxWaitTime = maxWaitTime;
- startTime = Time.monotonicNow();
- }
-
- /**
- * Returns periodic pool.
- *
- * @return PeriodicPool
- */
- public PeriodicPool getPool() {
- return pool;
- }
-
- /**
- * We are done if we have got reports from all nodes or we have
- * done waiting for the specified time.
- *
- * @return true if we are done, false otherwise.
- */
- public boolean isDone() {
- return (nodeCount.get() == nodeProcessed.get()) ||
- (this.startTime + this.maxWaitTime) > Time.monotonicNow();
- }
-
- /**
- * Gets the number of containers processed.
- *
- * @return int
- */
- public int getContainerProcessedCount() {
- return containerProcessedCount.get();
- }
-
- /**
- * Returns the start time in milliseconds.
- *
- * @return - Start Time.
- */
- public long getStartTime() {
- return startTime;
- }
-
- /**
- * Get the number of nodes in this pool.
- *
- * @return - node count
- */
- public int getNodeCount() {
- return nodeCount.get();
- }
-
- /**
- * Get the number of nodes that we have already processed container reports
- * from.
- *
- * @return - Processed count.
- */
- public int getNodeProcessed() {
- return nodeProcessed.get();
- }
-
- /**
- * Returns the current status.
- *
- * @return Status
- */
- public ProgressStatus getStatus() {
- return status;
- }
-
- /**
- * Starts the reconciliation process for all the nodes in the pool.
- */
- public void startReconciliation() {
- List<DatanodeDetails> datanodeDetailsList =
- this.poolManager.getNodes(pool.getPoolName());
- if (datanodeDetailsList.size() == 0) {
- LOG.error("Datanode list for {} is Empty. Pool with no nodes ? ",
- pool.getPoolName());
- this.status = ProgressStatus.Error;
- return;
- }
-
- nodeProcessed = new AtomicInteger(0);
- containerProcessedCount = new AtomicInteger(0);
- nodeCount = new AtomicInteger(0);
- this.status = ProgressStatus.InProgress;
- this.getPool().setLastProcessedTime(Time.monotonicNow());
- }
-
- /**
- * Queues a container Report for handling. This is done in a worker thread
- * since decoding a container report might be compute intensive . We don't
- * want to block since we have asked for bunch of container reports
- * from a set of datanodes.
- *
- * @param containerReport - ContainerReport
- */
- public void handleContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto containerReport) {
- if (status == ProgressStatus.InProgress) {
- executorService.submit(processContainerReport(datanodeDetails,
- containerReport));
- } else {
- LOG.debug("Cannot handle container report when the pool is in {} status.",
- status);
- }
- }
-
- private Runnable processContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto reports) {
- return () -> {
- if (processedNodeSet.computeIfAbsent(datanodeDetails.getUuid(),
- (k) -> true)) {
- nodeProcessed.incrementAndGet();
- LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed,
- datanodeDetails.getUuid());
- for (ContainerInfo info : reports.getReportsList()) {
- containerProcessedCount.incrementAndGet();
- LOG.debug("Total Containers processed: {} Container Name: {}",
- containerProcessedCount.get(), info.getContainerID());
-
- // Update the container map with count + 1 if the key exists or
- // update the map with 1. Since this is a concurrentMap the
- // computation and update is atomic.
- containerCountMap.merge(info.getContainerID(), 1, Integer::sum);
- }
- }
- };
- }
-
- /**
- * Filter the containers based on specific rules.
- *
- * @param predicate -- Predicate to filter by
- * @return A list of map entries.
- */
- public List<Map.Entry<Long, Integer>> filterContainer(
- Predicate<Map.Entry<Long, Integer>> predicate) {
- return containerCountMap.entrySet().stream()
- .filter(predicate).collect(Collectors.toList());
- }
-
- /**
- * Used only for testing, calling this will abort container report
- * processing. This is very dangerous call and should not be made by any users
- */
- @VisibleForTesting
- public void setDoneProcessing() {
- nodeProcessed.set(nodeCount.get());
- }
-
- /**
- * Returns the pool name.
- *
- * @return Name of the pool.
- */
- String getPoolName() {
- return pool.getPoolName();
- }
-
- public void finalizeReconciliation() {
- status = ProgressStatus.Done;
- //TODO: Add finalizing logic. This is where actual reconciliation happens.
- }
-
- /**
- * Current status of the computing replication status.
- */
- public enum ProgressStatus {
- InProgress, Done, Error
- }
-}
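The removed InProgressPool counted container replicas with ConcurrentHashMap.merge, which performs the per-key read-modify-write atomically, as its inline comment notes. A standalone sketch of that counting idiom (illustrative only):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class ReplicaCountDemo {
  public static void main(String[] args) {
    Map<Long, Integer> containerCount = new ConcurrentHashMap<>();

    // Each report of a container ID bumps its count by one; merge() inserts 1
    // when the key is absent, otherwise applies Integer::sum atomically.
    long[] reportedContainerIds = {1L, 2L, 1L, 3L, 1L};
    for (long id : reportedContainerIds) {
      containerCount.merge(id, 1, Integer::sum);
    }

    System.out.println(containerCount); // {1=3, 2=1, 3=1}
  }
}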
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
deleted file mode 100644
index ef28aa78d0..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Periodic pool is a pool with a time stamp, this allows us to process pools
- * based on a cyclic clock.
- */
-public class PeriodicPool implements Comparable<PeriodicPool> {
- private final String poolName;
- private long lastProcessedTime;
- private AtomicLong totalProcessedCount;
-
- /**
- * Constructs a periodic pool.
- *
- * @param poolName - Name of the pool
- */
- public PeriodicPool(String poolName) {
- this.poolName = poolName;
- lastProcessedTime = 0;
- totalProcessedCount = new AtomicLong(0);
- }
-
- /**
- * Get pool Name.
- * @return PoolName
- */
- public String getPoolName() {
- return poolName;
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less
- * than, equal to, or greater than the specified object.
- *
- * @param o the object to be compared.
- * @return a negative integer, zero, or a positive integer as this object is
- * less than, equal to, or greater than the specified object.
- * @throws NullPointerException if the specified object is null
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this object.
- */
- @Override
- public int compareTo(PeriodicPool o) {
- return Long.compare(this.lastProcessedTime, o.lastProcessedTime);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- PeriodicPool that = (PeriodicPool) o;
-
- return poolName.equals(that.poolName);
- }
-
- @Override
- public int hashCode() {
- return poolName.hashCode();
- }
-
- /**
- * Returns the Total Times we have processed this pool.
- *
- * @return processed count.
- */
- public long getTotalProcessedCount() {
- return totalProcessedCount.get();
- }
-
- /**
- * Gets the last time we processed this pool.
- * @return time in milliseconds
- */
- public long getLastProcessedTime() {
- return this.lastProcessedTime;
- }
-
-
- /**
- * Sets the last processed time.
- *
- * @param lastProcessedTime - Long in milliseconds.
- */
-
- public void setLastProcessedTime(long lastProcessedTime) {
- this.lastProcessedTime = lastProcessedTime;
- }
-
- /*
- * Increments the total processed count.
- */
- public void incTotalProcessedCount() {
- this.totalProcessedCount.incrementAndGet();
- }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
deleted file mode 100644
index 7bbe2efe57..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-/*
- This package contains routines that manage replication of a container. This
- relies on container reports to understand the replication level of a
- container - UnderReplicated, Replicated, OverReplicated -- and manages the
- replication level based on that.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 4392633b16..72d7e946cc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -123,12 +123,6 @@ public interface NodeManager extends StorageContainerNodeProtocol,
*/
SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails);
- /**
- * Returns the NodePoolManager associated with the NodeManager.
- * @return NodePoolManager
- */
- NodePoolManager getNodePoolManager();
-
/**
* Wait for the heartbeat is processed by NodeManager.
* @return true if heartbeat has been processed.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
deleted file mode 100644
index 46faf9ca4d..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface that defines SCM NodePoolManager.
- */
-public interface NodePoolManager extends Closeable {
-
- /**
- * Add a node to a node pool.
- * @param pool - name of the node pool.
- * @param node - data node.
- */
- void addNode(String pool, DatanodeDetails node) throws IOException;
-
- /**
- * Remove a node from a node pool.
- * @param pool - name of the node pool.
- * @param node - data node.
- * @throws SCMException
- */
- void removeNode(String pool, DatanodeDetails node)
- throws SCMException;
-
- /**
- * Get a list of known node pools.
- * @return a list of known node pool names or an empty list if not node pool
- * is defined.
- */
- List<String> getNodePools();
-
- /**
- * Get all nodes of a node pool given the name of the node pool.
- * @param pool - name of the node pool.
- * @return a list of datanode ids or an empty list if the node pool was not
- * found.
- */
- List<DatanodeDetails> getNodes(String pool);
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- * @param datanodeDetails - datanode ID.
- * @return node pool name if it has been assigned.
- * null if the node has not been assigned to any node pool yet.
- */
- String getNodePool(DatanodeDetails datanodeDetails) throws SCMException;
-}
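For reference, the contract deleted above maps datanodes to named pools. A stripped-down in-memory sketch of the same idea (plain String node IDs stand in for DatanodeDetails so the example stays self-contained; the real SCMNodePoolManager removed further below also persisted the mapping in a MetadataStore):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public final class InMemoryNodePools {
  // pool name -> node IDs in that pool
  private final Map<String, Set<String>> pools = new HashMap<>();

  public void addNode(String pool, String nodeId) {
    pools.computeIfAbsent(pool, k -> new HashSet<>()).add(nodeId);
  }

  public List<String> getNodePools() {
    return new ArrayList<>(pools.keySet());
  }

  public List<String> getNodes(String pool) {
    return new ArrayList<>(pools.getOrDefault(pool, new HashSet<>()));
  }

  public String getNodePool(String nodeId) {
    return pools.entrySet().stream()
        .filter(e -> e.getValue().contains(nodeId))
        .map(Map.Entry::getKey)
        .findFirst()
        .orElse(null); // null: node not assigned to any pool yet
  }
}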
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 9ac99303bd..ff5b9f1f23 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,6 +25,10 @@
import org.apache.hadoop.hdds.scm.VersionInfo;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -43,11 +47,13 @@
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -101,7 +107,8 @@
* as soon as you read it.
*/
public class SCMNodeManager
- implements NodeManager, StorageContainerNodeProtocol {
+ implements NodeManager, StorageContainerNodeProtocol,
+ EventHandler<CommandForDatanode> {
@VisibleForTesting
static final Logger LOG =
@@ -152,9 +159,11 @@ public class SCMNodeManager
private ObjectName nmInfoBean;
// Node pool manager.
- private final SCMNodePoolManager nodePoolManager;
private final StorageContainerManager scmManager;
+ public static final Event<CommandForDatanode> DATANODE_COMMAND =
+ new TypedEvent<>(CommandForDatanode.class, "DATANODE_COMMAND");
+
/**
* Constructs SCM machine Manager.
*/
@@ -200,7 +209,6 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID,
registerMXBean();
- this.nodePoolManager = new SCMNodePoolManager(conf);
this.scmManager = scmManager;
}
@@ -672,7 +680,6 @@ private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) {
@Override
public void close() throws IOException {
unregisterMXBean();
- nodePoolManager.close();
executorService.shutdown();
try {
if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
@@ -753,20 +760,6 @@ public RegisteredCommand register(
LOG.info("Leaving startup chill mode.");
}
- // TODO: define node pool policy for non-default node pool.
- // For now, all nodes are added to the "DefaultNodePool" upon registration
- // if it has not been added to any node pool yet.
- try {
- if (nodePoolManager.getNodePool(datanodeDetails) == null) {
- nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL,
- datanodeDetails);
- }
- } catch (IOException e) {
- // TODO: make sure registration failure is handled correctly.
- return RegisteredCommand.newBuilder()
- .setErrorCode(ErrorCode.errorNodeNotPermitted)
- .build();
- }
// Updating Node Report, as registration is successful
updateNodeStat(datanodeDetails.getUuid(), nodeReport);
LOG.info("Data node with ID: {} Registered.",
@@ -852,11 +845,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) {
return new SCMNodeMetric(nodeStats.get(datanodeDetails.getUuid()));
}
- @Override
- public NodePoolManager getNodePoolManager() {
- return nodePoolManager;
- }
-
@Override
public Map<String, Integer> getNodeCount() {
Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
@@ -875,4 +863,11 @@ public void addDatanodeCommand(UUID dnId, SCMCommand command) {
public void setStaleNodeIntervalMs(long interval) {
this.staleNodeIntervalMs = interval;
}
+
+ @Override
+ public void onMessage(CommandForDatanode commandForDatanode,
+ EventPublisher publisher) {
+ addDatanodeCommand(commandForDatanode.getDatanodeId(),
+ commandForDatanode.getCommand());
+ }
}
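The net effect of the SCMNodeManager change above is that datanode commands can now arrive as events: whatever handler is registered for DATANODE_COMMAND has onMessage() invoked, which forwards to addDatanodeCommand. A self-contained toy model of that dispatch shape (the real wiring goes through the new hdds EventQueue; all names below are illustrative stand-ins):

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

public final class DatanodeCommandDispatchDemo {

  // Toy stand-in for CommandForDatanode: which node, and what it should do.
  static final class Command {
    final UUID datanodeId;
    final String payload;
    Command(UUID datanodeId, String payload) {
      this.datanodeId = datanodeId;
      this.payload = payload;
    }
  }

  // Toy stand-in for the node manager side of the new handler contract.
  static final class CommandQueueingHandler {
    private final List<String> queued = new ArrayList<>();

    // Mirrors SCMNodeManager#onMessage forwarding to addDatanodeCommand(...)
    void onMessage(Command command) {
      queued.add(command.datanodeId + " <- " + command.payload);
    }

    List<String> queuedCommands() {
      return queued;
    }
  }

  public static void main(String[] args) {
    CommandQueueingHandler nodeManager = new CommandQueueingHandler();
    nodeManager.onMessage(new Command(UUID.randomUUID(), "reregister"));
    System.out.println(nodeManager.queuedCommands());
  }
}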
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
deleted file mode 100644
index faf330ea1d..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .FAILED_TO_FIND_NODE_IN_POOL;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .FAILED_TO_LOAD_NODEPOOL;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
-
-/**
- * SCM node pool manager that manges node pools.
- */
-public final class SCMNodePoolManager implements NodePoolManager {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(SCMNodePoolManager.class);
- private static final List<DatanodeDetails> EMPTY_NODE_LIST =
- new ArrayList<>();
- private static final List<String> EMPTY_NODEPOOL_LIST = new ArrayList<>();
- public static final String DEFAULT_NODEPOOL = "DefaultNodePool";
-
- // DB that saves the node to node pool mapping.
- private MetadataStore nodePoolStore;
-
- // In-memory node pool to nodes mapping
- private HashMap<String, Set<DatanodeDetails>> nodePools;
-
- // Read-write lock for nodepool operations
- private ReadWriteLock lock;
-
- /**
- * Construct SCMNodePoolManager class that manages node to node pool mapping.
- * @param conf - configuration.
- * @throws IOException
- */
- public SCMNodePoolManager(final OzoneConfiguration conf)
- throws IOException {
- final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
- OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
- File metaDir = getOzoneMetaDirPath(conf);
- String scmMetaDataDir = metaDir.getPath();
- File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB);
- nodePoolStore = MetadataStoreBuilder.newBuilder()
- .setConf(conf)
- .setDbFile(nodePoolDBPath)
- .setCacheSize(cacheSize * OzoneConsts.MB)
- .build();
- nodePools = new HashMap<>();
- lock = new ReentrantReadWriteLock();
- init();
- }
-
- /**
- * Initialize the in-memory store based on persist store from level db.
- * No lock is needed as init() is only invoked by constructor.
- * @throws SCMException
- */
- private void init() throws SCMException {
- try {
- nodePoolStore.iterate(null, (key, value) -> {
- try {
- DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf(
- HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key));
- String poolName = DFSUtil.bytes2String(value);
-
- Set<DatanodeDetails> nodePool = null;
- if (nodePools.containsKey(poolName)) {
- nodePool = nodePools.get(poolName);
- } else {
- nodePool = new HashSet<>();
- nodePools.put(poolName, nodePool);
- }
- nodePool.add(nodeId);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding node: {} to node pool: {}",
- nodeId, poolName);
- }
- } catch (IOException e) {
- LOG.warn("Can't add a datanode to node pool, continue next...");
- }
- return true;
- });
- } catch (IOException e) {
- LOG.error("Loading node pool error " + e);
- throw new SCMException("Failed to load node pool",
- FAILED_TO_LOAD_NODEPOOL);
- }
- }
-
- /**
- * Add a datanode to a node pool.
- * @param pool - name of the node pool.
- * @param node - name of the datanode.
- */
- @Override
- public void addNode(final String pool, final DatanodeDetails node)
- throws IOException {
- Preconditions.checkNotNull(pool, "pool name is null");
- Preconditions.checkNotNull(node, "node is null");
- lock.writeLock().lock();
- try {
- // add to the persistent store
- nodePoolStore.put(node.getProtoBufMessage().toByteArray(),
- DFSUtil.string2Bytes(pool));
-
- // add to the in-memory store
- Set<DatanodeDetails> nodePool = null;
- if (nodePools.containsKey(pool)) {
- nodePool = nodePools.get(pool);
- } else {
- nodePool = new HashSet();
- nodePools.put(pool, nodePool);
- }
- nodePool.add(node);
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- /**
- * Remove a datanode from a node pool.
- * @param pool - name of the node pool.
- * @param node - datanode id.
- * @throws SCMException
- */
- @Override
- public void removeNode(final String pool, final DatanodeDetails node)
- throws SCMException {
- Preconditions.checkNotNull(pool, "pool name is null");
- Preconditions.checkNotNull(node, "node is null");
- lock.writeLock().lock();
- try {
- // Remove from the persistent store
- byte[] kName = node.getProtoBufMessage().toByteArray();
- byte[] kData = nodePoolStore.get(kName);
- if (kData == null) {
- throw new SCMException(String.format("Unable to find node %s from" +
- " pool %s in DB.", DFSUtil.bytes2String(kName), pool),
- FAILED_TO_FIND_NODE_IN_POOL);
- }
- nodePoolStore.delete(kName);
-
- // Remove from the in-memory store
- if (nodePools.containsKey(pool)) {
- Set<DatanodeDetails> nodePool = nodePools.get(pool);
- nodePool.remove(node);
- } else {
- throw new SCMException(String.format("Unable to find node %s from" +
- " pool %s in MAP.", DFSUtil.bytes2String(kName), pool),
- FAILED_TO_FIND_NODE_IN_POOL);
- }
- } catch (IOException e) {
- throw new SCMException("Failed to remove node " + node.toString()
- + " from node pool " + pool, e,
- SCMException.ResultCodes.IO_EXCEPTION);
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- /**
- * Get all the node pools.
- * @return all the node pools.
- */
- @Override
- public List<String> getNodePools() {
- lock.readLock().lock();
- try {
- if (!nodePools.isEmpty()) {
- return nodePools.keySet().stream().collect(Collectors.toList());
- } else {
- return EMPTY_NODEPOOL_LIST;
- }
- } finally {
- lock.readLock().unlock();
- }
- }
-
- /**
- * Get all datanodes of a specific node pool.
- * @param pool - name of the node pool.
- * @return all datanodes of the specified node pool.
- */
- @Override
- public List<DatanodeDetails> getNodes(final String pool) {
- Preconditions.checkNotNull(pool, "pool name is null");
- if (nodePools.containsKey(pool)) {
- return nodePools.get(pool).stream().collect(Collectors.toList());
- } else {
- return EMPTY_NODE_LIST;
- }
- }
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- * @param datanodeDetails - datanode ID.
- * @return node pool name if it has been assigned.
- * null if the node has not been assigned to any node pool yet.
- * TODO: Put this in a in-memory map if performance is an issue.
- */
- @Override
- public String getNodePool(final DatanodeDetails datanodeDetails)
- throws SCMException {
- Preconditions.checkNotNull(datanodeDetails, "node is null");
- try {
- byte[] result = nodePoolStore.get(
- datanodeDetails.getProtoBufMessage().toByteArray());
- return result == null ? null : DFSUtil.bytes2String(result);
- } catch (IOException e) {
- throw new SCMException("Failed to get node pool for node "
- + datanodeDetails.toString(), e,
- SCMException.ResultCodes.IO_EXCEPTION);
- }
- }
-
- /**
- * Close node pool level db store.
- * @throws IOException
- */
- @Override
- public void close() throws IOException {
- nodePoolStore.close();
- }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index 832fcc669a..48affa4112 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.pipelines;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -36,12 +35,12 @@
public abstract class PipelineManager {
private static final Logger LOG =
LoggerFactory.getLogger(PipelineManager.class);
- private final List<PipelineChannel> activePipelineChannels;
- private final AtomicInteger conduitsIndex;
+ private final List<Pipeline> activePipelines;
+ private final AtomicInteger pipelineIndex;
public PipelineManager() {
- activePipelineChannels = new LinkedList<>();
- conduitsIndex = new AtomicInteger(0);
+ activePipelines = new LinkedList<>();
+ pipelineIndex = new AtomicInteger(0);
}
/**
@@ -59,9 +58,9 @@ public synchronized final Pipeline getPipeline(
/**
* In the Ozone world, we have a very simple policy.
*
- * 1. Try to create a pipelineChannel if there are enough free nodes.
+ * 1. Try to create a pipeline if there are enough free nodes.
*
- * 2. This allows all nodes to part of a pipelineChannel quickly.
+ * 2. This allows all nodes to part of a pipeline quickly.
*
* 3. if there are not enough free nodes, return conduits in a
* round-robin fashion.
@@ -70,28 +69,28 @@ public synchronized final Pipeline getPipeline(
* Create a new placement policy that returns conduits in round robin
* fashion.
*/
- PipelineChannel pipelineChannel =
- allocatePipelineChannel(replicationFactor);
- if (pipelineChannel != null) {
- LOG.debug("created new pipelineChannel:{} for container with " +
+ Pipeline pipeline =
+ allocatePipeline(replicationFactor);
+ if (pipeline != null) {
+ LOG.debug("created new pipeline:{} for container with " +
"replicationType:{} replicationFactor:{}",
- pipelineChannel.getName(), replicationType, replicationFactor);
- activePipelineChannels.add(pipelineChannel);
+ pipeline.getPipelineName(), replicationType, replicationFactor);
+ activePipelines.add(pipeline);
} else {
- pipelineChannel =
- findOpenPipelineChannel(replicationType, replicationFactor);
- if (pipelineChannel != null) {
- LOG.debug("re-used pipelineChannel:{} for container with " +
+ pipeline =
+ findOpenPipeline(replicationType, replicationFactor);
+ if (pipeline != null) {
+ LOG.debug("re-used pipeline:{} for container with " +
"replicationType:{} replicationFactor:{}",
- pipelineChannel.getName(), replicationType, replicationFactor);
+ pipeline.getPipelineName(), replicationType, replicationFactor);
}
}
- if (pipelineChannel == null) {
- LOG.error("Get pipelineChannel call failed. We are not able to find" +
- "free nodes or operational pipelineChannel.");
+ if (pipeline == null) {
+ LOG.error("Get pipeline call failed. We are not able to find" +
+ "free nodes or operational pipeline.");
return null;
} else {
- return new Pipeline(pipelineChannel);
+ return pipeline;
}
}
@@ -106,19 +105,19 @@ protected int getReplicationCount(ReplicationFactor factor) {
}
}
- public abstract PipelineChannel allocatePipelineChannel(
+ public abstract Pipeline allocatePipeline(
ReplicationFactor replicationFactor) throws IOException;
/**
- * Find a PipelineChannel that is operational.
+ * Find a Pipeline that is operational.
*
* @return - Pipeline or null
*/
- private PipelineChannel findOpenPipelineChannel(
+ private Pipeline findOpenPipeline(
ReplicationType type, ReplicationFactor factor) {
- PipelineChannel pipelineChannel = null;
+ Pipeline pipeline = null;
final int sentinal = -1;
- if (activePipelineChannels.size() == 0) {
+ if (activePipelines.size() == 0) {
LOG.error("No Operational conduits found. Returning null.");
return null;
}
@@ -126,26 +125,26 @@ private PipelineChannel findOpenPipelineChannel(
int nextIndex = sentinal;
for (; startIndex != nextIndex; nextIndex = getNextIndex()) {
// Just walk the list in a circular way.
- PipelineChannel temp =
- activePipelineChannels
+ Pipeline temp =
+ activePipelines
.get(nextIndex != sentinal ? nextIndex : startIndex);
- // if we find an operational pipelineChannel just return that.
+ // if we find an operational pipeline just return that.
if ((temp.getLifeCycleState() == LifeCycleState.OPEN) &&
(temp.getFactor() == factor) && (temp.getType() == type)) {
- pipelineChannel = temp;
+ pipeline = temp;
break;
}
}
- return pipelineChannel;
+ return pipeline;
}
/**
- * gets the next index of the PipelineChannel to get.
+ * gets the next index of the Pipeline to get.
*
* @return index in the link list to get.
*/
private int getNextIndex() {
- return conduitsIndex.incrementAndGet() % activePipelineChannels.size();
+ return pipelineIndex.incrementAndGet() % activePipelines.size();
}
/**
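Once no free nodes are left, findOpenPipeline hands out existing pipelines in round-robin order via getNextIndex, a shared AtomicInteger taken modulo the active list size. A compact standalone illustration of that rotation (illustrative only; the real code additionally filters by lifecycle state, replication type and factor):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public final class RoundRobinDemo {
  private static final AtomicInteger INDEX = new AtomicInteger(0);

  // Same shape as PipelineManager#getNextIndex: shared counter modulo list size.
  static <T> T next(List<T> active) {
    return active.get(INDEX.incrementAndGet() % active.size());
  }

  public static void main(String[] args) {
    List<String> pipelines = Arrays.asList("ratis-a", "ratis-b", "ratis-c");
    for (int i = 0; i < 5; i++) {
      System.out.println(next(pipelines)); // ratis-b, ratis-c, ratis-a, ratis-b, ...
    }
  }
}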
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 2e56043c6b..508ca9bd3b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -20,7 +20,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -85,20 +84,20 @@ public PipelineSelector(NodeManager nodeManager, Configuration conf) {
* The first of the list will be the leader node.
* @return pipeline corresponding to nodes
*/
- public static PipelineChannel newPipelineFromNodes(
+ public static Pipeline newPipelineFromNodes(
List<DatanodeDetails> nodes, LifeCycleState state,
ReplicationType replicationType, ReplicationFactor replicationFactor,
String name) {
Preconditions.checkNotNull(nodes);
Preconditions.checkArgument(nodes.size() > 0);
String leaderId = nodes.get(0).getUuidString();
- PipelineChannel
- pipelineChannel = new PipelineChannel(leaderId, state, replicationType,
+ Pipeline
+ pipeline = new Pipeline(leaderId, state, replicationType,
replicationFactor, name);
for (DatanodeDetails node : nodes) {
- pipelineChannel.addMember(node);
+ pipeline.addMember(node);
}
- return pipelineChannel;
+ return pipeline;
}
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index 70489b9253..ace8758234 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -20,7 +20,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.XceiverClientRatis;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -68,12 +67,12 @@ public RatisManagerImpl(NodeManager nodeManager,
}
/**
- * Allocates a new ratis PipelineChannel from the free nodes.
+ * Allocates a new ratis Pipeline from the free nodes.
*
* @param factor - One or Three
* @return PipelineChannel.
*/
- public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) {
+ public Pipeline allocatePipeline(ReplicationFactor factor) {
List<DatanodeDetails> newNodesList = new LinkedList<>();
List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
int count = getReplicationCount(factor);
@@ -87,22 +86,20 @@ public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) {
// once a datanode has been added to a pipeline, exclude it from
// further allocations
ratisMembers.addAll(newNodesList);
- LOG.info("Allocating a new ratis pipelineChannel of size: {}", count);
+ LOG.info("Allocating a new ratis pipeline of size: {}", count);
// Start all channel names with "Ratis", easy to grep the logs.
String conduitName = PREFIX +
UUID.randomUUID().toString().substring(PREFIX.length());
- PipelineChannel pipelineChannel =
+ Pipeline pipeline=
PipelineSelector.newPipelineFromNodes(newNodesList,
LifeCycleState.OPEN, ReplicationType.RATIS, factor, conduitName);
- Pipeline pipeline =
- new Pipeline(pipelineChannel);
try (XceiverClientRatis client =
XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
client.createPipeline(pipeline.getPipelineName(), newNodesList);
} catch (IOException e) {
return null;
}
- return pipelineChannel;
+ return pipeline;
}
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
index 8268329351..e76027fb2b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.scm.pipelines.standalone;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -67,12 +67,12 @@ public StandaloneManagerImpl(NodeManager nodeManager,
/**
- * Allocates a new standalone PipelineChannel from the free nodes.
+ * Allocates a new standalone Pipeline from the free nodes.
*
* @param factor - One
- * @return PipelineChannel.
+ * @return Pipeline.
*/
- public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) {
+ public Pipeline allocatePipeline(ReplicationFactor factor) {
List<DatanodeDetails> newNodesList = new LinkedList<>();
List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
int count = getReplicationCount(factor);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
new file mode 100644
index 0000000000..36f10a93dc
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handler for Datanode Container Report.
- */
-public class SCMDatanodeContainerReportHandler extends
- SCMDatanodeReportHandler<ContainerReportsProto> {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- SCMDatanodeContainerReportHandler.class);
-
- @Override
- public void processReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto report) throws IOException {
- LOG.trace("Processing container report from {}.", datanodeDetails);
- updateContainerReportMetrics(datanodeDetails, report);
- getSCM().getScmContainerManager()
- .processContainerReports(datanodeDetails, report);
- }
-
- /**
- * Updates container report metrics in SCM.
- *
- * @param datanodeDetails Datanode Information
- * @param reports Container Reports
- */
- private void updateContainerReportMetrics(DatanodeDetails datanodeDetails,
- ContainerReportsProto reports) {
- ContainerStat newStat = new ContainerStat();
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
- .getReportsList()) {
- newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
- info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
- info.getReadCount(), info.getWriteCount()));
- }
- // update container metrics
- StorageContainerManager.getMetrics().setLastContainerStat(newStat);
-
- // Update container stat entry, this will trigger a removal operation if it
- // exists in cache.
- String datanodeUuid = datanodeDetails.getUuidString();
- getSCM().getContainerReportCache().put(datanodeUuid, newStat);
- // update global view container metrics
- StorageContainerManager.getMetrics().incrContainerStat(newStat);
- }
-
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index d50edff7c5..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-
-/**
- * This class is responsible for dispatching heartbeat from datanode to
- * appropriate ReportHandlers at SCM.
- * Only one handler per report is supported now, it's very easy to support
- * multiple handlers for a report.
- */
-public final class SCMDatanodeHeartbeatDispatcher {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- SCMDatanodeHeartbeatDispatcher.class);
-
- /**
- * This stores Report to Handler mapping.
- */
- private final Map<Class<? extends GeneratedMessage>,
-     SCMDatanodeReportHandler<? extends GeneratedMessage>> handlers;
-
- /**
- * Executor service which will be used for processing reports.
- */
- private final ExecutorService executorService;
-
- /**
- * Constructs SCMDatanodeHeartbeatDispatcher instance with the given
- * handlers.
- *
- * @param handlers report to report handler mapping
- */
- private SCMDatanodeHeartbeatDispatcher(Map<Class<? extends GeneratedMessage>,
-     SCMDatanodeReportHandler<? extends GeneratedMessage>> handlers) {
- this.handlers = handlers;
- this.executorService = HadoopExecutors.newCachedThreadPool(
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("SCMDatanode Heartbeat Dispatcher Thread - %d")
- .build());
- }
-
- /**
- * Dispatches heartbeat to registered handlers.
- *
- * @param heartbeat heartbeat to be dispatched.
- */
- public void dispatch(SCMHeartbeatRequestProto heartbeat) {
- DatanodeDetails datanodeDetails = DatanodeDetails
- .getFromProtoBuf(heartbeat.getDatanodeDetails());
- if (heartbeat.hasNodeReport()) {
- processReport(datanodeDetails, heartbeat.getNodeReport());
- }
- if (heartbeat.hasContainerReport()) {
- processReport(datanodeDetails, heartbeat.getContainerReport());
- }
- }
-
- /**
- * Invokes appropriate ReportHandler and submits the task to executor
- * service for processing.
- *
- * @param datanodeDetails Datanode Information
- * @param report Report to be processed
- */
- @SuppressWarnings("unchecked")
- private void processReport(DatanodeDetails datanodeDetails,
- GeneratedMessage report) {
- executorService.submit(() -> {
- try {
- SCMDatanodeReportHandler handler = handlers.get(report.getClass());
- handler.processReport(datanodeDetails, report);
- } catch (IOException ex) {
- LOG.error("Exception wile processing report {}, from {}",
- report.getClass(), datanodeDetails, ex);
- }
- });
- }
-
- /**
- * Shuts down SCMDatanodeHeartbeatDispatcher.
- */
- public void shutdown() {
- executorService.shutdown();
- }
-
- /**
- * Returns a new Builder to construct {@link SCMDatanodeHeartbeatDispatcher}.
- *
- * @param conf Configuration to be used by SCMDatanodeHeartbeatDispatcher
- * @param scm {@link StorageContainerManager} instance to be used by report
- * handlers
- *
- * @return {@link SCMDatanodeHeartbeatDispatcher.Builder} instance
- */
- public static Builder newBuilder(Configuration conf,
- StorageContainerManager scm) {
- return new Builder(conf, scm);
- }
-
- /**
- * Builder for SCMDatanodeHeartbeatDispatcher.
- */
- public static class Builder {
-
- private final SCMDatanodeReportHandlerFactory reportHandlerFactory;
- private final Map<Class<? extends GeneratedMessage>,
-     SCMDatanodeReportHandler<? extends GeneratedMessage>> report2handler;
-
- /**
- * Constructs SCMDatanodeHeartbeatDispatcher.Builder instance.
- *
- * @param conf Configuration object to be used.
- * @param scm StorageContainerManager instance to be used for report
- * handler initialization.
- */
- private Builder(Configuration conf, StorageContainerManager scm) {
- this.report2handler = new HashMap<>();
- this.reportHandlerFactory =
- new SCMDatanodeReportHandlerFactory(conf, scm);
- }
-
- /**
- * Adds new report handler for the given report.
- *
- * @param report Report for which handler has to be added
- *
- * @return Builder
- */
- public Builder addHandlerFor(Class<? extends GeneratedMessage> report) {
- report2handler.put(report, reportHandlerFactory.getHandlerFor(report));
- return this;
- }
-
- /**
- * Associates the given report handler for the given report.
- *
- * @param report Report to be associated with
- * @param handler Handler to be used for the report
- *
- * @return Builder
- */
- public Builder addHandler(Class<? extends GeneratedMessage> report,
-     SCMDatanodeReportHandler<? extends GeneratedMessage> handler) {
- report2handler.put(report, handler);
- return this;
- }
-
- /**
- * Builds and returns {@link SCMDatanodeHeartbeatDispatcher} instance.
- *
- * @return SCMDatanodeHeartbeatDispatcher
- */
- public SCMDatanodeHeartbeatDispatcher build() {
- return new SCMDatanodeHeartbeatDispatcher(report2handler);
- }
- }
-
-}
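
The deleted dispatcher above was the fan-out point for SCM heartbeats. As a minimal sketch of the API being removed, assuming a Configuration `conf`, a StorageContainerManager `scm`, and an already-parsed SCMHeartbeatRequestProto `heartbeat` are in scope, its builder was used roughly like this (grounded only in the deleted code above):

```java
// Sketch only: wires the two handler types registered by the deleted factory,
// then routes one heartbeat and shuts the executor down.
SCMDatanodeHeartbeatDispatcher dispatcher =
    SCMDatanodeHeartbeatDispatcher.newBuilder(conf, scm)
        .addHandlerFor(NodeReportProto.class)
        .addHandlerFor(ContainerReportsProto.class)
        .build();

dispatcher.dispatch(heartbeat);  // submits each contained report to its handler
dispatcher.shutdown();           // stops the cached thread pool
```
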
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java
deleted file mode 100644
index fb89b02215..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handles Datanode Node Report.
- */
-public class SCMDatanodeNodeReportHandler extends
- SCMDatanodeReportHandler<NodeReportProto> {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- SCMDatanodeNodeReportHandler.class);
-
- @Override
- public void processReport(DatanodeDetails datanodeDetails,
- NodeReportProto report) throws IOException {
- LOG.debug("Processing node report from {}.", datanodeDetails);
- //TODO: add logic to process node report.
- }
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java
deleted file mode 100644
index d3386493c1..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-
-import java.io.IOException;
-
-/**
- * Datanode Report handlers should implement this interface in order to get
- * call back whenever the report is received from datanode.
- *
- * @param <T> Type of report the handler is interested in.
- */
-public abstract class SCMDatanodeReportHandler<T extends GeneratedMessage>
- implements Configurable {
-
- private Configuration config;
- private StorageContainerManager scm;
-
- /**
- * Initializes SCMDatanodeReportHandler and associates it with the given
- * StorageContainerManager instance.
- *
- * @param storageContainerManager StorageContainerManager instance to be
- * associated with.
- */
- public void init(StorageContainerManager storageContainerManager) {
- this.scm = storageContainerManager;
- }
-
- /**
- * Returns the associated StorageContainerManager instance. This will be
- * used by the ReportHandler implementations.
- *
- * @return {@link StorageContainerManager}
- */
- protected StorageContainerManager getSCM() {
- return scm;
- }
-
- @Override
- public void setConf(Configuration conf) {
- this.config = conf;
- }
-
- @Override
- public Configuration getConf() {
- return config;
- }
-
- /**
- * Processes the report received from datanode. Each ReportHandler
- * implementation is responsible for providing the logic to process the
- * report it's interested in.
- *
- * @param datanodeDetails Datanode Information
- * @param report Report to be processed
- *
- * @throws IOException In case of any exception
- */
- abstract void processReport(DatanodeDetails datanodeDetails, T report)
- throws IOException;
-}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java
deleted file mode 100644
index e88495fc23..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * Factory class to construct {@link SCMDatanodeReportHandler} given a report.
- */
-public class SCMDatanodeReportHandlerFactory {
-
- private final Configuration conf;
- private final StorageContainerManager scm;
- private final Map<Class<? extends GeneratedMessage>,
-     Class<? extends SCMDatanodeReportHandler<? extends GeneratedMessage>>>
-     report2handler;
-
- /**
- * Constructs {@link SCMDatanodeReportHandler} instance.
- *
- * @param conf Configuration to be passed to the
- * {@link SCMDatanodeReportHandler}
- */
- public SCMDatanodeReportHandlerFactory(Configuration conf,
- StorageContainerManager scm) {
- this.conf = conf;
- this.scm = scm;
- this.report2handler = new HashMap<>();
-
- report2handler.put(NodeReportProto.class,
- SCMDatanodeNodeReportHandler.class);
- report2handler.put(ContainerReportsProto.class,
- SCMDatanodeContainerReportHandler.class);
- }
-
- /**
- * Returns the SCMDatanodeReportHandler for the corresponding report.
- *
- * @param report report
- *
- * @return report handler
- */
- public SCMDatanodeReportHandler<? extends GeneratedMessage> getHandlerFor(
-     Class<? extends GeneratedMessage> report) {
-   Class<? extends SCMDatanodeReportHandler<? extends GeneratedMessage>>
-       handlerClass = report2handler.get(report);
- if (handlerClass == null) {
- throw new RuntimeException("No handler found for report " + report);
- }
- SCMDatanodeReportHandler<? extends GeneratedMessage> instance =
- ReflectionUtils.newInstance(handlerClass, conf);
- instance.init(scm);
- return instance;
- }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
deleted file mode 100644
index fda3993096..0000000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases to verify SCMDatanodeContainerReportHandler's behavior.
- */
-public class TestSCMDatanodeContainerReportHandler {
-
- //TODO: add test cases to verify SCMDatanodeContainerReportHandler.
-
- @Test
- public void dummyTest() {
- Assert.assertTrue(true);
- }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index 5d086471c1..0000000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases to verify TestSCMDatanodeNodeReportHandler's behavior.
- */
-public class TestSCMDatanodeNodeReportHandler {
-
-
- //TODO: add test cases to verify SCMDatanodeNodeReportHandler.
-
- @Test
- public void dummyTest() {
- Assert.assertTrue(true);
- }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java
deleted file mode 100644
index 4b918f76c7..0000000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases to verify the functionality of SCMDatanodeReportHandlerFactory.
- */
-public class TestSCMDatanodeReportHandlerFactory {
-
- @Test
- public void testNodeReportHandlerConstruction() {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeReportHandlerFactory factory =
- new SCMDatanodeReportHandlerFactory(conf, null);
- Assert.assertTrue(factory.getHandlerFor(NodeReportProto.class)
- instanceof SCMDatanodeNodeReportHandler);
- }
-
- @Test
- public void testContainerReporttHandlerConstruction() {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeReportHandlerFactory factory =
- new SCMDatanodeReportHandlerFactory(conf, null);
- Assert.assertTrue(factory.getHandlerFor(ContainerReportsProto.class)
- instanceof SCMDatanodeContainerReportHandler);
- }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
deleted file mode 100644
index 4a3f59f016..0000000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-/**
- * Contains test-cases to test Datanode report handlers in SCM.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 072d821247..1a4dcd7ad2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -21,7 +21,6 @@
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.CommandQueue;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -201,10 +200,6 @@ public SCMNodeMetric getNodeStat(DatanodeDetails dd) {
return null;
}
- @Override
- public NodePoolManager getNodePoolManager() {
- return Mockito.mock(NodePoolManager.class);
- }
/**
* Wait for the heartbeat is processed by NodeManager.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
deleted file mode 100644
index ffcd752e84..0000000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Pool Manager replication mock.
- */
-public class ReplicationNodePoolManagerMock implements NodePoolManager {
-
- private final Map<DatanodeDetails, String> nodeMemberShip;
-
- /**
- * A node pool manager for testing.
- */
- public ReplicationNodePoolManagerMock() {
- nodeMemberShip = new HashMap<>();
- }
-
- /**
- * Add a node to a node pool.
- *
- * @param pool - name of the node pool.
- * @param node - data node.
- */
- @Override
- public void addNode(String pool, DatanodeDetails node) {
- nodeMemberShip.put(node, pool);
- }
-
- /**
- * Remove a node from a node pool.
- *
- * @param pool - name of the node pool.
- * @param node - data node.
- * @throws SCMException
- */
- @Override
- public void removeNode(String pool, DatanodeDetails node)
- throws SCMException {
- nodeMemberShip.remove(node);
-
- }
-
- /**
- * Get a list of known node pools.
- *
- * @return a list of known node pool names or an empty list if not node pool
- * is defined.
- */
- @Override
- public List<String> getNodePools() {
- Set<String> poolSet = new HashSet<>();
- for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
- poolSet.add(entry.getValue());
- }
- return new ArrayList<>(poolSet);
-
- }
-
- /**
- * Get all nodes of a node pool given the name of the node pool.
- *
- * @param pool - name of the node pool.
- * @return a list of datanode ids or an empty list if the node pool was not
- * found.
- */
- @Override
- public List<DatanodeDetails> getNodes(String pool) {
- Set<DatanodeDetails> datanodeSet = new HashSet<>();
- for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
- if (entry.getValue().equals(pool)) {
- datanodeSet.add(entry.getKey());
- }
- }
- return new ArrayList<>(datanodeSet);
- }
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- *
- * @param datanodeDetails DatanodeDetails.
- * @return node pool name if it has been assigned. null if the node has not
- * been assigned to any node pool yet.
- */
- @Override
- public String getNodePool(DatanodeDetails datanodeDetails) {
- return nodeMemberShip.get(datanodeDetails);
- }
-
- /**
- * Closes this stream and releases any system resources associated
- * with it. If the stream is already closed then invoking this
- * method has no effect.
- *
- *
- * As noted in {@link AutoCloseable#close()}, cases where the
- * close may fail require careful attention. It is strongly advised
- * to relinquish the underlying resources and to internally
- * mark the {@code Closeable} as closed, prior to throwing
- * the {@code IOException}.
- *
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
-
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
index ad7b0569f4..de9bbdab7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
@@ -19,8 +19,8 @@
import java.util.Arrays;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
/**
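
The EqualsBuilder/HashCodeBuilder imports changed above are source-compatible between commons-lang 2.x and commons-lang3; only the package prefix moves. A hedged, self-contained illustration of the builder idiom these classes keep using (the `XAttrLike` type is invented for the example, not part of the patch):

```java
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;

// Hypothetical value class showing the builder-based equals/hashCode idiom
// that survives the lang -> lang3 package move unchanged.
final class XAttrLike {
  private final String namespace;
  private final String name;

  XAttrLike(String namespace, String name) {
    this.namespace = namespace;
    this.name = name;
  }

  @Override
  public int hashCode() {
    return new HashCodeBuilder(811, 67)   // two arbitrary odd seeds
        .append(namespace)
        .append(name)
        .toHashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == null || obj.getClass() != getClass()) {
      return false;
    }
    XAttrLike other = (XAttrLike) obj;
    return new EqualsBuilder()
        .append(namespace, other.namespace)
        .append(name, other.name)
        .isEquals();
  }
}
```
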
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java
new file mode 100644
index 0000000000..e9959c192d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemMultipartUploader;
+import org.apache.hadoop.fs.MultipartUploader;
+import org.apache.hadoop.fs.MultipartUploaderFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/**
+ * Support for HDFS multipart uploads, built on
+ * {@link FileSystem#concat(Path, Path[])}.
+ */
+public class DFSMultipartUploaderFactory extends MultipartUploaderFactory {
+ protected MultipartUploader createMultipartUploader(FileSystem fs,
+ Configuration conf) {
+ if (fs.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) {
+ return new FileSystemMultipartUploader(fs);
+ }
+ return null;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
index fe39df6305..5dfcc736b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index e83c8ae92b..a8c73a4220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -31,7 +31,7 @@
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
index dc77a47a94..f873b84c8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.HadoopIllegalArgumentException;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
index d8a7de2b7b..e80f12aa0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
@@ -19,8 +19,8 @@
import java.util.Date;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
index daa77be118..6c9f27796a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
@@ -22,8 +22,8 @@
import javax.annotation.Nullable;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.InvalidRequestException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
index f1441b5727..0b851caff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CipherSuite;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index 39489b479c..3559ab97d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.protocol;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java
index a5b95cb217..c8a2722621 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.protocol;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.Serializable;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
index 7939662ee3..8413c84df9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index 6f8a8fa8f2..2262003112 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -29,7 +29,7 @@
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.net.DomainPeer;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index c2f0350bc3..9c2d2e0ecb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -34,7 +34,7 @@
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.collections.map.LinkedMap;
-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
index fb0e06f4ac..b9fcadae52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
@@ -25,8 +25,8 @@
import java.util.NoSuchElementException;
import java.util.Random;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.io.nativeio.NativeIO;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 673acd6fa7..ec60a186c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -56,8 +56,6 @@
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;
import org.apache.hadoop.conf.Configuration;
@@ -121,6 +119,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
+import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.KMSUtil;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
@@ -172,8 +171,6 @@ public class WebHdfsFileSystem extends FileSystem
private boolean disallowFallbackToInsecureCluster;
private String restCsrfCustomHeader;
private Set<String> restCsrfMethodsToIgnore;
- private static final ObjectReader READER =
- new ObjectMapper().readerFor(Map.class);
private DFSOpsCountStatistics storageStatistics;
@@ -476,7 +473,7 @@ private Path makeAbsolute(Path f) {
+ "\" (parsed=\"" + parsed + "\")");
}
}
- return READER.readValue(in);
+ return JsonSerialization.mapReader().readValue(in);
} finally {
in.close();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
index c6ebdd67eb..3e3fbfbd91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
@@ -18,8 +18,6 @@
*/
package org.apache.hadoop.hdfs.web.oauth2;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
import com.squareup.okhttp.OkHttpClient;
import com.squareup.okhttp.Request;
import com.squareup.okhttp.RequestBody;
@@ -28,6 +26,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.Timer;
import org.apache.http.HttpStatus;
@@ -55,8 +54,6 @@
@InterfaceStability.Evolving
public class ConfRefreshTokenBasedAccessTokenProvider
extends AccessTokenProvider {
- private static final ObjectReader READER =
- new ObjectMapper().readerFor(Map.class);
public static final String OAUTH_REFRESH_TOKEN_KEY
= "dfs.webhdfs.oauth2.refresh.token";
@@ -129,7 +126,8 @@ void refresh() throws IOException {
+ responseBody.code() + ", text = " + responseBody.toString());
}
- Map<?, ?> response = READER.readValue(responseBody.body().string());
+ Map<?, ?> response = JsonSerialization.mapReader().readValue(
+ responseBody.body().string());
String newExpiresIn = response.get(EXPIRES_IN).toString();
accessTokenTimer.setExpiresIn(newExpiresIn);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
index 5c629e0165..bfd7055990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
@@ -18,8 +18,6 @@
*/
package org.apache.hadoop.hdfs.web.oauth2;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
import com.squareup.okhttp.OkHttpClient;
import com.squareup.okhttp.Request;
import com.squareup.okhttp.RequestBody;
@@ -28,6 +26,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.Timer;
import org.apache.http.HttpStatus;
@@ -55,8 +54,6 @@
@InterfaceStability.Evolving
public abstract class CredentialBasedAccessTokenProvider
extends AccessTokenProvider {
- private static final ObjectReader READER =
- new ObjectMapper().readerFor(Map.class);
public static final String OAUTH_CREDENTIAL_KEY
= "dfs.webhdfs.oauth2.credential";
@@ -123,7 +120,8 @@ void refresh() throws IOException {
+ responseBody.code() + ", text = " + responseBody.toString());
}
- Map<?, ?> response = READER.readValue(responseBody.body().string());
+ Map<?, ?> response = JsonSerialization.mapReader().readValue(
+ responseBody.body().string());
String newExpiresIn = response.get(EXPIRES_IN).toString();
timer.setExpiresIn(newExpiresIn);
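
Both OAuth2 providers above now share `JsonSerialization.mapReader()` instead of holding their own static `ObjectReader`. A minimal sketch of the resulting parsing pattern, with a made-up JSON body standing in for `responseBody.body().string()`:

```java
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.util.JsonSerialization;

public class TokenResponseDemo {
  public static void main(String[] args) throws IOException {
    // Illustrative response body; real values come from the OAuth2 endpoint.
    String body = "{\"access_token\":\"abc\",\"expires_in\":\"3600\"}";
    Map<?, ?> response = JsonSerialization.mapReader().readValue(body);
    System.out.println(response.get("expires_in"));  // prints 3600
  }
}
```
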
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
new file mode 100644
index 0000000000..b153fd9924
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.hdfs.DFSMultipartUploaderFactory
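
The new service file registers the factory for `java.util.ServiceLoader`-style discovery, which is what the `MultipartUploaderFactory` lookup builds on. A hedged sketch of that discovery; the demo class and the direct `ServiceLoader` call are illustrative, not part of the patch:

```java
import java.util.ServiceLoader;

import org.apache.hadoop.fs.MultipartUploaderFactory;

public class UploaderFactoryDiscoveryDemo {
  public static void main(String[] args) {
    // Scans META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
    // entries on the classpath, including the one added by this patch.
    for (MultipartUploaderFactory factory
        : ServiceLoader.load(MultipartUploaderFactory.class)) {
      System.out.println(factory.getClass().getName());
    }
  }
}
```
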
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 8913f1a5ea..5c33ef6f6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -28,7 +28,7 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index e9525e21b5..42d2c008a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -123,11 +123,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>commons-io</artifactId>
<scope>compile</scope>
</dependency>
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>compile</scope>
- </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java
index 458fec203f..868476a826 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java
@@ -24,7 +24,7 @@
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.UserGroupInformation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java
index 0959eaa34a..cf78be3190 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.federation.router;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
/**
* Base class for objects that are unique to a namespace.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
index 6b288b3555..60dbcdc10e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
@@ -30,7 +30,7 @@
import java.util.Collections;
import java.util.List;
-import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
import org.slf4j.Logger;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
index 005882ebdf..49cdf10364 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
@@ -26,7 +26,7 @@
import java.util.SortedMap;
import java.util.TreeMap;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index eaf9361e9f..fcd5ae1940 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -113,11 +113,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>commons-io</artifactId>
<scope>compile</scope>
</dependency>
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>compile</scope>
- </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bc8e81f976..dde7eb79c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -581,7 +581,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_BALANCER_BLOCK_MOVE_TIMEOUT = "dfs.balancer.block-move.timeout";
public static final int DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT = 0;
public static final String DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.balancer.max-no-move-interval";
- public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
+ public static final int DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
+ public static final String DFS_BALANCER_MAX_ITERATION_TIME_KEY = "dfs.balancer.max-iteration-time";
+ public static final long DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT = 20 * 60 * 1000L; // 20 mins
public static final String DFS_MOVER_MOVEDWINWIDTH_KEY = "dfs.mover.movedWinWidth";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
index 89cf641a02..f8987a367b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
@@ -21,7 +21,7 @@
import java.util.Date;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
index e96fd4da60..64ac11ca23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
@@ -31,7 +31,7 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -157,7 +157,7 @@ private boolean checkStorageInfoOrSendError(JNStorage storage,
int myNsId = storage.getNamespaceID();
String myClusterId = storage.getClusterID();
- String theirStorageInfoString = StringEscapeUtils.escapeHtml(
+ String theirStorageInfoString = StringEscapeUtils.escapeHtml4(
request.getParameter(STORAGEINFO_PARAM));
if (theirStorageInfoString != null) {
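
In commons-lang3, `StringEscapeUtils.escapeHtml` is renamed `escapeHtml4` (HTML 4.0 entity set), which is the only change this hunk makes. A small, self-contained illustration with a made-up sample string:

```java
import org.apache.commons.lang3.StringEscapeUtils;

public class EscapeHtml4Demo {
  public static void main(String[] args) {
    String raw = "<script>alert(\"x\")</script> & more";
    // lang 2.x: StringEscapeUtils.escapeHtml(raw)
    // lang3:    StringEscapeUtils.escapeHtml4(raw)
    System.out.println(StringEscapeUtils.escapeHtml4(raw));
    // &lt;script&gt;alert(&quot;x&quot;)&lt;/script&gt; &amp; more
  }
}
```
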
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 452664a947..8f25d260b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -31,7 +31,7 @@
import java.util.List;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.math.LongRange;
+import org.apache.commons.lang3.Range;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -842,8 +842,8 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
// Paranoid sanity check: if the new log is shorter than the log we
// currently have, we should not end up discarding any transactions
// which are already Committed.
- if (txnRange(currentSegment).containsLong(committedTxnId.get()) &&
- !txnRange(segment).containsLong(committedTxnId.get())) {
+ if (txnRange(currentSegment).contains(committedTxnId.get()) &&
+ !txnRange(segment).contains(committedTxnId.get())) {
throw new AssertionError(
"Cannot replace segment " +
TextFormat.shortDebugString(currentSegment) +
@@ -862,7 +862,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
// If we're shortening the log, update our highest txid
// used for lag metrics.
- if (txnRange(currentSegment).containsLong(highestWrittenTxId)) {
+ if (txnRange(currentSegment).contains(highestWrittenTxId)) {
updateHighestWrittenTxId(segment.getEndTxId());
}
}
@@ -906,10 +906,10 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
TextFormat.shortDebugString(newData) + " ; journal id: " + journalId);
}
- private LongRange txnRange(SegmentStateProto seg) {
+ private Range txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
"invalid segment: %s ; journal id: %s", seg, journalId);
- return new LongRange(seg.getStartTxId(), seg.getEndTxId());
+ return Range.between(seg.getStartTxId(), seg.getEndTxId());
}
/**
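commons-lang3 drops LongRange in favour of the generic Range type (Range<Long> here, with the type parameter lost in the hunk above). A hedged, standalone sketch of the equivalent calls used by txnRange:

import org.apache.commons.lang3.Range;

public class TxnRangeDemo {
  public static void main(String[] args) {
    // Range.between is inclusive at both ends, like the old LongRange.
    Range<Long> txns = Range.between(101L, 200L);
    System.out.println(txns.contains(150L)); // true
    System.out.println(txns.contains(201L)); // false
  }
}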
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 13d584644d..426c7ab074 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -289,13 +289,17 @@ static int getInt(Configuration conf, String key, int defaultValue) {
final int maxNoMoveInterval = conf.getInt(
DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY,
DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT);
+ final long maxIterationTime = conf.getLong(
+ DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
+ DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
this.nnc = theblockpool;
this.dispatcher =
new Dispatcher(theblockpool, p.getIncludedNodes(),
p.getExcludedNodes(), movedWinWidth, moverThreads,
dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize,
- getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, conf);
+ getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval,
+ maxIterationTime, conf);
this.threshold = p.getThreshold();
this.policy = p.getBalancingPolicy();
this.sourceNodes = p.getSourceNodes();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 349ced13f3..060c013e37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -138,6 +138,8 @@ public class Dispatcher {
private final boolean connectToDnViaHostname;
private BlockPlacementPolicies placementPolicies;
+ private long maxIterationTime;
+
static class Allocator {
private final int max;
private int count = 0;
@@ -346,13 +348,19 @@ private boolean addTo(StorageGroup g) {
/** Dispatch the move to the proxy source & wait for the response. */
private void dispatch() {
- LOG.info("Start moving " + this);
- assert !(reportedBlock instanceof DBlockStriped);
-
Socket sock = new Socket();
DataOutputStream out = null;
DataInputStream in = null;
try {
+ if (source.isIterationOver()) {
+ LOG.info("Cancel moving " + this +
+ " as the iteration has already been cancelled because" +
+ " dfs.balancer.max-iteration-time has elapsed.");
+ throw new IOException("Block move cancelled.");
+ }
+ LOG.info("Start moving " + this);
+ assert !(reportedBlock instanceof DBlockStriped);
+
sock.connect(
NetUtils.createSocketAddr(target.getDatanodeInfo().
getXferAddr(Dispatcher.this.connectToDnViaHostname)),
@@ -760,7 +768,10 @@ private Source(StorageType storageType, long maxSize2Move, DDatanode dn) {
* Check if the iteration is over
*/
public boolean isIterationOver() {
- return (Time.monotonicNow()-startTime > MAX_ITERATION_TIME);
+ if (maxIterationTime < 0) {
+ return false;
+ }
+ return (Time.monotonicNow()-startTime > maxIterationTime);
}
/** Add a task */
@@ -908,8 +919,6 @@ private boolean shouldFetchMoreBlocks() {
return blocksToReceive > 0;
}
- private static final long MAX_ITERATION_TIME = 20 * 60 * 1000L; // 20 mins
-
/**
* This method iteratively does the following: it first selects a block to
* move, then sends a request to the proxy source to start the block move
@@ -990,7 +999,7 @@ private void dispatchBlocks(long delay) {
}
if (isIterationOver()) {
- LOG.info("The maximum iteration time (" + MAX_ITERATION_TIME/1000
+ LOG.info("The maximum iteration time (" + maxIterationTime/1000
+ " seconds) has been reached. Stopping " + this);
}
}
@@ -1013,14 +1022,14 @@ public Dispatcher(NameNodeConnector nnc, Set includedNodes,
int maxNoMoveInterval, Configuration conf) {
this(nnc, includedNodes, excludedNodes, movedWinWidth,
moverThreads, dispatcherThreads, maxConcurrentMovesPerNode,
- 0L, 0L, 0, maxNoMoveInterval, conf);
+ 0L, 0L, 0, maxNoMoveInterval, -1, conf);
}
Dispatcher(NameNodeConnector nnc, Set includedNodes,
Set excludedNodes, long movedWinWidth, int moverThreads,
int dispatcherThreads, int maxConcurrentMovesPerNode,
- long getBlocksSize, long getBlocksMinBlockSize,
- int blockMoveTimeout, int maxNoMoveInterval, Configuration conf) {
+ long getBlocksSize, long getBlocksMinBlockSize, int blockMoveTimeout,
+ int maxNoMoveInterval, long maxIterationTime, Configuration conf) {
this.nnc = nnc;
this.excludedNodes = excludedNodes;
this.includedNodes = includedNodes;
@@ -1047,6 +1056,7 @@ public Dispatcher(NameNodeConnector nnc, Set includedNodes,
HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
+ this.maxIterationTime = maxIterationTime;
}
public DistributedFileSystem getDistributedFileSystem() {
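The maxIterationTime plumbing above boils down to: read dfs.balancer.max-iteration-time (milliseconds), treat a negative value as "no limit", and compare against elapsed monotonic time. A minimal standalone sketch of that logic; the IterationTimer class is illustrative and only the key name, the negative-means-unlimited rule, and the 20-minute default come from the hunks:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;

public class IterationTimer {
  private final long maxIterationTime; // millis; negative disables the check
  private final long startTime = Time.monotonicNow();

  public IterationTimer(Configuration conf) {
    this.maxIterationTime = conf.getLong(
        "dfs.balancer.max-iteration-time", 20 * 60 * 1000L); // 20 min default
  }

  public boolean isIterationOver() {
    if (maxIterationTime < 0) {
      return false; // unlimited iteration time
    }
    return Time.monotonicNow() - startTime > maxIterationTime;
  }
}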
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 76a77816d5..72ea1c0692 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4576,7 +4576,7 @@ private void scanAndCompactStorages() throws InterruptedException {
datanodesAndStorages.add(node.getDatanodeUuid());
datanodesAndStorages.add(storage.getStorageID());
}
- LOG.info("StorageInfo TreeSet fill ratio {} : {}{}",
+ LOG.debug("StorageInfo TreeSet fill ratio {} : {}{}",
storage.getStorageID(), ratio,
(ratio < storageInfoDefragmentRatio)
? " (queued for defragmentation)" : "");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index ab9743cffc..39665e3e95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -38,7 +38,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -165,7 +165,7 @@ static class ScanInfoPerBlockPool extends
/**
* Merges {@code that} ScanInfoPerBlockPool into this one
*
- * @param the ScanInfoPerBlockPool to merge
+ * @param that ScanInfoPerBlockPool to merge
*/
public void addAll(ScanInfoPerBlockPool that) {
if (that == null) return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index f70d4afe29..767b150e1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -43,7 +43,7 @@
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 8eacdecf7b..968a5a77f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -26,8 +26,8 @@
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.text.StrBuilder;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.text.StrBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index b765885e0f..90cc0c4800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -22,8 +22,8 @@
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.text.StrBuilder;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.text.StrBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
index 58ef5ce51a..5f4e0f716f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
@@ -24,8 +24,8 @@
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.text.StrBuilder;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.text.StrBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index d06cd1cdef..5604a218d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -34,8 +34,8 @@
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 7160b861f7..769c13757b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -19,7 +19,7 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.XAttr;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a8c1926051..f94f6d072b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.commons.lang.StringEscapeUtils.escapeJava;
+import static org.apache.commons.lang3.StringEscapeUtils.escapeJava;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
@@ -1827,7 +1827,7 @@ public BatchedListEntries getFilesBlockingDecom(long prevId,
INodeFile inodeFile = ucFile.asFile();
String fullPathName = inodeFile.getFullPathName();
- if (org.apache.commons.lang.StringUtils.isEmpty(path)
+ if (org.apache.commons.lang3.StringUtils.isEmpty(path)
|| fullPathName.startsWith(path)) {
openFileEntries.add(new OpenFileEntry(inodeFile.getId(),
inodeFile.getFullPathName(),
@@ -2383,7 +2383,7 @@ private HdfsFileStatus startFileInt(String src,
boolean shouldReplicate = flag.contains(CreateFlag.SHOULD_REPLICATE);
if (shouldReplicate &&
- (!org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName))) {
+ (!org.apache.commons.lang3.StringUtils.isEmpty(ecPolicyName))) {
throw new HadoopIllegalArgumentException("SHOULD_REPLICATE flag and " +
"ecPolicyName are exclusive parameters. Set both is not allowed!");
}
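The fully qualified org.apache.commons.lang3.StringUtils.isEmpty calls behave exactly like the lang 2.x version, i.e. they are null-safe. A tiny illustrative check, not taken from the patch:

import org.apache.commons.lang3.StringUtils;

public class IsEmptyDemo {
  public static void main(String[] args) {
    System.out.println(StringUtils.isEmpty(null));     // true
    System.out.println(StringUtils.isEmpty(""));       // true
    System.out.println(StringUtils.isEmpty("RS-6-3")); // false, e.g. an ecPolicyName
  }
}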
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 900f8a2291..5992e54124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -107,6 +107,8 @@ public Long initialValue() {
private static final String WRITE_LOCK_METRIC_PREFIX = "FSNWriteLock";
private static final String LOCK_METRIC_SUFFIX = "Nanos";
+ private static final String OVERALL_METRIC_NAME = "Overall";
+
FSNamesystemLock(Configuration conf,
MutableRatesWithAggregation detailedHoldTimeMetrics) {
this(conf, detailedHoldTimeMetrics, new Timer());
@@ -320,12 +322,17 @@ public int getQueueLength() {
*/
private void addMetric(String operationName, long value, boolean isWrite) {
if (metricsEnabled) {
- String metricName =
- (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) +
- org.apache.commons.lang.StringUtils.capitalize(operationName) +
- LOCK_METRIC_SUFFIX;
- detailedHoldTimeMetrics.add(metricName, value);
+ String opMetric = getMetricName(operationName, isWrite);
+ detailedHoldTimeMetrics.add(opMetric, value);
+
+ String overallMetric = getMetricName(OVERALL_METRIC_NAME, isWrite);
+ detailedHoldTimeMetrics.add(overallMetric, value);
}
}
+ private static String getMetricName(String operationName, boolean isWrite) {
+ return (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) +
+ org.apache.commons.lang3.StringUtils.capitalize(operationName) +
+ LOCK_METRIC_SUFFIX;
+ }
}
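With the new helper, every lock hold is recorded twice: once under its operation name and once under the aggregate "Overall" key. A short sketch of the resulting metric names; the read prefix "FSNReadLock" is assumed by symmetry with the write prefix shown in the hunk:

public class LockMetricNameDemo {
  private static final String READ_LOCK_METRIC_PREFIX = "FSNReadLock"; // assumed
  private static final String WRITE_LOCK_METRIC_PREFIX = "FSNWriteLock";
  private static final String LOCK_METRIC_SUFFIX = "Nanos";
  private static final String OVERALL_METRIC_NAME = "Overall";

  static String getMetricName(String operationName, boolean isWrite) {
    return (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX)
        + org.apache.commons.lang3.StringUtils.capitalize(operationName)
        + LOCK_METRIC_SUFFIX;
  }

  public static void main(String[] args) {
    System.out.println(getMetricName("getBlockLocations", false)); // FSNReadLockGetBlockLocationsNanos
    System.out.println(getMetricName(OVERALL_METRIC_NAME, true));  // FSNWriteLockOverallNanos
  }
}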
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
index 2f9bc370da..8392463d94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
index 9b23e09e49..0baf99d994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
@@ -18,9 +18,9 @@
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.lang.builder.CompareToBuilder;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.CompareToBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index 2719c8857e..4d61d0f95b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode.top.metrics;
import com.google.common.collect.Lists;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index d8cbfc6b2e..9781ea14dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -22,7 +22,7 @@
import java.util.LinkedList;
import java.util.List;
-import org.apache.commons.lang.WordUtils;
+import org.apache.commons.lang3.text.WordUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
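Note that in commons-lang3 WordUtils lives under the text subpackage (and has since been superseded by Apache commons-text). An illustrative call, not taken from CacheAdmin itself:

import org.apache.commons.lang3.text.WordUtils;

public class WrapDemo {
  public static void main(String[] args) {
    String help = "Add a new cache pool with the given name and limit.";
    // Wrap long help text at 40 columns for terminal output.
    System.out.println(WordUtils.wrap(help, 40));
  }
}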
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
index bec44a99e9..280a2d775c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.util;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang3.ArrayUtils;
import java.util.Arrays;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index b55421c162..146ae6c9c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3540,6 +3540,16 @@
+<property>
+  <name>dfs.balancer.max-iteration-time</name>
+  <value>1200000</value>
+  <description>
+    Maximum amount of time an iteration can run on the Balancer. After this
+    time the Balancer will stop the iteration, and reevaluate the work that
+    needs to be done to balance the cluster. The default value is 20 minutes.
+  </description>
+</property>
+
  <name>dfs.block.invalidate.limit</name>
  <value>1000</value>
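The new key can be overridden like any other HDFS setting; a hedged Java sketch (the 30-minute value is arbitrary) using the config constant introduced in the Balancer hunk earlier:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class BalancerConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 30 minutes instead of the 20-minute default; a negative value disables the limit.
    conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 30 * 60 * 1000L);
    System.out.println(conf.getLong(
        DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 1200000L));
  }
}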
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index 9cd46c191d..417d31ba52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -35,8 +35,8 @@
import java.util.concurrent.TimeoutException;
import org.apache.commons.collections.map.LinkedMap;
-import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.SystemUtils;
+import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
index 32d960ad6f..7027f3bc6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
@@ -26,7 +26,7 @@
import java.util.regex.Pattern;
import com.google.common.collect.Ordering;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java
new file mode 100644
index 0000000000..96c50938b3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSMultipartUploader.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+import java.io.IOException;
+
+public class TestHDFSMultipartUploader
+ extends AbstractSystemMultipartUploaderTest {
+
+ private static MiniDFSCluster cluster;
+ private Path tmp;
+
+ @Rule
+ public TestName name = new TestName();
+
+ @BeforeClass
+ public static void init() throws IOException {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf,
+ GenericTestUtils.getRandomizedTestDir())
+ .numDataNodes(1)
+ .build();
+ cluster.waitClusterUp();
+ }
+
+ @AfterClass
+ public static void cleanup() throws IOException {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ @Before
+ public void setup() throws IOException {
+ tmp = new Path(cluster.getFileSystem().getWorkingDirectory(),
+ name.getMethodName());
+ cluster.getFileSystem().mkdirs(tmp);
+ }
+
+ @Override
+ public FileSystem getFS() throws IOException {
+ return cluster.getFileSystem();
+ }
+
+ @Override
+ public Path getBaseTestPath() {
+ return tmp;
+ }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
index 72fc6e6274..7544835c7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 63199f31dd..e6a2a00252 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -81,7 +81,6 @@
import com.google.common.collect.Maps;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.UnhandledException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -2278,7 +2277,8 @@ public Boolean get() {
", current value = " + currentValue);
return currentValue == expectedValue;
} catch (Exception e) {
- throw new UnhandledException("Test failed due to unexpected exception", e);
+ throw new RuntimeException(
+ "Test failed due to unexpected exception", e);
}
}
}, 1000, 60000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index c352dc99a2..b19bdeab57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -37,7 +37,7 @@
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.log4j.Level;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index c0a595bcb7..42b4257d71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -38,7 +38,7 @@
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
-import org.apache.commons.lang.text.StrBuilder;
+import org.apache.commons.lang3.text.StrBuilder;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
index cc456b244f..7aa9f2362d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
@@ -28,7 +28,7 @@
import java.util.ArrayList;
import java.util.Arrays;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
index e2426907ca..3463f57379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
@@ -28,7 +28,7 @@
import java.util.Set;
import com.google.common.collect.Sets;
-import org.apache.commons.lang.ClassUtils;
+import org.apache.commons.lang3.ClassUtils;
import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 0b6bc6adfd..c87a6d17e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -19,7 +19,7 @@
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
-import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
index 7c4763c13f..db97c02e09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
@@ -107,6 +107,8 @@ public class TestSecureEncryptionZoneWithKMS {
// MiniKMS
private static MiniKMS miniKMS;
private final String testKey = "test_key";
+ private static boolean testKeyCreated = false;
+ private static final long AUTH_TOKEN_VALIDITY = 1;
// MiniDFS
private MiniDFSCluster cluster;
@@ -128,7 +130,7 @@ public static File getTestDir() throws Exception {
}
@Rule
- public Timeout timeout = new Timeout(30000);
+ public Timeout timeout = new Timeout(120000);
@BeforeClass
public static void init() throws Exception {
@@ -215,6 +217,9 @@ public static void init() throws Exception {
"HTTP/localhost");
kmsConf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
kmsConf.set("hadoop.kms.acl.GENERATE_EEK", "hdfs");
+ // set kms auth token expiration low for testCreateZoneAfterAuthTokenExpiry
+ kmsConf.setLong("hadoop.kms.authentication.token.validity",
+ AUTH_TOKEN_VALIDITY);
Writer writer = new FileWriter(kmsFile);
kmsConf.writeXml(writer);
@@ -260,7 +265,10 @@ public void setup() throws Exception {
cluster.waitActive();
// Create a test key
- DFSTestUtil.createKey(testKey, cluster, conf);
+ if (!testKeyCreated) {
+ DFSTestUtil.createKey(testKey, cluster, conf);
+ testKeyCreated = true;
+ }
}
@After
@@ -307,4 +315,26 @@ public Void run() throws IOException {
}
});
}
+
+ @Test
+ public void testCreateZoneAfterAuthTokenExpiry() throws Exception {
+ final UserGroupInformation ugi = UserGroupInformation
+ .loginUserFromKeytabAndReturnUGI(hdfsPrincipal, keytab);
+ LOG.info("Created ugi: {} ", ugi);
+
+ ugi.doAs((PrivilegedExceptionAction
-
-    <profile>
-      <id>hdds</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-ozone-filesystem</artifactId>
-          <scope>compile</scope>
-          <version>${project.version}</version>
-        </dependency>
-      </dependencies>
-    </profile>
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index f421e580ba..dca59d31af 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -67,15 +67,4 @@
-  <profiles>
-    <profile>
-      <id>hdds</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <modules>
-        <module>hadoop-ozone</module>
-      </modules>
-    </profile>
-  </profiles>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index eea81fe44d..a863910861 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -102,6 +102,26 @@ public static ResourceRequest newInstance(Priority priority, String hostName,
.build();
}
+ /**
+ * Clone a ResourceRequest object (shallow copy). Please keep it loaded with
+ * all (new) fields.
+ *
+ * @param rr the object to copy from
+ * @return the copied object
+ */
+ @Public
+ @Evolving
+ public static ResourceRequest clone(ResourceRequest rr) {
+ // Please keep it loaded with all (new) fields
+ return ResourceRequest.newBuilder().priority(rr.getPriority())
+ .resourceName(rr.getResourceName()).capability(rr.getCapability())
+ .numContainers(rr.getNumContainers())
+ .relaxLocality(rr.getRelaxLocality())
+ .nodeLabelExpression(rr.getNodeLabelExpression())
+ .executionTypeRequest(rr.getExecutionTypeRequest())
+ .allocationRequestId(rr.getAllocationRequestId()).build();
+ }
+
@Public
@Unstable
public static ResourceRequestBuilder newBuilder() {
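Callers that previously copied requests field by field (as in the AMRMClientImpl hunk below) can now use the new factory method. A brief illustrative usage; the concrete priority, resource and container values are arbitrary:

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class CloneDemo {
  public static void main(String[] args) {
    ResourceRequest original = ResourceRequest.newInstance(
        Priority.newInstance(1), ResourceRequest.ANY,
        Resource.newInstance(1024, 1), 3);
    // Shallow copy that carries every field, including execution type
    // and allocation request id.
    ResourceRequest copy = ResourceRequest.clone(original);
    System.out.println(copy.getNumContainers()); // 3
  }
}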
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5292a25053..5842d64357 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -240,7 +240,7 @@ private static void addDeprecatedKeys() {
public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" +
DEFAULT_RM_SCHEDULER_PORT;
- /** Miniumum request grant-able by the RM scheduler. */
+ /** Minimum request grant-able by the RM scheduler. */
public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB =
YARN_PREFIX + "scheduler.minimum-allocation-mb";
public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 1d2d719d32..5b3c72cae4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -28,6 +28,8 @@ public interface RestApiErrorMessages {
"than 63 characters";
String ERROR_COMPONENT_NAME_INVALID =
"Component name must be no more than %s characters: %s";
+ String ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME =
+ "Component name %s must not be same as service name %s";
String ERROR_USER_NAME_INVALID =
"User name must be no more than 63 characters";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 549927327d..705e04065c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -143,6 +143,11 @@ public static void validateAndResolveService(Service service,
throw new IllegalArgumentException(String.format(RestApiErrorMessages
.ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName()));
}
+ if (service.getName().equals(comp.getName())) {
+ throw new IllegalArgumentException(String.format(RestApiErrorMessages
+ .ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME,
+ comp.getName(), service.getName()));
+ }
if (componentNames.contains(comp.getName())) {
throw new IllegalArgumentException("Component name collision: " +
comp.getName());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
index 243c6b3a61..ae031d4aad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
@@ -333,6 +333,24 @@ public void testDuplicateComponents() throws IOException {
}
}
+ @Test
+ public void testComponentNameSameAsServiceName() throws IOException {
+ SliderFileSystem sfs = ServiceTestUtils.initMockFs();
+ Service app = new Service();
+ app.setName("test");
+ app.setVersion("v1");
+ app.addComponent(createValidComponent("test"));
+
+ //component name same as service name
+ try {
+ ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
+ Assert.fail(EXCEPTION_PREFIX + "component name matches service name");
+ } catch (IllegalArgumentException e) {
+ assertEquals("Component name test must not be same as service name test",
+ e.getMessage());
+ }
+ }
+
@Test
public void testExternalDuplicateComponent() throws IOException {
Service ext = createValidApplication("comp1");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 36c3cf1d4e..7265d24ac0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -451,16 +451,7 @@ private List cloneAsks() {
for(ResourceRequest r : ask) {
// create a copy of ResourceRequest as we might change it while the
// RPC layer is using it to send info across
- ResourceRequest rr =
- ResourceRequest.newBuilder().priority(r.getPriority())
- .resourceName(r.getResourceName()).capability(r.getCapability())
- .numContainers(r.getNumContainers())
- .relaxLocality(r.getRelaxLocality())
- .nodeLabelExpression(r.getNodeLabelExpression())
- .executionTypeRequest(r.getExecutionTypeRequest())
- .allocationRequestId(r.getAllocationRequestId())
- .build();
- askList.add(rr);
+ askList.add(ResourceRequest.clone(r));
}
return askList;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
index 11d703d890..51048660ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
@@ -570,11 +570,7 @@ public synchronized Allocation allocate(
ContainerUpdates updateRequests) {
List askCopy = new ArrayList();
for (ResourceRequest req : ask) {
- ResourceRequest reqCopy =
- ResourceRequest.newInstance(req.getPriority(),
- req.getResourceName(), req.getCapability(),
- req.getNumContainers(), req.getRelaxLocality());
- askCopy.add(reqCopy);
+ askCopy.add(ResourceRequest.clone(req));
}
lastAsk = ask;
lastRelease = release;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 70ff47b746..17e43cacda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -18,41 +18,9 @@
package org.apache.hadoop.yarn.client.api.impl;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.lang.Thread.State;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataInputByteBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
@@ -74,7 +42,6 @@
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -92,7 +59,6 @@
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.AHSClient;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -100,7 +66,6 @@
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase;
@@ -115,8 +80,28 @@
import org.mockito.ArgumentCaptor;
import org.slf4j.event.Level;
+import java.io.IOException;
+import java.lang.Thread.State;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
/**
- * This class is to test class {@link YarnClient) and {@link YarnClientImpl}.
+ * This class is to test class {@link YarnClient}.
*/
public class TestYarnClient extends ParameterizedSchedulerTestBase {
@@ -146,17 +131,6 @@ public void testClientStop() {
rm.stop();
}
- @Test
- public void testStartWithTimelineV15() throws Exception {
- Configuration conf = getConf();
- conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
- conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f);
- YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient();
- client.init(conf);
- client.start();
- client.stop();
- }
-
@Test
public void testStartTimelineClientWithErrors()
throws Exception {
@@ -413,7 +387,7 @@ public void testApplicationType() throws Exception {
RMApp app = rm.submitApp(2000);
RMApp app1 =
rm.submitApp(200, "name", "user",
- new HashMap(), false, "default", -1,
+ new HashMap<>(), false, "default", -1,
null, "MAPREDUCE");
Assert.assertEquals("YARN", app.getApplicationType());
Assert.assertEquals("MAPREDUCE", app1.getApplicationType());
@@ -427,7 +401,7 @@ public void testApplicationTypeLimit() throws Exception {
rm.start();
RMApp app1 =
rm.submitApp(200, "name", "user",
- new HashMap(), false, "default", -1,
+ new HashMap<>(), false, "default", -1,
null, "MAPREDUCE-LENGTH-IS-20");
Assert.assertEquals("MAPREDUCE-LENGTH-IS-", app1.getApplicationType());
rm.stop();
@@ -444,7 +418,7 @@ public void testGetApplications() throws YarnException, IOException {
List reports = client.getApplications();
Assert.assertEquals(reports, expectedReports);
- Set appTypes = new HashSet();
+ Set appTypes = new HashSet<>();
appTypes.add("YARN");
appTypes.add("NON-YARN");
@@ -601,7 +575,7 @@ public void testGetLabelsToNodes() throws YarnException, IOException {
Assert.assertEquals(labelsToNodes.size(), 3);
// Get labels to nodes for selected labels
- Set setLabels = new HashSet(Arrays.asList("x", "z"));
+ Set setLabels = new HashSet<>(Arrays.asList("x", "z"));
expectedLabelsToNodes =
((MockYarnClient)client).getLabelsToNodesMap(setLabels);
labelsToNodes = client.getLabelsToNodes(setLabels);
@@ -633,12 +607,12 @@ private static class MockYarnClient extends YarnClientImpl {
private ApplicationReport mockReport;
private List reports;
- private HashMap> attempts =
- new HashMap>();
- private HashMap> containers =
- new HashMap>();
+ private HashMap> attempts =
+ new HashMap<>();
+ private HashMap> containers =
+ new HashMap<>();
private HashMap> containersFromAHS =
- new HashMap>();
+ new HashMap<>();
GetApplicationsResponse mockAppResponse =
mock(GetApplicationsResponse.class);
@@ -739,9 +713,9 @@ private List createAppReports() {
"user", "queue", "appname", "host", 124, null,
YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
- List applicationReports = new ArrayList();
+ List applicationReports = new ArrayList<>();
applicationReports.add(newApplicationReport);
- List appAttempts = new ArrayList();
+ List appAttempts = new ArrayList<>();
ApplicationAttemptReport attempt = ApplicationAttemptReport.newInstance(
ApplicationAttemptId.newInstance(applicationId, 1),
"host",
@@ -767,7 +741,7 @@ private List createAppReports() {
appAttempts.add(attempt1);
attempts.put(applicationId, appAttempts);
- List containerReports = new ArrayList();
+ List containerReports = new ArrayList<>();
ContainerReport container = ContainerReport.newInstance(
ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null,
NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
@@ -785,7 +759,7 @@ private List createAppReports() {
//add containers to be sent from AHS
List containerReportsForAHS =
- new ArrayList();
+ new ArrayList<>();
container = ContainerReport.newInstance(
ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null,
@@ -843,7 +817,7 @@ private List getApplicationReports(
List applicationReports,
Set applicationTypes, EnumSet applicationStates) {
- List appReports = new ArrayList();
+ List appReports = new ArrayList<>();
for (ApplicationReport appReport : applicationReports) {
if (applicationTypes != null && !applicationTypes.isEmpty()) {
if (!applicationTypes.contains(appReport.getApplicationType())) {
@@ -878,9 +852,9 @@ public Map> getLabelsToNodes(Set labels)
}
public Map> getLabelsToNodesMap() {
- Map> map = new HashMap>();
+ Map> map = new HashMap<>();
Set setNodeIds =
- new HashSet(Arrays.asList(
+ new HashSet<>(Arrays.asList(
NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0)));
map.put("x", setNodeIds);
map.put("y", setNodeIds);
@@ -889,8 +863,8 @@ public Map> getLabelsToNodesMap() {
}
public Map> getLabelsToNodesMap(Set labels) {
- Map> map = new HashMap>();
- Set setNodeIds = new HashSet(Arrays.asList(
+ Map> map = new HashMap<>();
+ Set setNodeIds = new HashSet<>(Arrays.asList(
NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0)));
for (String label : labels) {
map.put(label, setNodeIds);
@@ -907,8 +881,8 @@ public Map> getNodeToLabels() throws YarnException,
}
public Map> getNodeToLabelsMap() {
- Map> map = new HashMap>();
- Set setNodeLabels = new HashSet(Arrays.asList("x", "y"));
+ Map> map = new HashMap<>();
+ Set setNodeLabels = new HashSet<>(Arrays.asList("x", "y"));
map.put(NodeId.newInstance("host", 0), setNodeLabels);
return map;
}
@@ -985,7 +959,7 @@ public List getContainersReport(
private ContainerReport getContainer(
ContainerId containerId,
HashMap> containersToAppAttemptMapping)
- throws YarnException, IOException {
+ throws YarnException {
List containersForAppAttempt =
containersToAppAttemptMapping.get(containerId
.getApplicationAttemptId());
@@ -1119,174 +1093,6 @@ private void waitTillAccepted(YarnClient rmClient, ApplicationId appId,
Assert.assertEquals(unmanagedApplication, report.isUnmanagedApp());
}
- @Test
- public void testAsyncAPIPollTimeout() {
- testAsyncAPIPollTimeoutHelper(null, false);
- testAsyncAPIPollTimeoutHelper(0L, true);
- testAsyncAPIPollTimeoutHelper(1L, true);
- }
-
- private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout,
- boolean expectedTimeoutEnforcement) {
- YarnClientImpl client = new YarnClientImpl();
- try {
- Configuration conf = getConf();
- if (valueForTimeout != null) {
- conf.setLong(
- YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
- valueForTimeout);
- }
-
- client.init(conf);
-
- Assert.assertEquals(
- expectedTimeoutEnforcement, client.enforceAsyncAPITimeout());
- } finally {
- IOUtils.closeQuietly(client);
- }
- }
-
- @Test
- public void testBestEffortTimelineDelegationToken()
- throws Exception {
- Configuration conf = getConf();
- conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
- SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
-
- YarnClientImpl client = spy(new YarnClientImpl() {
-
- @Override
- TimelineClient createTimelineClient() throws IOException, YarnException {
- timelineClient = mock(TimelineClient.class);
- when(timelineClient.getDelegationToken(any(String.class)))
- .thenThrow(new RuntimeException("Best effort test exception"));
- return timelineClient;
- }
- });
-
- client.init(conf);
- conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT,
- true);
- client.serviceInit(conf);
- client.getTimelineDelegationToken();
-
- try {
- conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false);
- client.serviceInit(conf);
- client.getTimelineDelegationToken();
- Assert.fail("Get delegation token should have thrown an exception");
- } catch (IOException e) {
- // Success
- }
- }
-
- @Test
- public void testAutomaticTimelineDelegationTokenLoading()
- throws Exception {
- Configuration conf = getConf();
- conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
- SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
- TimelineDelegationTokenIdentifier timelineDT =
- new TimelineDelegationTokenIdentifier();
- final Token dToken =
- new Token(
- timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text());
- // create a mock client
- YarnClientImpl client = spy(new YarnClientImpl() {
-
- @Override
- TimelineClient createTimelineClient() throws IOException, YarnException {
- timelineClient = mock(TimelineClient.class);
- when(timelineClient.getDelegationToken(any(String.class)))
- .thenReturn(dToken);
- return timelineClient;
- }
-
-
- @Override
- protected void serviceStart() throws Exception {
- rmClient = mock(ApplicationClientProtocol.class);
- }
-
- @Override
- protected void serviceStop() throws Exception {
- }
-
- @Override
- public ApplicationReport getApplicationReport(ApplicationId appId) {
- ApplicationReport report = mock(ApplicationReport.class);
- when(report.getYarnApplicationState())
- .thenReturn(YarnApplicationState.RUNNING);
- return report;
- }
-
- @Override
- public boolean isSecurityEnabled() {
- return true;
- }
- });
- client.init(conf);
- client.start();
- try {
- // when i == 0, timeline DT already exists, no need to get one more
- // when i == 1, timeline DT doesn't exist, need to get one more
- for (int i = 0; i < 2; ++i) {
- ApplicationSubmissionContext context =
- mock(ApplicationSubmissionContext.class);
- ApplicationId applicationId = ApplicationId.newInstance(0, i + 1);
- when(context.getApplicationId()).thenReturn(applicationId);
- DataOutputBuffer dob = new DataOutputBuffer();
- Credentials credentials = new Credentials();
- if (i == 0) {
- credentials.addToken(client.timelineService, dToken);
- }
- credentials.writeTokenStorageToStream(dob);
- ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
- ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
- null, null, null, null, tokens, null);
- when(context.getAMContainerSpec()).thenReturn(clc);
- client.submitApplication(context);
- if (i == 0) {
- // GetTimelineDelegationToken shouldn't be called
- verify(client, never()).getTimelineDelegationToken();
- }
- // In either way, token should be there
- credentials = new Credentials();
- DataInputByteBuffer dibb = new DataInputByteBuffer();
- tokens = clc.getTokens();
- if (tokens != null) {
- dibb.reset(tokens);
- credentials.readTokenStorageStream(dibb);
- tokens.rewind();
- }
- Collection<Token<? extends TokenIdentifier>> dTokens =
- credentials.getAllTokens();
- Assert.assertEquals(1, dTokens.size());
- Assert.assertEquals(dToken, dTokens.iterator().next());
- }
- } finally {
- client.stop();
- }
- }
-
- @Test
- public void testParseTimelineDelegationTokenRenewer() throws Exception {
- // Client side
- YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient();
- Configuration conf = getConf();
- conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
- conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM");
- conf.set(
- YarnConfiguration.RM_ADDRESS, "localhost:8188");
- try {
- client.init(conf);
- client.start();
- Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer);
- } finally {
- client.stop();
- }
- }
-
@Test(timeout = 30000, expected = ApplicationNotFoundException.class)
public void testShouldNotRetryForeverForNonNetworkExceptions() throws Exception {
YarnConfiguration conf = getConf();
@@ -1353,38 +1159,35 @@ private void testCreateTimelineClientWithError(
timelineClientBestEffort);
conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION,
timelineVersion);
- YarnClient client = new MockYarnClient();
- if (client instanceof YarnClientImpl) {
- YarnClientImpl impl = (YarnClientImpl) client;
- YarnClientImpl spyClient = spy(impl);
- when(spyClient.createTimelineClient()).thenThrow(mockErr);
- CreateTimelineClientErrorVerifier verifier = spy(errVerifier);
- spyClient.init(conf);
- spyClient.start();
+ MockYarnClient client = new MockYarnClient();
+ MockYarnClient spyClient = spy(client);
+ when(spyClient.createTimelineClient()).thenThrow(mockErr);
+ CreateTimelineClientErrorVerifier verifier = spy(errVerifier);
+ spyClient.init(conf);
+ spyClient.start();
- ApplicationSubmissionContext context =
- mock(ApplicationSubmissionContext.class);
- ContainerLaunchContext containerContext =
- mock(ContainerLaunchContext.class);
- ApplicationId applicationId =
- ApplicationId.newInstance(System.currentTimeMillis(), 1);
- when(containerContext.getTokens()).thenReturn(null);
- when(context.getApplicationId()).thenReturn(applicationId);
- when(spyClient.isSecurityEnabled()).thenReturn(true);
- when(context.getAMContainerSpec()).thenReturn(containerContext);
+ ApplicationSubmissionContext context =
+ mock(ApplicationSubmissionContext.class);
+ ContainerLaunchContext containerContext =
+ mock(ContainerLaunchContext.class);
+ ApplicationId applicationId =
+ ApplicationId.newInstance(System.currentTimeMillis(), 1);
+ when(containerContext.getTokens()).thenReturn(null);
+ when(context.getApplicationId()).thenReturn(applicationId);
+ when(spyClient.isSecurityEnabled()).thenReturn(true);
+ when(context.getAMContainerSpec()).thenReturn(containerContext);
- try {
- spyClient.submitApplication(context);
- } catch (Throwable e) {
- verifier.verifyError(e);
- } finally {
- // Make sure the verifier runs with expected times
- // This is required because in case throwable is swallowed
- // and verifyError never gets the chance to run
- verify(verifier, times(verifier.getExpectedTimes()))
- .verifyError(any(Throwable.class));
- spyClient.stop();
- }
+ try {
+ spyClient.submitApplication(context);
+ } catch (Throwable e) {
+ verifier.verifyError(e);
+ } finally {
+ // Make sure the verifier runs with expected times
+ // This is required because in case throwable is swallowed
+ // and verifyError never gets the chance to run
+ verify(verifier, times(verifier.getExpectedTimes()))
+ .verifyError(any(Throwable.class));
+ spyClient.stop();
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java
new file mode 100644
index 0000000000..dd0aa5c1a0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientImpl.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager
+ .ParameterizedSchedulerTestBase;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit tests for {@link YarnClientImpl}.
+ */
+public class TestYarnClientImpl extends ParameterizedSchedulerTestBase {
+
+ public TestYarnClientImpl(SchedulerType type) throws IOException {
+ super(type);
+ }
+
+ @Before
+ public void setup() {
+ QueueMetrics.clearQueueMetrics();
+ DefaultMetricsSystem.setMiniClusterMode(true);
+ }
+
+ @Test
+ public void testStartWithTimelineV15() {
+ Configuration conf = getConf();
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+ conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f);
+ YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient();
+ client.init(conf);
+ client.start();
+ client.stop();
+ }
+
+ @Test
+ public void testAsyncAPIPollTimeout() {
+ testAsyncAPIPollTimeoutHelper(null, false);
+ testAsyncAPIPollTimeoutHelper(0L, true);
+ testAsyncAPIPollTimeoutHelper(1L, true);
+ }
+
+ private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout,
+ boolean expectedTimeoutEnforcement) {
+ YarnClientImpl client = new YarnClientImpl();
+ try {
+ Configuration conf = getConf();
+ if (valueForTimeout != null) {
+ conf.setLong(
+ YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
+ valueForTimeout);
+ }
+
+ client.init(conf);
+
+ Assert.assertEquals(
+ expectedTimeoutEnforcement, client.enforceAsyncAPITimeout());
+ } finally {
+ IOUtils.closeQuietly(client);
+ }
+ }
+
+ @Test
+ public void testBestEffortTimelineDelegationToken()
+ throws Exception {
+ Configuration conf = getConf();
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+ SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+
+ YarnClientImpl client = spy(new YarnClientImpl() {
+
+ @Override
+ TimelineClient createTimelineClient() throws IOException, YarnException {
+ timelineClient = mock(TimelineClient.class);
+ when(timelineClient.getDelegationToken(any(String.class)))
+ .thenThrow(new RuntimeException("Best effort test exception"));
+ return timelineClient;
+ }
+ });
+
+ client.init(conf);
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT,
+ true);
+ client.serviceInit(conf);
+ client.getTimelineDelegationToken();
+
+ try {
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false);
+ client.serviceInit(conf);
+ client.getTimelineDelegationToken();
+ Assert.fail("Get delegation token should have thrown an exception");
+ } catch (IOException e) {
+ // Success
+ }
+ }
+
+ @Test
+ public void testAutomaticTimelineDelegationTokenLoading()
+ throws Exception {
+ Configuration conf = getConf();
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+ SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+ TimelineDelegationTokenIdentifier timelineDT =
+ new TimelineDelegationTokenIdentifier();
+ final Token<TimelineDelegationTokenIdentifier> dToken =
+ new Token<>(
+ timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text());
+ // create a mock client
+ YarnClientImpl client = spy(new YarnClientImpl() {
+
+ @Override
+ TimelineClient createTimelineClient() throws IOException, YarnException {
+ timelineClient = mock(TimelineClient.class);
+ when(timelineClient.getDelegationToken(any(String.class)))
+ .thenReturn(dToken);
+ return timelineClient;
+ }
+
+
+ @Override
+ protected void serviceStart() {
+ rmClient = mock(ApplicationClientProtocol.class);
+ }
+
+ @Override
+ protected void serviceStop() {
+ }
+
+ @Override
+ public ApplicationReport getApplicationReport(ApplicationId appId) {
+ ApplicationReport report = mock(ApplicationReport.class);
+ when(report.getYarnApplicationState())
+ .thenReturn(YarnApplicationState.RUNNING);
+ return report;
+ }
+
+ @Override
+ public boolean isSecurityEnabled() {
+ return true;
+ }
+ });
+ client.init(conf);
+ client.start();
+ try {
+ // when i == 0, timeline DT already exists, no need to get one more
+ // when i == 1, timeline DT doesn't exist, need to get one more
+ for (int i = 0; i < 2; ++i) {
+ ApplicationSubmissionContext context =
+ mock(ApplicationSubmissionContext.class);
+ ApplicationId applicationId = ApplicationId.newInstance(0, i + 1);
+ when(context.getApplicationId()).thenReturn(applicationId);
+ DataOutputBuffer dob = new DataOutputBuffer();
+ Credentials credentials = new Credentials();
+ if (i == 0) {
+ credentials.addToken(client.timelineService, dToken);
+ }
+ credentials.writeTokenStorageToStream(dob);
+ ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+ ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
+ null, null, null, null, tokens, null);
+ when(context.getAMContainerSpec()).thenReturn(clc);
+ client.submitApplication(context);
+ if (i == 0) {
+ // GetTimelineDelegationToken shouldn't be called
+ verify(client, never()).getTimelineDelegationToken();
+ }
+ // In either way, token should be there
+ credentials = new Credentials();
+ DataInputByteBuffer dibb = new DataInputByteBuffer();
+ tokens = clc.getTokens();
+ if (tokens != null) {
+ dibb.reset(tokens);
+ credentials.readTokenStorageStream(dibb);
+ tokens.rewind();
+ }
+ Collection<Token<? extends TokenIdentifier>> dTokens =
+ credentials.getAllTokens();
+ Assert.assertEquals(1, dTokens.size());
+ Assert.assertEquals(dToken, dTokens.iterator().next());
+ }
+ } finally {
+ client.stop();
+ }
+ }
+
+ @Test
+ public void testParseTimelineDelegationTokenRenewer() {
+ // Client side
+ YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient();
+ Configuration conf = getConf();
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM");
+ conf.set(
+ YarnConfiguration.RM_ADDRESS, "localhost:8188");
+ try {
+ client.init(conf);
+ client.start();
+ Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer);
+ } finally {
+ client.stop();
+ }
+ }
+}
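
The Credentials round-trip exercised by testAutomaticTimelineDelegationTokenLoading above can be sketched in isolation. The snippet below is a minimal, standalone illustration (the class name TokenRoundTripSketch and the alias "timeline-token-alias" are invented for the example); it reuses only Hadoop classes already imported by the test and mirrors how a delegation token is packed into the ByteBuffer carried in a ContainerLaunchContext's tokens field and unpacked again.

import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

public class TokenRoundTripSketch {
  public static void main(String[] args) throws Exception {
    TimelineDelegationTokenIdentifier id = new TimelineDelegationTokenIdentifier();
    Token<TimelineDelegationTokenIdentifier> token =
        new Token<>(id.getBytes(), new byte[0], id.getKind(), new Text());

    // Pack the token into a Credentials blob, as the client does before
    // attaching it to the AM container launch context.
    Credentials written = new Credentials();
    written.addToken(new Text("timeline-token-alias"), token); // alias is illustrative
    DataOutputBuffer dob = new DataOutputBuffer();
    written.writeTokenStorageToStream(dob);
    ByteBuffer blob = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Unpack it again, mirroring what the test asserts on clc.getTokens().
    Credentials read = new Credentials();
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(blob);
    read.readTokenStorageStream(dibb);
    blob.rewind();
    System.out.println("tokens recovered: " + read.getAllTokens().size()); // expect 1
  }
}
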
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index af1440a56e..eddcbaae67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -242,7 +242,7 @@
<exclude>src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd</exclude>
<exclude>src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js</exclude>
<exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
- <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js</exclude>
+ <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js</exclude>
<exclude>src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js</exclude>
<exclude>src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css</exclude>
<exclude>src/test/resources/application_1440536969523_0001.har/_index</exclude>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 0d045f36a9..0e9f0a77be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -83,6 +83,7 @@ static class ServletStruct {
public String name;
public String spec;
public Map<String, String> params;
+ public boolean loadExistingFilters = true;
}
final String name;
@@ -151,12 +152,13 @@ public Builder withServlet(String name, String pathSpec,
public Builder<T> withServlet(String name, String pathSpec,
Class<? extends HttpServlet> servlet,
- Map<String, String> params) {
+ Map<String, String> params, boolean loadExistingFilters) {
ServletStruct struct = new ServletStruct();
struct.clazz = servlet;
struct.name = name;
struct.spec = pathSpec;
struct.params = params;
+ struct.loadExistingFilters = loadExistingFilters;
servlets.add(struct);
return this;
}
@@ -256,9 +258,15 @@ public void setup() {
pathList.add("/" + wsName + "/*");
}
}
+
for (ServletStruct s : servlets) {
if (!pathList.contains(s.spec)) {
- pathList.add(s.spec);
+ // The servlet told us to not load-existing filters, but we still want
+ // to add the default authentication filter always, so add it to the
+ // pathList
+ if (!s.loadExistingFilters) {
+ pathList.add(s.spec);
+ }
}
}
if (conf == null) {
@@ -333,7 +341,7 @@ public void setup() {
HttpServer2 server = builder.build();
for(ServletStruct struct: servlets) {
- if (struct.params != null) {
+ if (!struct.loadExistingFilters) {
server.addInternalServlet(struct.name, struct.spec,
struct.clazz, struct.params);
} else {
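
To see how the new five-argument withServlet overload is meant to be called, here is a minimal, hypothetical wiring sketch (the servlet class, servlet name, path spec and params map are invented for illustration). Passing false for loadExistingFilters makes WebApps register the servlet through addInternalServlet, while the setup() change above still adds its path spec to pathList so the default authentication filter covers it.

import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServlet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;

public class ServletWiringSketch {
  // Placeholder servlet purely for illustration.
  public static class MyLogServlet extends HttpServlet { }

  public static WebApp start(Object controller, Configuration conf) {
    Map<String, String> params = new HashMap<>(); // illustrative init params
    return WebApps.$for("cluster", controller)
        .withServlet("mylogs", "/mylogs/*", MyLogServlet.class,
            params, false) // false => do not load existing filters
        .with(conf)
        .start();
  }
}
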
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index d4fba1f241..91e5f89df7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -68,7 +68,7 @@ protected void render(Block html) {
html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css"))
.link(root_url("static/dt-1.9.4/css/jui-dt.css"))
.script(root_url("static/jquery/jquery-3.3.1.min.js"))
- .script(root_url("static/jquery/jquery-ui-1.9.1.custom.min.js"))
+ .script(root_url("static/jquery/jquery-ui-1.12.1.custom.min.js"))
.script(root_url("static/dt-1.9.4/js/jquery.dataTables.min.js"))
.script(root_url("static/yarn.dt.plugins.js"))
.script(root_url("static/dt-sorting/natural.js"))
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js
new file mode 100644
index 0000000000..25398a1674
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js
@@ -0,0 +1,13 @@
+/*! jQuery UI - v1.12.1 - 2016-09-14
+* http://jqueryui.com
+* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js
+* Copyright jQuery Foundation and other contributors; Licensed MIT */
+
+(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)})(function(t){function e(t){for(var e=t.css("visibility");"inherit"===e;)t=t.parent(),e=t.css("visibility");return"hidden"!==e}function i(t){for(var e,i;t.length&&t[0]!==document;){if(e=t.css("position"),("absolute"===e||"relative"===e||"fixed"===e)&&(i=parseInt(t.css("zIndex"),10),!isNaN(i)&&0!==i))return i;t=t.parent()}return 0}function s(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},t.extend(this._defaults,this.regional[""]),this.regional.en=t.extend(!0,{},this.regional[""]),this.regional["en-US"]=t.extend(!0,{},this.regional.en),this.dpDiv=n(t(""))}function n(e){var i="button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";return e.on("mouseout",i,function(){t(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).removeClass("ui-datepicker-next-hover")}).on("mouseover",i,o)}function o(){t.datepicker._isDisabledDatepicker(m.inline?m.dpDiv.parent()[0]:m.input[0])||(t(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),t(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).addClass("ui-datepicker-next-hover"))}function a(e,i){t.extend(e,i);for(var s in i)null==i[s]&&(e[s]=i[s]);return e}function r(t){return function(){var e=this.element.val();t.apply(this,arguments),this._refresh(),e!==this.element.val()&&this._trigger("change")}}t.ui=t.ui||{},t.ui.version="1.12.1";var h=0,l=Array.prototype.slice;t.cleanData=function(e){return function(i){var 
s,n,o;for(o=0;null!=(n=i[o]);o++)try{s=t._data(n,"events"),s&&s.remove&&t(n).triggerHandler("remove")}catch(a){}e(i)}}(t.cleanData),t.widget=function(e,i,s){var n,o,a,r={},h=e.split(".")[0];e=e.split(".")[1];var l=h+"-"+e;return s||(s=i,i=t.Widget),t.isArray(s)&&(s=t.extend.apply(null,[{}].concat(s))),t.expr[":"][l.toLowerCase()]=function(e){return!!t.data(e,l)},t[h]=t[h]||{},n=t[h][e],o=t[h][e]=function(t,e){return this._createWidget?(arguments.length&&this._createWidget(t,e),void 0):new o(t,e)},t.extend(o,n,{version:s.version,_proto:t.extend({},s),_childConstructors:[]}),a=new i,a.options=t.widget.extend({},a.options),t.each(s,function(e,s){return t.isFunction(s)?(r[e]=function(){function t(){return i.prototype[e].apply(this,arguments)}function n(t){return i.prototype[e].apply(this,t)}return function(){var e,i=this._super,o=this._superApply;return this._super=t,this._superApply=n,e=s.apply(this,arguments),this._super=i,this._superApply=o,e}}(),void 0):(r[e]=s,void 0)}),o.prototype=t.widget.extend(a,{widgetEventPrefix:n?a.widgetEventPrefix||e:e},r,{constructor:o,namespace:h,widgetName:e,widgetFullName:l}),n?(t.each(n._childConstructors,function(e,i){var s=i.prototype;t.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete n._childConstructors):i._childConstructors.push(o),t.widget.bridge(e,o),o},t.widget.extend=function(e){for(var i,s,n=l.call(arguments,1),o=0,a=n.length;a>o;o++)for(i in n[o])s=n[o][i],n[o].hasOwnProperty(i)&&void 0!==s&&(e[i]=t.isPlainObject(s)?t.isPlainObject(e[i])?t.widget.extend({},e[i],s):t.widget.extend({},s):s);return e},t.widget.bridge=function(e,i){var s=i.prototype.widgetFullName||e;t.fn[e]=function(n){var o="string"==typeof n,a=l.call(arguments,1),r=this;return o?this.length||"instance"!==n?this.each(function(){var i,o=t.data(this,s);return"instance"===n?(r=o,!1):o?t.isFunction(o[n])&&"_"!==n.charAt(0)?(i=o[n].apply(o,a),i!==o&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+n+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+n+"'")}):r=void 0:(a.length&&(n=t.widget.extend.apply(null,[n].concat(a))),this.each(function(){var e=t.data(this,s);e?(e.option(n||{}),e._init&&e._init()):t.data(this,s,new i(n,this))})),r}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"
").text(i.label)).appendTo(e)},_move:function(t,e){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[t](e),void 0):(this.search(null,e),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),t.extend(t.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(e,i){var s=RegExp(t.ui.autocomplete.escapeRegex(i),"i");return t.grep(e,function(t){return s.test(t.label||t.value||t)})}}),t.widget("ui.autocomplete",t.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(t>1?" results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(e){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=e&&e.length?this.options.messages.results(e.length):this.options.messages.noResults,this.liveRegion.children().hide(),t("