Merge remote-tracking branch 'apache-commit/trunk' into HDDS-48
commit c104525168
@@ -196,6 +196,14 @@ by Google Inc, which can be obtained at:
   * HOMEPAGE:
     * http://code.google.com/p/snappy/

+This product contains a modified portion of UnsignedBytes LexicographicalComparator
+from Guava v21 project by Google Inc, which can be obtained at:
+
+  * LICENSE:
+    * license/COPYING (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/google/guava
+
 This product optionally depends on 'JBoss Marshalling', an alternative Java
 serialization API, which can be obtained at:

@@ -145,6 +145,8 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
 run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+mkdir -p "./share/hadoop/ozonefs"
+cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
 # Optional documentation, could be missing
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
@@ -153,5 +155,5 @@ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdd
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn
 echo
-echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
+echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
 echo
@@ -166,10 +166,6 @@
         <groupId>commons-io</groupId>
         <artifactId>commons-io</artifactId>
       </exclusion>
-      <exclusion>
-        <groupId>commons-lang</groupId>
-        <artifactId>commons-lang</artifactId>
-      </exclusion>
       <exclusion>
         <groupId>commons-logging</groupId>
         <artifactId>commons-logging</artifactId>
@@ -495,10 +491,6 @@
         <groupId>commons-codec</groupId>
         <artifactId>commons-codec</artifactId>
       </exclusion>
-      <exclusion>
-        <groupId>commons-lang</groupId>
-        <artifactId>commons-lang</artifactId>
-      </exclusion>
       <exclusion>
         <groupId>commons-logging</groupId>
         <artifactId>commons-logging</artifactId>
@@ -156,11 +156,6 @@
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>commons-beanutils</groupId>
       <artifactId>commons-beanutils</artifactId>
@@ -3189,25 +3189,25 @@ public void addTags(Properties prop) {
       if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) {
         String systemTags = prop.getProperty(CommonConfigurationKeys
             .HADOOP_TAGS_SYSTEM);
-        Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+        TAGS.addAll(Arrays.asList(systemTags.split(",")));
       }
       // Get all custom tags
       if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) {
         String customTags = prop.getProperty(CommonConfigurationKeys
             .HADOOP_TAGS_CUSTOM);
-        Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+        TAGS.addAll(Arrays.asList(customTags.split(",")));
       }

       if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) {
         String systemTags = prop.getProperty(CommonConfigurationKeys
             .HADOOP_SYSTEM_TAGS);
-        Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+        TAGS.addAll(Arrays.asList(systemTags.split(",")));
       }
       // Get all custom tags
       if (prop.containsKey(CommonConfigurationKeys.HADOOP_CUSTOM_TAGS)) {
         String customTags = prop.getProperty(CommonConfigurationKeys
             .HADOOP_CUSTOM_TAGS);
-        Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+        TAGS.addAll(Arrays.asList(customTags.split(",")));
       }

     } catch (Exception ex) {
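For readers skimming the addTags() hunk above: the change swaps a per-element stream/forEach for a single bulk addAll over the split values. A minimal standalone sketch of the two idioms, with a made-up property value (only the TAGS set and the comma-separated format come from the hunk, everything else is illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class TagSplitDemo {
  public static void main(String[] args) {
    String systemTags = "REQUIRED,PERFORMANCE,SECURITY";

    // Old idiom from the left-hand side of the hunk: stream + forEach.
    Set<String> oldStyle = new HashSet<>();
    Arrays.stream(systemTags.split(",")).forEach(tag -> oldStyle.add(tag));

    // New idiom from the right-hand side: one bulk addAll.
    Set<String> newStyle = new HashSet<>();
    newStyle.addAll(Arrays.asList(systemTags.split(",")));

    // Both produce the same set of tags.
    System.out.println(oldStyle.equals(newStyle)); // true
  }
}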
@@ -18,7 +18,7 @@

 package org.apache.hadoop.conf;

-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;

 import java.util.Collection;
 import java.util.Enumeration;
@@ -72,10 +72,10 @@ private Reconfigurable getReconfigurable(HttpServletRequest req) {
   private void printHeader(PrintWriter out, String nodeName) {
     out.print("<html><head>");
     out.printf("<title>%s Reconfiguration Utility</title>%n",
-        StringEscapeUtils.escapeHtml(nodeName));
+        StringEscapeUtils.escapeHtml4(nodeName));
     out.print("</head><body>\n");
     out.printf("<h1>%s Reconfiguration Utility</h1>%n",
-        StringEscapeUtils.escapeHtml(nodeName));
+        StringEscapeUtils.escapeHtml4(nodeName));
   }

   private void printFooter(PrintWriter out) {
@@ -103,20 +103,20 @@ private void printConf(PrintWriter out, Reconfigurable reconf) {
       out.print("<tr><td>");
       if (!reconf.isPropertyReconfigurable(c.prop)) {
         out.print("<font color=\"red\">" +
-            StringEscapeUtils.escapeHtml(c.prop) + "</font>");
+            StringEscapeUtils.escapeHtml4(c.prop) + "</font>");
         changeOK = false;
       } else {
-        out.print(StringEscapeUtils.escapeHtml(c.prop));
+        out.print(StringEscapeUtils.escapeHtml4(c.prop));
         out.print("<input type=\"hidden\" name=\"" +
-            StringEscapeUtils.escapeHtml(c.prop) + "\" value=\"" +
-            StringEscapeUtils.escapeHtml(c.newVal) + "\"/>");
+            StringEscapeUtils.escapeHtml4(c.prop) + "\" value=\"" +
+            StringEscapeUtils.escapeHtml4(c.newVal) + "\"/>");
       }
       out.print("</td><td>" +
           (c.oldVal == null ? "<it>default</it>" :
-              StringEscapeUtils.escapeHtml(c.oldVal)) +
+              StringEscapeUtils.escapeHtml4(c.oldVal)) +
           "</td><td>" +
           (c.newVal == null ? "<it>default</it>" :
-              StringEscapeUtils.escapeHtml(c.newVal)) +
+              StringEscapeUtils.escapeHtml4(c.newVal)) +
           "</td>");
       out.print("</tr>\n");
     }
@@ -147,9 +147,9 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
     synchronized(oldConf) {
       while (params.hasMoreElements()) {
         String rawParam = params.nextElement();
-        String param = StringEscapeUtils.unescapeHtml(rawParam);
+        String param = StringEscapeUtils.unescapeHtml4(rawParam);
         String value =
-            StringEscapeUtils.unescapeHtml(req.getParameter(rawParam));
+            StringEscapeUtils.unescapeHtml4(req.getParameter(rawParam));
         if (value != null) {
           if (value.equals(newConf.getRaw(param)) || value.equals("default") ||
               value.equals("null") || value.isEmpty()) {
@@ -157,8 +157,8 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
                 value.isEmpty()) &&
                 oldConf.getRaw(param) != null) {
               out.println("<p>Changed \"" +
-                  StringEscapeUtils.escapeHtml(param) + "\" from \"" +
-                  StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) +
+                  StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
+                  StringEscapeUtils.escapeHtml4(oldConf.getRaw(param)) +
                   "\" to default</p>");
               reconf.reconfigureProperty(param, null);
             } else if (!value.equals("default") && !value.equals("null") &&
@@ -168,16 +168,16 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
               // change from default or value to different value
               if (oldConf.getRaw(param) == null) {
                 out.println("<p>Changed \"" +
-                    StringEscapeUtils.escapeHtml(param) +
+                    StringEscapeUtils.escapeHtml4(param) +
                     "\" from default to \"" +
-                    StringEscapeUtils.escapeHtml(value) + "\"</p>");
+                    StringEscapeUtils.escapeHtml4(value) + "\"</p>");
               } else {
                 out.println("<p>Changed \"" +
-                    StringEscapeUtils.escapeHtml(param) + "\" from \"" +
-                    StringEscapeUtils.escapeHtml(oldConf.
+                    StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
+                    StringEscapeUtils.escapeHtml4(oldConf.
                         getRaw(param)) +
                     "\" to \"" +
-                    StringEscapeUtils.escapeHtml(value) + "\"</p>");
+                    StringEscapeUtils.escapeHtml4(value) + "\"</p>");
               }
               reconf.reconfigureProperty(param, value);
             } else {
@@ -185,10 +185,10 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
             }
           } else {
             // parameter value != newConf value
-            out.println("<p>\"" + StringEscapeUtils.escapeHtml(param) +
+            out.println("<p>\"" + StringEscapeUtils.escapeHtml4(param) +
                 "\" not changed because value has changed from \"" +
-                StringEscapeUtils.escapeHtml(value) + "\" to \"" +
-                StringEscapeUtils.escapeHtml(newConf.getRaw(param)) +
+                StringEscapeUtils.escapeHtml4(value) + "\" to \"" +
+                StringEscapeUtils.escapeHtml4(newConf.getRaw(param)) +
                 "\" since approval</p>");
           }
         }
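The hunks above belong to the commons-lang to commons-lang3 migration: the class moves to the org.apache.commons.lang3 package and the HTML helpers gain a "4" suffix (HTML 4 entity rules). A minimal sketch of the renamed calls, assuming org.apache.commons:commons-lang3 is on the classpath; the sample string is made up:

import org.apache.commons.lang3.StringEscapeUtils;

public class EscapeDemo {
  public static void main(String[] args) {
    String raw = "dfs.data.dir & <friends>";

    // escapeHtml4 replaces the old escapeHtml from commons-lang 2.x.
    String escaped = StringEscapeUtils.escapeHtml4(raw);
    System.out.println(escaped); // dfs.data.dir &amp; &lt;friends&gt;

    // unescapeHtml4 round-trips back to the original text.
    System.out.println(StringEscapeUtils.unescapeHtml4(escaped));
  }
}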
@@ -33,8 +33,8 @@

 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -27,7 +27,7 @@

 import com.google.common.annotations.VisibleForTesting;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
@@ -32,7 +32,9 @@
 import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -40,6 +42,7 @@
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
@@ -77,7 +80,6 @@
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;

 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
@@ -130,9 +132,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,

   private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;

-  private static final ObjectWriter WRITER =
-      new ObjectMapper().writerWithDefaultPrettyPrinter();
-
   private final Text dtService;

   // Allow fallback to default kms server port 9600 for certain tests that do
@@ -235,7 +234,7 @@ public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
   private static void writeJson(Object obj, OutputStream os)
       throws IOException {
     Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
-    WRITER.writeValue(writer, obj);
+    JsonSerialization.writer().writeValue(writer, obj);
   }

   /**
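The writeJson() change above drops the class-local, statically initialized ObjectWriter in favour of the shared writer returned by org.apache.hadoop.util.JsonSerialization.writer(). A sketch of the underlying pattern (one reusable pretty-printing Jackson writer instead of one ObjectMapper per class); the SharedJson helper name and sample payload are illustrative only:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;

final class SharedJson {
  // One process-wide pretty-printing writer; Jackson's ObjectWriter is
  // immutable and thread-safe, so it can be shared freely.
  private static final ObjectWriter WRITER =
      new ObjectMapper().writerWithDefaultPrettyPrinter();

  static void writeJson(Object obj, OutputStream os) throws IOException {
    Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
    WRITER.writeValue(writer, obj);
  }

  public static void main(String[] args) throws IOException {
    writeJson(Collections.singletonMap("name", "key1"), System.out);
  }
}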
@@ -543,7 +542,9 @@ private <T> T call(HttpURLConnection conn, Object jsonOutput,
       String requestMethod = conn.getRequestMethod();
       URL url = conn.getURL();
       conn = createConnection(url, requestMethod);
-      conn.setRequestProperty(CONTENT_TYPE, contentType);
+      if (contentType != null && !contentType.isEmpty()) {
+        conn.setRequestProperty(CONTENT_TYPE, contentType);
+      }
       return call(conn, jsonOutput, expectedResponse, klass,
           authRetryCount - 1);
     }
@@ -1087,8 +1088,7 @@ private UserGroupInformation getActualUgi() throws IOException {
       actualUgi = currentUgi.getRealUser();
     }
     if (UserGroupInformation.isSecurityEnabled() &&
-        !containsKmsDt(actualUgi) &&
-        !actualUgi.hasKerberosCredentials()) {
+        !containsKmsDt(actualUgi) && !actualUgi.shouldRelogin()) {
       // Use login user is only necessary when Kerberos is enabled
       // but the actual user does not have either
       // Kerberos credential or KMS delegation token for KMS operations
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Byte array backed part handle.
+ */
+public final class BBPartHandle implements PartHandle {
+
+  private static final long serialVersionUID = 0x23ce3eb1;
+
+  private final byte[] bytes;
+
+  private BBPartHandle(ByteBuffer byteBuffer){
+    this.bytes = byteBuffer.array();
+  }
+
+  public static PartHandle from(ByteBuffer byteBuffer) {
+    return new BBPartHandle(byteBuffer);
+  }
+
+  @Override
+  public ByteBuffer bytes() {
+    return ByteBuffer.wrap(bytes);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(bytes);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof PartHandle)) {
+      return false;
+
+    }
+    PartHandle o = (PartHandle) other;
+    return bytes().equals(o.bytes());
+  }
+}
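A minimal round-trip sketch for the byte-buffer backed handle added above. It only uses methods visible in the hunk (from(), bytes(), the default toByteArray()); the sample payload is made up for illustration:

import org.apache.hadoop.fs.BBPartHandle;
import org.apache.hadoop.fs.PartHandle;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class PartHandleRoundTrip {
  public static void main(String[] args) {
    byte[] payload = "s3://bucket/key?partNumber=1".getBytes(StandardCharsets.UTF_8);

    // from() keeps the buffer's backing array, so pass an array-backed buffer.
    PartHandle handle = BBPartHandle.from(ByteBuffer.wrap(payload));

    // toByteArray() copies the serialized form out of the buffer returned by
    // bytes(), which makes the handle easy to ship across processes.
    byte[] wire = handle.toByteArray();
    PartHandle restored = BBPartHandle.from(ByteBuffer.wrap(wire));

    System.out.println(handle.equals(restored)); // true: equality is byte-wise
  }
}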
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Byte array backed upload handle.
+ */
+public final class BBUploadHandle implements UploadHandle {
+
+  private static final long serialVersionUID = 0x69d5509b;
+
+  private final byte[] bytes;
+
+  private BBUploadHandle(ByteBuffer byteBuffer){
+    this.bytes = byteBuffer.array();
+  }
+
+  public static UploadHandle from(ByteBuffer byteBuffer) {
+    return new BBUploadHandle(byteBuffer);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(bytes);
+  }
+
+  @Override
+  public ByteBuffer bytes() {
+    return ByteBuffer.wrap(bytes);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof UploadHandle)) {
+      return false;
+    }
+    UploadHandle o = (UploadHandle) other;
+    return bytes().equals(o.bytes());
+  }
+}
@@ -542,7 +542,7 @@ public class CommonConfigurationKeysPublic {
   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
   * core-default.xml</a>
   */
-  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY =
       "hadoop.security.groups.shell.command.timeout";
   /**
   * @see
@@ -550,7 +550,7 @@ public class CommonConfigurationKeysPublic {
   * core-default.xml</a>
   */
   public static final long
-      HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+      HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT =
           0L;
   /**
   * @see
@@ -115,6 +115,27 @@ public abstract class FSDataOutputStreamBuilder
    */
   protected abstract B getThisBuilder();

+  /**
+   * Construct from a {@link FileContext}.
+   *
+   * @param fc FileContext
+   * @param p path.
+   * @throws IOException
+   */
+  FSDataOutputStreamBuilder(@Nonnull FileContext fc,
+      @Nonnull Path p) throws IOException {
+    Preconditions.checkNotNull(fc);
+    Preconditions.checkNotNull(p);
+    this.fs = null;
+    this.path = p;
+
+    AbstractFileSystem afs = fc.getFSofPath(p);
+    FsServerDefaults defaults = afs.getServerDefaults(p);
+    bufferSize = defaults.getFileBufferSize();
+    replication = defaults.getReplication();
+    blockSize = defaults.getBlockSize();
+  }
+
   /**
    * Constructor.
    */
@@ -131,6 +152,7 @@ protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem,
   }

   protected FileSystem getFS() {
+    Preconditions.checkNotNull(fs);
     return fs;
   }

@@ -24,6 +24,7 @@
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -35,6 +36,8 @@
 import java.util.TreeSet;
 import java.util.Map.Entry;

+import javax.annotation.Nonnull;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -694,6 +697,69 @@ public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
     }.resolve(this, absF);
   }

+  /**
+   * {@link FSDataOutputStreamBuilder} for {@link FileContext}.
+   */
+  private static final class FCDataOutputStreamBuilder extends
+      FSDataOutputStreamBuilder<
+        FSDataOutputStream, FCDataOutputStreamBuilder> {
+    private final FileContext fc;
+
+    private FCDataOutputStreamBuilder(
+        @Nonnull FileContext fc, @Nonnull Path p) throws IOException {
+      super(fc, p);
+      this.fc = fc;
+      Preconditions.checkNotNull(fc);
+    }
+
+    @Override
+    protected FCDataOutputStreamBuilder getThisBuilder() {
+      return this;
+    }
+
+    @Override
+    public FSDataOutputStream build() throws IOException {
+      final EnumSet<CreateFlag> flags = getFlags();
+      List<CreateOpts> createOpts = new ArrayList<>(Arrays.asList(
+          CreateOpts.blockSize(getBlockSize()),
+          CreateOpts.bufferSize(getBufferSize()),
+          CreateOpts.repFac(getReplication()),
+          CreateOpts.perms(getPermission())
+      ));
+      if (getChecksumOpt() != null) {
+        createOpts.add(CreateOpts.checksumParam(getChecksumOpt()));
+      }
+      if (getProgress() != null) {
+        createOpts.add(CreateOpts.progress(getProgress()));
+      }
+      if (isRecursive()) {
+        createOpts.add(CreateOpts.createParent());
+      }
+      return fc.create(getPath(), flags,
+          createOpts.toArray(new CreateOpts[0]));
+    }
+  }
+
+  /**
+   * Create a {@link FSDataOutputStreamBuilder} for creating or overwriting
+   * a file on indicated path.
+   *
+   * @param f the file path to create builder for.
+   * @return {@link FSDataOutputStreamBuilder} to build a
+   *         {@link FSDataOutputStream}.
+   *
+   * Upon {@link FSDataOutputStreamBuilder#build()} being invoked,
+   * builder parameters will be verified by {@link FileContext} and
+   * {@link AbstractFileSystem#create}. And filesystem states will be modified.
+   *
+   * Client should expect {@link FSDataOutputStreamBuilder#build()} throw the
+   * same exceptions as create(Path, EnumSet, CreateOpts...).
+   */
+  public FSDataOutputStreamBuilder<FSDataOutputStream, ?> create(final Path f)
+      throws IOException {
+    return new FCDataOutputStreamBuilder(this, f).create();
+  }
+
   /**
    * Make(create) a directory and all the non-existent parents.
    *
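A usage sketch for the new FileContext#create(Path) builder added above. The setters shown (recursive(), replication(), build()) come from the existing FSDataOutputStreamBuilder base class; the path and payload are made up, and the target is assumed not to exist yet:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

import java.nio.charset.StandardCharsets;

public class FileContextBuilderDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getLocalFSFileContext(new Configuration());
    Path target = new Path("/tmp/fc-builder-demo/part-0000");

    // create() pre-selects CreateFlag.CREATE; recursive() is translated to
    // CreateOpts.createParent() inside FCDataOutputStreamBuilder#build().
    try (FSDataOutputStream out = fc.create(target)
        .recursive()
        .replication((short) 1)
        .build()) {
      out.write("hello via builder".getBytes(StandardCharsets.UTF_8));
    }
  }
}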
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.common.base.Charsets;
+import org.apache.commons.compress.utils.IOUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A MultipartUploader that uses the basic FileSystem commands.
+ * This is done in three stages:
+ * Init - create a temp _multipart directory.
+ * PutPart - copying the individual parts of the file to the temp directory.
+ * Complete - use {@link FileSystem#concat} to merge the files; and then delete
+ * the temp directory.
+ */
+public class FileSystemMultipartUploader extends MultipartUploader {
+
+  private final FileSystem fs;
+
+  public FileSystemMultipartUploader(FileSystem fs) {
+    this.fs = fs;
+  }
+
+  @Override
+  public UploadHandle initialize(Path filePath) throws IOException {
+    Path collectorPath = createCollectorPath(filePath);
+    fs.mkdirs(collectorPath, FsPermission.getDirDefault());
+
+    ByteBuffer byteBuffer = ByteBuffer.wrap(
+        collectorPath.toString().getBytes(Charsets.UTF_8));
+    return BBUploadHandle.from(byteBuffer);
+  }
+
+  @Override
+  public PartHandle putPart(Path filePath, InputStream inputStream,
+      int partNumber, UploadHandle uploadId, long lengthInBytes)
+      throws IOException {
+
+    byte[] uploadIdByteArray = uploadId.toByteArray();
+    Path collectorPath = new Path(new String(uploadIdByteArray, 0,
+        uploadIdByteArray.length, Charsets.UTF_8));
+    Path partPath =
+        Path.mergePaths(collectorPath, Path.mergePaths(new Path(Path.SEPARATOR),
+            new Path(Integer.toString(partNumber) + ".part")));
+    FSDataOutputStreamBuilder outputStream = fs.createFile(partPath);
+    FSDataOutputStream fsDataOutputStream = outputStream.build();
+    IOUtils.copy(inputStream, fsDataOutputStream, 4096);
+    fsDataOutputStream.close();
+    return BBPartHandle.from(ByteBuffer.wrap(
+        partPath.toString().getBytes(Charsets.UTF_8)));
+  }
+
+  private Path createCollectorPath(Path filePath) {
+    return Path.mergePaths(filePath.getParent(),
+        Path.mergePaths(new Path(filePath.getName().split("\\.")[0]),
+            Path.mergePaths(new Path("_multipart"),
+                new Path(Path.SEPARATOR))));
+  }
+
+  @Override
+  @SuppressWarnings("deprecation") // rename w/ OVERWRITE
+  public PathHandle complete(Path filePath,
+      List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
+      throws IOException {
+    handles.sort(Comparator.comparing(Pair::getKey));
+    List<Path> partHandles = handles
+        .stream()
+        .map(pair -> {
+          byte[] byteArray = pair.getValue().toByteArray();
+          return new Path(new String(byteArray, 0, byteArray.length,
+              Charsets.UTF_8));
+        })
+        .collect(Collectors.toList());
+
+    Path collectorPath = createCollectorPath(filePath);
+    Path filePathInsideCollector = Path.mergePaths(collectorPath,
+        new Path(Path.SEPARATOR + filePath.getName()));
+    fs.create(filePathInsideCollector).close();
+    fs.concat(filePathInsideCollector,
+        partHandles.toArray(new Path[handles.size()]));
+    fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+    fs.delete(collectorPath, true);
+    FileStatus status = fs.getFileStatus(filePath);
+    return fs.getPathHandle(status);
+  }
+
+  @Override
+  public void abort(Path filePath, UploadHandle uploadId) throws IOException {
+    byte[] uploadIdByteArray = uploadId.toByteArray();
+    Path collectorPath = new Path(new String(uploadIdByteArray, 0,
+        uploadIdByteArray.length, Charsets.UTF_8));
+    fs.delete(collectorPath, true);
+  }
+
+  /**
+   * Factory for creating MultipartUploaderFactory objects for file://
+   * filesystems.
+   */
+  public static class Factory extends MultipartUploaderFactory {
+    protected MultipartUploader createMultipartUploader(FileSystem fs,
+        Configuration conf) {
+      if (fs.getScheme().equals("file")) {
+        return new FileSystemMultipartUploader(fs);
+      }
+      return null;
+    }
+  }
+}
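An end-to-end sketch of the multipart flow implemented above (init, stage parts under a _multipart directory, concat and rename on complete), run against the raw local file system, which later hunks in this merge teach to support concat() and path handles. Paths and part contents are illustrative:

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemMultipartUploader;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UploadHandle;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();
    MultipartUploader mpu = new FileSystemMultipartUploader(fs);

    Path target = new Path("/tmp/mpu-demo/final.txt");
    UploadHandle upload = mpu.initialize(target);

    // Parts may be uploaded in any order; the part number decides placement.
    List<Pair<Integer, PartHandle>> parts = new ArrayList<>();
    for (int i = 1; i <= 3; i++) {
      byte[] data = ("part-" + i + "\n").getBytes(StandardCharsets.UTF_8);
      PartHandle ph = mpu.putPart(target, new ByteArrayInputStream(data),
          i, upload, data.length);
      parts.add(Pair.of(i, ph));
    }

    // complete() concatenates the staged parts and renames onto the target.
    mpu.complete(target, parts, upload);
  }
}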
@@ -23,7 +23,6 @@
 import java.util.Arrays;
 import java.util.LinkedList;

-import org.apache.commons.lang.WordUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -275,7 +274,7 @@ private void printInstanceHelp(PrintStream out, Command instance) {
       listing = null;
     }

-    for (String descLine : WordUtils.wrap(
+    for (String descLine : StringUtils.wrap(
         line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
       out.println(prefix + descLine);
     }
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+import java.util.Optional;
+
+/**
+ * Opaque handle to an entity in a FileSystem.
+ */
+public class LocalFileSystemPathHandle implements PathHandle {
+
+  private final String path;
+  private final Long mtime;
+
+  public LocalFileSystemPathHandle(String path, Optional<Long> mtime) {
+    this.path = path;
+    this.mtime = mtime.orElse(null);
+  }
+
+  public LocalFileSystemPathHandle(ByteBuffer bytes) throws IOException {
+    if (null == bytes) {
+      throw new IOException("Missing PathHandle");
+    }
+    LocalFileSystemPathHandleProto p =
+        LocalFileSystemPathHandleProto.parseFrom(ByteString.copyFrom(bytes));
+    path = p.hasPath() ? p.getPath() : null;
+    mtime = p.hasMtime() ? p.getMtime() : null;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public void verify(FileStatus stat) throws InvalidPathHandleException {
+    if (null == stat) {
+      throw new InvalidPathHandleException("Could not resolve handle");
+    }
+    if (mtime != null && mtime != stat.getModificationTime()) {
+      throw new InvalidPathHandleException("Content changed");
+    }
+  }
+
+  @Override
+  public ByteBuffer bytes() {
+    LocalFileSystemPathHandleProto.Builder b =
+        LocalFileSystemPathHandleProto.newBuilder();
+    b.setPath(path);
+    if (mtime != null) {
+      b.setMtime(mtime);
+    }
+    return b.build().toByteString().asReadOnlyByteBuffer();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    LocalFileSystemPathHandle that = (LocalFileSystemPathHandle) o;
+    return Objects.equals(path, that.path) &&
+        Objects.equals(mtime, that.mtime);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(path, mtime);
+  }
+
+  @Override
+  public String toString() {
+    return "LocalFileSystemPathHandle{" +
+        "path='" + path + '\'' +
+        ", mtime=" + mtime +
+        '}';
+  }
+
+}
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * MultipartUploader is an interface for copying files multipart and across
+ * multiple nodes. Users should:
+ * 1. Initialize an upload
+ * 2. Upload parts in any order
+ * 3. Complete the upload in order to have it materialize in the destination FS.
+ *
+ * Implementers should make sure that the complete function should make sure
+ * that 'complete' will reorder parts if the destination FS doesn't already
+ * do it for them.
+ */
+public abstract class MultipartUploader {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(MultipartUploader.class);
+
+  /**
+   * Initialize a multipart upload.
+   * @param filePath Target path for upload.
+   * @return unique identifier associating part uploads.
+   * @throws IOException
+   */
+  public abstract UploadHandle initialize(Path filePath) throws IOException;
+
+  /**
+   * Put part as part of a multipart upload. It should be possible to have
+   * parts uploaded in any order (or in parallel).
+   * @param filePath Target path for upload (same as {@link #initialize(Path)}).
+   * @param inputStream Data for this part.
+   * @param partNumber Index of the part relative to others.
+   * @param uploadId Identifier from {@link #initialize(Path)}.
+   * @param lengthInBytes Target length to read from the stream.
+   * @return unique PartHandle identifier for the uploaded part.
+   * @throws IOException
+   */
+  public abstract PartHandle putPart(Path filePath, InputStream inputStream,
+      int partNumber, UploadHandle uploadId, long lengthInBytes)
+      throws IOException;
+
+  /**
+   * Complete a multipart upload.
+   * @param filePath Target path for upload (same as {@link #initialize(Path)}.
+   * @param handles Identifiers with associated part numbers from
+   *          {@link #putPart(Path, InputStream, int, UploadHandle, long)}.
+   *          Depending on the backend, the list order may be significant.
+   * @param multipartUploadId Identifier from {@link #initialize(Path)}.
+   * @return unique PathHandle identifier for the uploaded file.
+   * @throws IOException
+   */
+  public abstract PathHandle complete(Path filePath,
+      List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
+      throws IOException;
+
+  /**
+   * Aborts a multipart upload.
+   * @param filePath Target path for upload (same as {@link #initialize(Path)}.
+   * @param multipartuploadId Identifier from {@link #initialize(Path)}.
+   * @throws IOException
+   */
+  public abstract void abort(Path filePath, UploadHandle multipartuploadId)
+      throws IOException;
+
+}
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.ServiceLoader;
+
+/**
+ * {@link ServiceLoader}-driven uploader API for storage services supporting
+ * multipart uploads.
+ */
+public abstract class MultipartUploaderFactory {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(MultipartUploaderFactory.class);
+
+  /**
+   * Multipart Uploaders listed as services.
+   */
+  private static ServiceLoader<MultipartUploaderFactory> serviceLoader =
+      ServiceLoader.load(MultipartUploaderFactory.class,
+          MultipartUploaderFactory.class.getClassLoader());
+
+  // Iterate through the serviceLoader to avoid lazy loading.
+  // Lazy loading would require synchronization in concurrent use cases.
+  static {
+    Iterator<MultipartUploaderFactory> iterServices = serviceLoader.iterator();
+    while (iterServices.hasNext()) {
+      iterServices.next();
+    }
+  }
+
+  public static MultipartUploader get(FileSystem fs, Configuration conf)
+      throws IOException {
+    MultipartUploader mpu = null;
+    for (MultipartUploaderFactory factory : serviceLoader) {
+      mpu = factory.createMultipartUploader(fs, conf);
+      if (mpu != null) {
+        break;
+      }
+    }
+    return mpu;
+  }
+
+  protected abstract MultipartUploader createMultipartUploader(FileSystem fs,
+      Configuration conf) throws IOException;
+}
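Sketch of how the ServiceLoader-driven lookup above is used. For a factory to be discovered, its implementation class must be listed in a provider-configuration file on the classpath, per standard java.util.ServiceLoader rules; whether and where Hadoop ships such a resource for FileSystemMultipartUploader.Factory is not shown in this diff, so the file below is an assumption:

// META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory
// ---------------------------------------------------------------
// org.apache.hadoop.fs.FileSystemMultipartUploader$Factory

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.MultipartUploaderFactory;
import org.apache.hadoop.fs.Path;

public class UploaderLookupDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = new Path("file:///tmp").getFileSystem(conf);

    // Returns the first registered factory's uploader that accepts this
    // FileSystem, or null when no provider claims the scheme.
    MultipartUploader mpu = MultipartUploaderFactory.get(fs, conf);
    System.out.println(mpu == null ? "no uploader registered" : mpu.getClass());
  }
}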
@@ -55,6 +55,9 @@ public static ChecksumParam checksumParam(
         ChecksumOpt csumOpt) {
       return new ChecksumParam(csumOpt);
     }
+    public static Progress progress(Progressable prog) {
+      return new Progress(prog);
+    }
     public static Perms perms(FsPermission perm) {
       return new Perms(perm);
     }
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+
+/**
+ * Opaque, serializable reference to an part id for multipart uploads.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface PartHandle extends Serializable {
+  /**
+   * @return Serialized from in bytes.
+   */
+  default byte[] toByteArray() {
+    ByteBuffer bb = bytes();
+    byte[] ret = new byte[bb.remaining()];
+    bb.get(ret);
+    return ret;
+  }
+
+  ByteBuffer bytes();
+
+  @Override
+  boolean equals(Object other);
+}
@@ -27,7 +27,7 @@
 import java.util.regex.Pattern;

 import org.apache.avro.reflect.Stringable;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -40,6 +40,7 @@
 import java.nio.file.attribute.FileTime;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.Optional;
 import java.util.StringTokenizer;

 import org.apache.hadoop.classification.InterfaceAudience;
@@ -212,7 +213,19 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     return new FSDataInputStream(new BufferedFSInputStream(
         new LocalFSFileInputStream(f), bufferSize));
   }
+
+  @Override
+  public FSDataInputStream open(PathHandle fd, int bufferSize)
+      throws IOException {
+    if (!(fd instanceof LocalFileSystemPathHandle)) {
+      fd = new LocalFileSystemPathHandle(fd.bytes());
+    }
+    LocalFileSystemPathHandle id = (LocalFileSystemPathHandle) fd;
+    id.verify(getFileStatus(new Path(id.getPath())));
+    return new FSDataInputStream(new BufferedFSInputStream(
+        new LocalFSFileInputStream(new Path(id.getPath())), bufferSize));
+  }

   /*********************************************************
    * For create()'s FSOutputStream.
    *********************************************************/
@@ -246,7 +259,7 @@ private LocalFSFileOutputStream(Path f, boolean append,
         }
       }
     }

     /*
      * Just forward to the fos
      */
@@ -350,6 +363,18 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
     return out;
   }

+  @Override
+  public void concat(final Path trg, final Path [] psrcs) throws IOException {
+    final int bufferSize = 4096;
+    try(FSDataOutputStream out = create(trg)) {
+      for (Path src : psrcs) {
+        try(FSDataInputStream in = open(src)) {
+          IOUtils.copyBytes(in, out, bufferSize, false);
+        }
+      }
+    }
+  }
+
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
     // Attempt rename using Java API.
@@ -863,6 +888,38 @@ public void setTimes(Path p, long mtime, long atime) throws IOException {
     }
   }

+  /**
+   * Hook to implement support for {@link PathHandle} operations.
+   * @param stat Referent in the target FileSystem
+   * @param opts Constraints that determine the validity of the
+   *            {@link PathHandle} reference.
+   */
+  protected PathHandle createPathHandle(FileStatus stat,
+      Options.HandleOpt... opts) {
+    if (stat.isDirectory() || stat.isSymlink()) {
+      throw new IllegalArgumentException("PathHandle only available for files");
+    }
+    String authority = stat.getPath().toUri().getAuthority();
+    if (authority != null && !authority.equals("file://")) {
+      throw new IllegalArgumentException("Wrong FileSystem: " + stat.getPath());
+    }
+    Options.HandleOpt.Data data =
+        Options.HandleOpt.getOpt(Options.HandleOpt.Data.class, opts)
+            .orElse(Options.HandleOpt.changed(false));
+    Options.HandleOpt.Location loc =
+        Options.HandleOpt.getOpt(Options.HandleOpt.Location.class, opts)
+            .orElse(Options.HandleOpt.moved(false));
+    if (loc.allowChange()) {
+      throw new UnsupportedOperationException("Tracking file movement in " +
+          "basic FileSystem is not supported");
+    }
+    final Path p = stat.getPath();
+    final Optional<Long> mtime = !data.allowChange()
+        ? Optional.of(stat.getModificationTime())
+        : Optional.empty();
+    return new LocalFileSystemPathHandle(p.toString(), mtime);
+  }
+
   @Override
   public boolean supportsSymlinks() {
     return true;
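Usage sketch for the PathHandle support added to the raw local file system above: resolve a handle for a file, serialize it, and reopen the file through the handle. By default the handle records the path plus modification time, so a later content change makes verify() fail. File names are illustrative and error handling is omitted:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.io.IOUtils;

import java.nio.charset.StandardCharsets;

public class PathHandleDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration()).getRawFileSystem();
    Path file = new Path("/tmp/pathhandle-demo.txt");
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.write("tracked by mtime\n".getBytes(StandardCharsets.UTF_8));
    }

    // getPathHandle() delegates to the new createPathHandle() override.
    FileStatus stat = fs.getFileStatus(file);
    PathHandle handle = fs.getPathHandle(stat);

    // The handle is serializable; toByteArray() gives the wire form.
    byte[] wire = handle.toByteArray();
    System.out.println("handle size: " + wire.length + " bytes");

    // Reopen through the handle; verify() rejects it if the file changed.
    try (FSDataInputStream in = fs.open(handle, 4096)) {
      IOUtils.copyBytes(in, System.out, 4096, false);
    }
  }
}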
@@ -0,0 +1,41 @@
+/** Standard Apache License 2.0 header. */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * MultipartUploader for a given file system name/scheme is not supported.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class UnsupportedMultipartUploaderException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * Constructs exception with the specified detail message.
+   *
+   * @param message exception message.
+   */
+  public UnsupportedMultipartUploaderException(final String message) {
+    super(message);
+  }
+}
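Illustrative sketch, not part of this patch: how calling code might surface the new exception type when no multipart uploader is registered for a scheme (the helper class and its use are invented for the example).

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.UnsupportedMultipartUploaderException;

final class MultipartSupportCheck {
  private MultipartSupportCheck() {
  }

  // Hypothetical guard: translate "no uploader found" into the new exception.
  static void requireUploader(FileSystem fs, Object uploader) throws IOException {
    if (uploader == null) {
      throw new UnsupportedMultipartUploaderException(
          "Multipart upload is not supported for scheme " + fs.getScheme());
    }
  }
}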
@@ -0,0 +1,47 @@
+/* Standard Apache License 2.0 header. */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+
+/**
+ * Opaque, serializable reference to an uploadId for multipart uploads.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface UploadHandle extends Serializable {
+
+  /**
+   * @return Serialized from in bytes.
+   */
+  default byte[] toByteArray() {
+    ByteBuffer bb = bytes();
+    byte[] ret = new byte[bb.remaining()];
+    bb.get(ret);
+    return ret;
+  }
+
+  ByteBuffer bytes();
+
+  @Override
+  boolean equals(Object other);
+
+}
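Illustrative sketch, not part of this patch: a minimal class that could satisfy the UploadHandle contract above by wrapping an opaque byte payload (the class name and fields are invented for the example).

import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.hadoop.fs.UploadHandle;

final class BytesUploadHandle implements UploadHandle {
  private static final long serialVersionUID = 1L;
  private final byte[] payload;

  BytesUploadHandle(byte[] payload) {
    this.payload = payload.clone();
  }

  @Override
  public ByteBuffer bytes() {
    // Read-only view, so callers of the default toByteArray() cannot mutate the id.
    return ByteBuffer.wrap(payload).asReadOnlyBuffer();
  }

  @Override
  public boolean equals(Object other) {
    return other instanceof BytesUploadHandle
        && Arrays.equals(payload, ((BytesUploadHandle) other).payload);
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(payload);
  }
}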
@@ -23,7 +23,7 @@
 import java.util.LinkedList;
 import java.util.List;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -18,7 +18,7 @@
 package org.apache.hadoop.io;

 import com.google.common.collect.ComparisonChain;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;

 import java.nio.ByteBuffer;
 import java.util.Map;

@@ -26,7 +26,6 @@
 import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;

-import com.google.common.primitives.Longs;
 import com.google.common.primitives.UnsignedBytes;

 /**

@@ -195,52 +194,43 @@ public int compareTo(byte[] buffer1, int offset1, int length1,
         length1 == length2) {
       return 0;
     }
+    final int stride = 8;
     int minLength = Math.min(length1, length2);
-    int minWords = minLength / Longs.BYTES;
+    int strideLimit = minLength & ~(stride - 1);
     int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
     int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
+    int i;

     /*
      * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
      * time is no slower than comparing 4 bytes at a time even on 32-bit.
      * On the other hand, it is substantially faster on 64-bit.
      */
-    for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
+    for (i = 0; i < strideLimit; i += stride) {
       long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
       long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
-      long diff = lw ^ rw;
-
-      if (diff != 0) {
+      if (lw != rw) {
         if (!littleEndian) {
           return lessThanUnsigned(lw, rw) ? -1 : 1;
         }

-        // Use binary search
-        int n = 0;
-        int y;
-        int x = (int) diff;
-        if (x == 0) {
-          x = (int) (diff >>> 32);
-          n = 32;
-        }
-
-        y = x << 16;
-        if (y == 0) {
-          n += 16;
-        } else {
-          x = y;
-        }
-
-        y = x << 8;
-        if (y == 0) {
-          n += 8;
-        }
-        return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
+        /*
+         * We want to compare only the first index where left[index] !=
+         * right[index]. This corresponds to the least significant nonzero
+         * byte in lw ^ rw, since lw and rw are little-endian.
+         * Long.numberOfTrailingZeros(diff) tells us the least significant
+         * nonzero bit, and zeroing out the first three bits of L.nTZ gives
+         * us the shift to get that least significant nonzero byte. This
+         * comparison logic is based on UnsignedBytes from Guava v21
+         */
+        int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
+        return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
       }
     }

     // The epilogue to cover the last (minLength % 8) elements.
-    for (int i = minWords * Longs.BYTES; i < minLength; i++) {
+    for (; i < minLength; i++) {
       int result = UnsignedBytes.compare(
           buffer1[offset1 + i],
           buffer2[offset2 + i]);
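A small, self-contained check of the byte-extraction trick described in the comment above; the two words are arbitrary examples, not values from the patch.

public class TrailingZerosDemo {
  public static void main(String[] args) {
    // Two words that differ only in byte 1 (bits 8..15), little-endian order.
    long lw = 0x1122AA44L;
    long rw = 0x1122BB44L;

    // Least significant differing bit, rounded down to a byte boundary.
    int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
    System.out.println(n);                   // 8
    System.out.println((lw >>> n) & 0xFF);   // 170 (0xAA)
    System.out.println((rw >>> n) & 0xFF);   // 187 (0xBB)
    // Negative difference: buffer1 orders before buffer2, as in the loop above.
    System.out.println((int) ((lw >>> n) & 0xFF) - (int) ((rw >>> n) & 0xFF));
  }
}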
@@ -22,8 +22,8 @@
 import java.util.HashMap;
 import java.util.Map;

-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -46,7 +46,7 @@ public final class ErasureCodeNative {
       loadLibrary();
     } catch (Throwable t) {
       problem = "Loading ISA-L failed: " + t.getMessage();
-      LOG.error("Loading ISA-L failed", t);
+      LOG.warn(problem);
     }
     LOADING_FAILURE_REASON = problem;
   }

@@ -21,7 +21,7 @@
 import java.io.IOException;
 import java.io.FileDescriptor;

-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.slf4j.Logger;

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.ipc;

-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -39,7 +39,7 @@
 import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.AtomicDoubleArray;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.metrics2.MetricsCollector;

@@ -429,7 +429,7 @@ private void decayCurrentCounts() {
       updateAverageResponseTime(true);
     } catch (Exception ex) {
       LOG.error("decayCurrentCounts exception: " +
-          ExceptionUtils.getFullStackTrace(ex));
+          ExceptionUtils.getStackTrace(ex));
       throw ex;
     }
   }
@@ -32,7 +32,7 @@
 import java.util.concurrent.atomic.AtomicLong;

 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.metrics2.util.MBeans;

@@ -286,7 +286,7 @@ public int size() {
    */
   @Override
   public Iterator<E> iterator() {
-    throw new NotImplementedException();
+    throw new NotImplementedException("Code is not implemented");
   }

   /**
@@ -18,7 +18,7 @@

 package org.apache.hadoop.metrics2;

-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.codehaus.jackson.map.ObjectMapper;

@@ -37,10 +37,8 @@
 import org.apache.commons.configuration2.Configuration;
 import org.apache.commons.configuration2.PropertiesConfiguration;
 import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.configuration2.builder.fluent.Configurations;
-import org.apache.commons.configuration2.builder.fluent.Parameters;
-import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
 import org.apache.commons.configuration2.ex.ConfigurationException;
+import org.apache.commons.configuration2.io.FileHandler;
 import org.apache.hadoop.metrics2.MetricsFilter;
 import org.apache.hadoop.metrics2.MetricsPlugin;
 import org.apache.hadoop.metrics2.filter.GlobFilter;

@@ -112,12 +110,11 @@ static MetricsConfig create(String prefix, String... fileNames) {
   static MetricsConfig loadFirst(String prefix, String... fileNames) {
     for (String fname : fileNames) {
       try {
-        Configuration cf = new Configurations().propertiesBuilder(fname)
-            .configure(new Parameters().properties()
-                .setFileName(fname)
-                .setListDelimiterHandler(new DefaultListDelimiterHandler(',')))
-            .getConfiguration()
-            .interpolatedConfiguration();
+        PropertiesConfiguration pcf = new PropertiesConfiguration();
+        FileHandler fh = new FileHandler(pcf);
+        fh.setFileName(fname);
+        fh.load();
+        Configuration cf = pcf.interpolatedConfiguration();
         LOG.info("Loaded properties from {}", fname);
         if (LOG.isDebugEnabled()) {
           LOG.debug("Properties: {}", toString(cf));
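Illustrative sketch, not from this patch: the commons-configuration2 load pattern used in the new loadFirst() code, shown standalone; the file name and key are placeholders.

import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.apache.commons.configuration2.io.FileHandler;

public class LoadPropertiesDemo {
  public static void main(String[] args) throws ConfigurationException {
    PropertiesConfiguration pcf = new PropertiesConfiguration();
    FileHandler fh = new FileHandler(pcf);
    fh.setFileName("hadoop-metrics2.properties");   // placeholder file name
    fh.load();
    // Resolve ${...} interpolations once, as the hunk above does.
    Configuration cf = pcf.interpolatedConfiguration();
    System.out.println(cf.getString("*.period", "<unset>"));
  }
}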
@@ -21,7 +21,7 @@
 import java.lang.reflect.Method;

 import static com.google.common.base.Preconditions.*;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;

 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;

@@ -21,7 +21,7 @@
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsException;

@@ -26,7 +26,7 @@
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;

@@ -32,7 +32,7 @@
 import java.util.function.Function;

 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;

@@ -18,7 +18,7 @@

 package org.apache.hadoop.metrics2.lib;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;

@@ -37,7 +37,7 @@
 import java.util.regex.Pattern;

 import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -28,7 +28,7 @@
 import java.util.List;
 import java.util.Map;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -28,7 +28,7 @@
 import java.nio.channels.ReadableByteChannel;
 import java.nio.ByteBuffer;

-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.CloseableReferenceCount;

@@ -32,7 +32,7 @@
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;

-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.apache.hadoop.util.NativeCodeLoader;

 import com.google.common.annotations.VisibleForTesting;

@@ -18,7 +18,6 @@
 package org.apache.hadoop.security;

 import java.io.IOException;
-import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;

@@ -26,7 +25,7 @@

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -52,7 +51,8 @@ public class ShellBasedUnixGroupsMapping extends Configured
   protected static final Logger LOG =
       LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);

-  private long timeout = 0L;
+  private long timeout = CommonConfigurationKeys.
+      HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;
   private static final List<String> EMPTY_GROUPS = new LinkedList<>();

   @Override

@@ -61,10 +61,10 @@ public void setConf(Configuration conf) {
     if (conf != null) {
       timeout = conf.getTimeDuration(
           CommonConfigurationKeys.
-              HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+              HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
           CommonConfigurationKeys.
-              HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT,
-          TimeUnit.SECONDS);
+              HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT,
+          TimeUnit.MILLISECONDS);
     }
   }

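Illustrative sketch, not from this patch: Configuration.getTimeDuration() as used in setConf() above, with a placeholder key and a default expressed in milliseconds.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TimeDurationDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("example.shell.command.timeout", "45s");   // placeholder key
    // Unit-suffixed values are converted to the requested unit.
    long timeoutMs = conf.getTimeDuration(
        "example.shell.command.timeout", 0L, TimeUnit.MILLISECONDS);
    System.out.println(timeoutMs);   // 45000
  }
}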
@@ -831,7 +831,9 @@ private long getRefreshTime(KerberosTicket tgt) {
     return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
   }

-  private boolean shouldRelogin() {
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public boolean shouldRelogin() {
     return hasKerberosCredentials() && isHadoopLogin();
   }

@@ -27,7 +27,7 @@

 import com.google.common.annotations.VisibleForTesting;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CommandShell;

@@ -34,7 +34,7 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;

 import com.google.common.annotations.VisibleForTesting;
 import java.util.stream.Collectors;

@@ -26,7 +26,7 @@
 import java.util.Date;
 import java.util.ServiceLoader;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.security.token.delegation.web;

-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.net.NetUtils;

@@ -31,6 +29,7 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -56,9 +55,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
   private static final String CONTENT_TYPE = "Content-Type";
   private static final String APPLICATION_JSON_MIME = "application/json";

-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(Map.class);
-
   private static final String HTTP_GET = "GET";
   private static final String HTTP_PUT = "PUT";

@@ -328,7 +324,7 @@ private Map doDelegationTokenOperation(URL url,
       if (contentType != null &&
           contentType.contains(APPLICATION_JSON_MIME)) {
         try {
-          ret = READER.readValue(conn.getInputStream());
+          ret = JsonSerialization.mapReader().readValue(conn.getInputStream());
         } catch (Exception ex) {
           throw new AuthenticationException(String.format(
               "'%s' did not handle the '%s' delegation token operation: %s",

@@ -20,8 +20,7 @@
 import java.util.ArrayList;
 import java.util.LinkedList;

-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.WordUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;

 /**

@@ -103,7 +102,8 @@ String[] getRow(int idx) {
       // Line-wrap if it's too long
       String[] lines = new String[] {raw};
       if (wrap) {
-        lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n");
+        lines = org.apache.hadoop.util.StringUtils.wrap(lines[0], wrapWidth,
+            "\n", true).split("\n");
       }
       for (int i=0; i<lines.length; i++) {
         if (justification == Justification.LEFT) {

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.util;

-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -56,11 +53,6 @@ public class HttpExceptionUtils {

   private static final String ENTER = System.getProperty("line.separator");

-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(Map.class);
-  private static final ObjectWriter WRITER =
-      new ObjectMapper().writerWithDefaultPrettyPrinter();
-
   /**
    * Creates a HTTP servlet response serializing the exception in it as JSON.
    *

@@ -82,7 +74,7 @@ public static void createServletExceptionResponse(
     Map<String, Object> jsonResponse = new LinkedHashMap<String, Object>();
     jsonResponse.put(ERROR_JSON, json);
     Writer writer = response.getWriter();
-    WRITER.writeValue(writer, jsonResponse);
+    JsonSerialization.writer().writeValue(writer, jsonResponse);
     writer.flush();
   }

@@ -150,7 +142,7 @@ public static void validateResponse(HttpURLConnection conn,
       InputStream es = null;
       try {
         es = conn.getErrorStream();
-        Map json = READER.readValue(es);
+        Map json = JsonSerialization.mapReader().readValue(es);
         json = (Map) json.get(ERROR_JSON);
         String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
         String exMsg = (String) json.get(ERROR_MESSAGE_JSON);

@@ -25,14 +25,18 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.Map;

 import com.fasterxml.jackson.core.JsonParseException;
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.DeserializationFeature;
 import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
 import com.fasterxml.jackson.databind.SerializationFeature;
 import com.google.common.base.Preconditions;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -65,6 +69,26 @@ public class JsonSerialization<T> {
   private final Class<T> classType;
   private final ObjectMapper mapper;

+  private static final ObjectWriter WRITER =
+      new ObjectMapper().writerWithDefaultPrettyPrinter();
+
+  private static final ObjectReader MAP_READER =
+      new ObjectMapper().readerFor(Map.class);
+
+  /**
+   * @return an ObjectWriter which pretty-prints its output
+   */
+  public static ObjectWriter writer() {
+    return WRITER;
+  }
+
+  /**
+   * @return an ObjectReader which returns simple Maps.
+   */
+  public static ObjectReader mapReader() {
+    return MAP_READER;
+  }
+
   /**
    * Create an instance bound to a specific type.
    * @param classType class to marshall
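Illustrative sketch, not from this patch: the shared writer()/mapReader() helpers added to JsonSerialization above, driven by a throwaway JSON string.

import java.util.Map;
import org.apache.hadoop.util.JsonSerialization;

public class JsonHelpersDemo {
  public static void main(String[] args) throws Exception {
    // Parse arbitrary JSON into a plain Map with the shared reader.
    Map<?, ?> parsed = JsonSerialization.mapReader()
        .readValue("{\"name\":\"ozone\",\"replicas\":3}");
    System.out.println(parsed.get("name"));   // ozone

    // Pretty-print it back out with the shared writer.
    System.out.println(JsonSerialization.writer().writeValueAsString(parsed));
  }
}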
@@ -1191,7 +1191,7 @@ public ShellCommandExecutor(String[] execString, File dir,

     /**
      * Returns the timeout value set for the executor's sub-commands.
-     * @return The timeout value in seconds
+     * @return The timeout value in milliseconds
      */
     @VisibleForTesting
     public long getTimeoutInterval() {

@@ -35,7 +35,7 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.apache.commons.lang3.time.FastDateFormat;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -987,7 +987,7 @@ public static String camelize(String s) {
     String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR, '_');

     for (String word : words)
-      sb.append(org.apache.commons.lang.StringUtils.capitalize(word));
+      sb.append(org.apache.commons.lang3.StringUtils.capitalize(word));

     return sb.toString();
   }

@@ -1183,4 +1183,64 @@ public static boolean isAlpha(String str) {
     return true;
   }

+  /**
+   * Same as WordUtils#wrap in commons-lang 2.6. Unlike commons-lang3, leading
+   * spaces on the first line are NOT stripped.
+   *
+   * @param str  the String to be word wrapped, may be null
+   * @param wrapLength the column to wrap the words at, less than 1 is treated
+   *                   as 1
+   * @param newLineStr the string to insert for a new line,
+   *                   <code>null</code> uses the system property line separator
+   * @param wrapLongWords true if long words (such as URLs) should be wrapped
+   * @return a line with newlines inserted, <code>null</code> if null input
+   */
+  public static String wrap(String str, int wrapLength, String newLineStr,
+      boolean wrapLongWords) {
+    if(str == null) {
+      return null;
+    } else {
+      if(newLineStr == null) {
+        newLineStr = System.lineSeparator();
+      }
+
+      if(wrapLength < 1) {
+        wrapLength = 1;
+      }
+
+      int inputLineLength = str.length();
+      int offset = 0;
+      StringBuffer wrappedLine = new StringBuffer(inputLineLength + 32);
+
+      while(inputLineLength - offset > wrapLength) {
+        if(str.charAt(offset) == 32) {
+          ++offset;
+        } else {
+          int spaceToWrapAt = str.lastIndexOf(32, wrapLength + offset);
+          if(spaceToWrapAt >= offset) {
+            wrappedLine.append(str.substring(offset, spaceToWrapAt));
+            wrappedLine.append(newLineStr);
+            offset = spaceToWrapAt + 1;
+          } else if(wrapLongWords) {
+            wrappedLine.append(str.substring(offset, wrapLength + offset));
+            wrappedLine.append(newLineStr);
+            offset += wrapLength;
+          } else {
+            spaceToWrapAt = str.indexOf(32, wrapLength + offset);
+            if(spaceToWrapAt >= 0) {
+              wrappedLine.append(str.substring(offset, spaceToWrapAt));
+              wrappedLine.append(newLineStr);
+              offset = spaceToWrapAt + 1;
+            } else {
+              wrappedLine.append(str.substring(offset));
+              offset = inputLineLength;
+            }
+          }
+        }
+      }
+
+      wrappedLine.append(str.substring(offset));
+      return wrappedLine.toString();
+    }
+  }
 }
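Quick illustration, not from this patch, of the wrap() helper added above; wrapping at column 5 splits this particular input one word per line.

public class WrapDemo {
  public static void main(String[] args) {
    String wrapped = org.apache.hadoop.util.StringUtils.wrap(
        "hello world foo", 5, "\n", true);
    System.out.println(wrapped);   // hello\nworld\nfoo
  }
}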
@@ -216,6 +216,21 @@ private void readProcMemInfoFile() {
     readProcMemInfoFile(false);
   }

+  /**
+   *
+   * Wrapper for Long.parseLong() that returns zero if the value is
+   * invalid. Under some circumstances, swapFree in /proc/meminfo can
+   * go negative, reported as a very large decimal value.
+   */
+  private long safeParseLong(String strVal) {
+    long parsedVal;
+    try {
+      parsedVal = Long.parseLong(strVal);
+    } catch (NumberFormatException nfe) {
+      parsedVal = 0;
+    }
+    return parsedVal;
+  }
   /**
    * Read /proc/meminfo, parse and compute memory information.
    * @param readAgain if false, read only on the first time

@@ -252,9 +267,9 @@ private void readProcMemInfoFile(boolean readAgain) {
         } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
           swapSize = Long.parseLong(mat.group(2));
         } else if (mat.group(1).equals(MEMFREE_STRING)) {
-          ramSizeFree = Long.parseLong(mat.group(2));
+          ramSizeFree = safeParseLong(mat.group(2));
         } else if (mat.group(1).equals(SWAPFREE_STRING)) {
-          swapSizeFree = Long.parseLong(mat.group(2));
+          swapSizeFree = safeParseLong(mat.group(2));
         } else if (mat.group(1).equals(INACTIVE_STRING)) {
           inactiveSize = Long.parseLong(mat.group(2));
         } else if (mat.group(1).equals(INACTIVEFILE_STRING)) {
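Not from this patch: a tiny check of the failure mode safeParseLong() guards against; the sample string is an arbitrary out-of-range decimal of the kind a negative swapFree can produce.

public class SafeParseDemo {
  public static void main(String[] args) {
    String bogusSwapFree = "18446744073709551596";   // larger than Long.MAX_VALUE
    try {
      Long.parseLong(bogusSwapFree);
    } catch (NumberFormatException nfe) {
      // The wrapper above turns this case into a harmless 0.
      System.out.println("parse failed, treating value as 0");
    }
  }
}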
@@ -68,3 +68,11 @@ message FileStatusProto {
   optional bytes ec_data = 17;
   optional uint32 flags = 18 [default = 0];
 }
+
+/**
+ * Placeholder type for consistent basic FileSystem operations.
+ */
+message LocalFileSystemPathHandleProto {
+  optional uint64 mtime = 1;
+  optional string path = 2;
+}

@@ -0,0 +1,16 @@
+# Standard Apache License 2.0 header.
+
+org.apache.hadoop.fs.FileSystemMultipartUploader$Factory
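Illustrative sketch, not from this patch: the generic java.util.ServiceLoader pattern that consumes META-INF/services entries like the one registered above; the provider interface here is invented for the example.

import java.util.ServiceLoader;

public class ServiceLoaderDemo {
  // Hypothetical provider interface standing in for the real factory type.
  public interface UploaderFactory {
    String scheme();
  }

  public static void main(String[] args) {
    // ServiceLoader reads META-INF/services/<interface-name> files on the
    // classpath and instantiates each listed implementation.
    for (UploaderFactory f : ServiceLoader.load(UploaderFactory.class)) {
      System.out.println("found factory for scheme " + f.scheme());
    }
  }
}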
@@ -62,7 +62,7 @@
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;

@@ -18,7 +18,7 @@

 package org.apache.hadoop.conf;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;

@@ -20,7 +20,7 @@
 import java.io.IOException;
 import java.util.Arrays;

-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.junit.Assume;
@@ -0,0 +1,143 @@
+/* Standard Apache License 2.0 header. */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.junit.Test;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public abstract class AbstractSystemMultipartUploaderTest {
+
+  abstract FileSystem getFS() throws IOException;
+
+  abstract Path getBaseTestPath();
+
+  @Test
+  public void testMultipartUpload() throws Exception {
+    FileSystem fs = getFS();
+    Path file = new Path(getBaseTestPath(), "some-file");
+    MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+    UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+    StringBuilder sb = new StringBuilder();
+    for (int i = 1; i <= 100; ++i) {
+      String contents = "ThisIsPart" + i + "\n";
+      sb.append(contents);
+      int len = contents.getBytes().length;
+      InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+      PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+      partHandles.add(Pair.of(i, partHandle));
+    }
+    PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
+    byte[] fdData = IOUtils.toByteArray(fs.open(fd));
+    byte[] fileData = IOUtils.toByteArray(fs.open(file));
+    String readString = new String(fdData);
+    assertEquals(sb.toString(), readString);
+    assertArrayEquals(fdData, fileData);
+  }
+
+  @Test
+  public void testMultipartUploadReverseOrder() throws Exception {
+    FileSystem fs = getFS();
+    Path file = new Path(getBaseTestPath(), "some-file");
+    MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+    UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+    StringBuilder sb = new StringBuilder();
+    for (int i = 1; i <= 100; ++i) {
+      String contents = "ThisIsPart" + i + "\n";
+      sb.append(contents);
+    }
+    for (int i = 100; i > 0; --i) {
+      String contents = "ThisIsPart" + i + "\n";
+      int len = contents.getBytes().length;
+      InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+      PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+      partHandles.add(Pair.of(i, partHandle));
+    }
+    PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
+    byte[] fdData = IOUtils.toByteArray(fs.open(fd));
+    byte[] fileData = IOUtils.toByteArray(fs.open(file));
+    String readString = new String(fdData);
+    assertEquals(sb.toString(), readString);
+    assertArrayEquals(fdData, fileData);
+  }
+
+  @Test
+  public void testMultipartUploadReverseOrderNoNContiguousPartNumbers()
+      throws Exception {
+    FileSystem fs = getFS();
+    Path file = new Path(getBaseTestPath(), "some-file");
+    MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+    UploadHandle uploadHandle = mpu.initialize(file);
+    List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+    StringBuilder sb = new StringBuilder();
+    for (int i = 2; i <= 200; i += 2) {
+      String contents = "ThisIsPart" + i + "\n";
+      sb.append(contents);
+    }
+    for (int i = 200; i > 0; i -= 2) {
+      String contents = "ThisIsPart" + i + "\n";
+      int len = contents.getBytes().length;
+      InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+      PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+      partHandles.add(Pair.of(i, partHandle));
+    }
+    PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
+    byte[] fdData = IOUtils.toByteArray(fs.open(fd));
+    byte[] fileData = IOUtils.toByteArray(fs.open(file));
+    String readString = new String(fdData);
+    assertEquals(sb.toString(), readString);
+    assertArrayEquals(fdData, fileData);
+  }
+
+  @Test
+  public void testMultipartUploadAbort() throws Exception {
+    FileSystem fs = getFS();
+    Path file = new Path(getBaseTestPath(), "some-file");
+    MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+    UploadHandle uploadHandle = mpu.initialize(file);
+    for (int i = 100; i >= 50; --i) {
+      String contents = "ThisIsPart" + i + "\n";
+      int len = contents.getBytes().length;
+      InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+      PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
+    }
+    mpu.abort(file, uploadHandle);
+
+    String contents = "ThisIsPart49\n";
+    int len = contents.getBytes().length;
+    InputStream is = IOUtils.toInputStream(contents, "UTF-8");
+
+    try {
+      mpu.putPart(file, is, 49, uploadHandle, len);
+      fail("putPart should have thrown an exception");
+    } catch (IOException ok) {
+      // ignore
+    }
+  }
+}
@@ -19,7 +19,7 @@

 import java.io.IOException;

-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.test.GenericTestUtils;

@@ -810,7 +810,49 @@ public void testCreateFlagAppendCreateOverwrite() throws IOException {
       fc.create(p, EnumSet.of(CREATE, APPEND, OVERWRITE));
       Assert.fail("Excepted exception not thrown");
     }
+
+  @Test
+  public void testBuilderCreateNonExistingFile() throws IOException {
+    Path p = getTestRootPath(fc, "test/testBuilderCreateNonExistingFile");
+    FSDataOutputStream out = fc.create(p).build();
+    writeData(fc, p, out, data, data.length);
+  }
+
+  @Test
+  public void testBuilderCreateExistingFile() throws IOException {
+    Path p = getTestRootPath(fc, "test/testBuilderCreateExistingFile");
+    createFile(p);
+    FSDataOutputStream out = fc.create(p).overwrite(true).build();
+    writeData(fc, p, out, data, data.length);
+  }
+
+  @Test
+  public void testBuilderCreateAppendNonExistingFile() throws IOException {
+    Path p = getTestRootPath(fc, "test/testBuilderCreateAppendNonExistingFile");
+    FSDataOutputStream out = fc.create(p).append().build();
+    writeData(fc, p, out, data, data.length);
+  }
+
+  @Test
+  public void testBuilderCreateAppendExistingFile() throws IOException {
+    Path p = getTestRootPath(fc, "test/testBuilderCreateAppendExistingFile");
+    createFile(p);
+    FSDataOutputStream out = fc.create(p).append().build();
+    writeData(fc, p, out, data, 2 * data.length);
+  }
+
+  @Test
+  public void testBuilderCreateRecursive() throws IOException {
+    Path p = getTestRootPath(fc, "test/parent/no/exist/file1");
+    try (FSDataOutputStream out = fc.create(p).build()) {
+      fail("Should throw FileNotFoundException on non-exist directory");
+    } catch (FileNotFoundException e) {
+    }
+
+    FSDataOutputStream out = fc.create(p).recursive().build();
+    writeData(fc, p, out, data, data.length);
+  }
+
   private static void writeData(FileContext fc, Path p, FSDataOutputStream out,
       byte[] data, long expectedLen) throws IOException {
     out.write(data, 0, data.length);
@ -22,7 +22,6 @@
|
|||||||
import java.io.FileNotFoundException;
|
import java.io.FileNotFoundException;
|
||||||
import java.util.EnumSet;
|
import java.util.EnumSet;
|
||||||
|
|
||||||
import org.apache.commons.lang.RandomStringUtils;
|
|
||||||
import org.apache.hadoop.fs.Options.CreateOpts;
|
import org.apache.hadoop.fs.Options.CreateOpts;
|
||||||
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
|
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
|
||||||
import org.apache.hadoop.io.IOUtils;
|
import org.apache.hadoop.io.IOUtils;
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
*/
|
*/
|
||||||
package org.apache.hadoop.fs;
|
package org.apache.hadoop.fs;
|
||||||
|
|
||||||
import org.apache.commons.lang.RandomStringUtils;
|
import org.apache.commons.lang3.RandomStringUtils;
|
||||||
import org.apache.hadoop.test.GenericTestUtils;
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
import org.junit.After;
|
import org.junit.After;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
|
@ -18,7 +18,7 @@
|
|||||||
|
|
||||||
package org.apache.hadoop.fs;
|
package org.apache.hadoop.fs;
|
||||||
|
|
||||||
import org.apache.commons.lang.math.RandomUtils;
|
import org.apache.commons.lang3.RandomUtils;
|
||||||
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
|
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
|
||||||
|
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
@@ -67,15 +67,15 @@ public class TestFileSystemStorageStatistics {

   @Before
   public void setup() {
-    statistics.incrementBytesRead(RandomUtils.nextInt(100));
-    statistics.incrementBytesWritten(RandomUtils.nextInt(100));
-    statistics.incrementLargeReadOps(RandomUtils.nextInt(100));
-    statistics.incrementWriteOps(RandomUtils.nextInt(100));
+    statistics.incrementBytesRead(RandomUtils.nextInt(0, 100));
+    statistics.incrementBytesWritten(RandomUtils.nextInt(0, 100));
+    statistics.incrementLargeReadOps(RandomUtils.nextInt(0, 100));
+    statistics.incrementWriteOps(RandomUtils.nextInt(0, 100));

-    statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100));
-    statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100));
-    statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100));
-    statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100));
+    statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(0, 100));
+    statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100));
+    statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100));
+    statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100));
   }

   @Test
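(Aside, not part of the diff: the extra leading 0 in the calls above reflects the commons-lang3 API, where RandomUtils.nextInt takes an explicit half-open range rather than a single bound. A minimal sketch, assuming commons-lang3 is on the classpath:)

import org.apache.commons.lang3.RandomUtils;

public class RandomBoundsExample {
  public static void main(String[] args) {
    // commons-lang 2.x offered RandomUtils.nextInt(100);
    // commons-lang3 expects a range [startInclusive, endExclusive).
    int sample = RandomUtils.nextInt(0, 100);
    System.out.println("sampled " + sample); // 0 <= sample < 100
  }
}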
@@ -689,17 +689,18 @@ public void testFSOutputStreamBuilder() throws Exception {
     // and permission
     FSDataOutputStreamBuilder builder =
         fileSys.createFile(path);
-    builder.build();
+    try (FSDataOutputStream stream = builder.build()) {
     Assert.assertEquals("Should be default block size",
         builder.getBlockSize(), fileSys.getDefaultBlockSize());
     Assert.assertEquals("Should be default replication factor",
         builder.getReplication(), fileSys.getDefaultReplication());
     Assert.assertEquals("Should be default buffer size",
         builder.getBufferSize(),
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
             IO_FILE_BUFFER_SIZE_DEFAULT));
     Assert.assertEquals("Should be default permission",
         builder.getPermission(), FsPermission.getFileDefault());
+    }

     // Test set 0 to replication, block size and buffer size
     builder = fileSys.createFile(path);
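(Aside, not part of the diff: wrapping builder.build() in try-with-resources guarantees the stream is closed even if one of the assertions throws. A generic sketch of the pattern in plain Java:)

public class TryWithResourcesExample {
  static class Resource implements AutoCloseable {
    void use() { System.out.println("using resource"); }
    @Override public void close() { System.out.println("closed"); }
  }

  public static void main(String[] args) {
    // close() runs on both the normal and the exceptional exit path.
    try (Resource r = new Resource()) {
      r.use();
    }
  }
}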
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.test.GenericTestUtils.getRandomizedTestDir;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Test the FileSystemMultipartUploader on local file system.
+ */
+public class TestLocalFileSystemMultipartUploader
+    extends AbstractSystemMultipartUploaderTest {
+
+  private static FileSystem fs;
+  private File tmp;
+
+  @BeforeClass
+  public static void init() throws IOException {
+    fs = LocalFileSystem.getLocal(new Configuration());
+  }
+
+  @Before
+  public void setup() throws IOException {
+    tmp = getRandomizedTestDir();
+    tmp.mkdirs();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    tmp.delete();
+  }
+
+  @Override
+  public FileSystem getFS() {
+    return fs;
+  }
+
+  @Override
+  public Path getBaseTestPath() {
+    return new Path(tmp.getAbsolutePath());
+  }
+
+}
@@ -123,6 +123,12 @@ public void testChanged() throws IOException {
     HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts)
         .orElseThrow(IllegalArgumentException::new);
     FileStatus stat = testFile(B1);
+    try {
+      // Temporary workaround while RawLocalFS supports only second precision
+      Thread.sleep(1000);
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
     // modify the file by appending data
     appendFile(getFileSystem(), stat.getPath(), B2);
     byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length);
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.contract.rawlocal;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
+import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract;
+
+public class TestRawlocalContractPathHandle
+    extends AbstractContractPathHandleTest {
+
+  public TestRawlocalContractPathHandle(String testname,
+      Options.HandleOpt[] opts, boolean serialized) {
+    super(testname, opts, serialized);
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new RawlocalFSContract(conf);
+  }
+
+}
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.fs.shell;

-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.lang.math.RandomUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,11 +56,11 @@ public static int initialize(Path dir) throws Exception {
     fs.mkdirs(toDirPath);

     int numTotalFiles = 0;
-    int numDirs = RandomUtils.nextInt(5);
+    int numDirs = RandomUtils.nextInt(0, 5);
     for (int dirCount = 0; dirCount < numDirs; ++dirCount) {
       Path subDirPath = new Path(fromDirPath, "subdir" + dirCount);
       fs.mkdirs(subDirPath);
-      int numFiles = RandomUtils.nextInt(10);
+      int numFiles = RandomUtils.nextInt(0, 10);
       for (int fileCount = 0; fileCount < numFiles; ++fileCount) {
         numTotalFiles++;
         Path subFile = new Path(subDirPath, "file" + fileCount);
@@ -115,7 +115,7 @@ public void testCopyFromLocalWithThreads() throws Exception {
     Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
     int numFiles = TestCopyFromLocal.initialize(dir);
     int maxThreads = Runtime.getRuntime().availableProcessors() * 2;
-    int randThreads = RandomUtils.nextInt(maxThreads - 1) + 1;
+    int randThreads = RandomUtils.nextInt(0, maxThreads - 1) + 1;
     String numThreads = Integer.toString(randThreads);
     run(new TestMultiThreadedCopy(randThreads,
         randThreads == 1 ? 0 : numFiles), "-t", numThreads,
@@ -26,7 +26,7 @@
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -20,7 +20,7 @@
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.metrics.RpcMetrics;
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ipc;

 import com.google.protobuf.ServiceException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -38,7 +38,7 @@
 import java.util.List;
 import java.util.concurrent.TimeUnit;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.KerberosAuthException;
@@ -43,7 +43,7 @@
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket.DomainChannel;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -173,6 +173,37 @@ public void testGetNumericGroupsResolvable() throws Exception {
     assertTrue(groups.contains("zzz"));
   }

+  public long getTimeoutInterval(String timeout) {
+    Configuration conf = new Configuration();
+    String userName = "foobarnonexistinguser";
+    conf.set(
+        CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
+        timeout);
+    TestDelayedGroupCommand mapping = ReflectionUtils
+        .newInstance(TestDelayedGroupCommand.class, conf);
+    ShellCommandExecutor executor = mapping.createGroupExecutor(userName);
+    return executor.getTimeoutInterval();
+  }
+
+  @Test
+  public void testShellTimeOutConf() {
+
+    // Test a 1 second max-runtime timeout
+    assertEquals(
+        "Expected the group names executor to carry the configured timeout",
+        1000L, getTimeoutInterval("1s"));
+
+    // Test a 1 minute max-runtime timeout
+    assertEquals(
+        "Expected the group names executor to carry the configured timeout",
+        60000L, getTimeoutInterval("1m"));
+
+    // Test a 1 millisecond max-runtime timeout
+    assertEquals(
+        "Expected the group names executor to carry the configured timeout",
+        1L, getTimeoutInterval("1"));
+  }
+
   private class TestGroupResolvable
       extends ShellBasedUnixGroupsMapping {
     /**
@@ -222,7 +253,7 @@ public void testGetGroupsResolvable() throws Exception {
   private static class TestDelayedGroupCommand
       extends ShellBasedUnixGroupsMapping {

-    private Long timeoutSecs = 2L;
+    private Long timeoutSecs = 1L;

     TestDelayedGroupCommand() {
       super();
@@ -249,12 +280,12 @@ public void testFiniteGroupResolutionTime() throws Exception {
     String userName = "foobarnonexistinguser";
     String commandTimeoutMessage =
         "ran longer than the configured timeout limit";
-    long testTimeout = 1L;
+    long testTimeout = 500L;

     // Test a 1 second max-runtime timeout
     conf.setLong(
         CommonConfigurationKeys.
-            HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+            HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
         testTimeout);

     TestDelayedGroupCommand mapping =
@@ -306,7 +337,7 @@ public void testFiniteGroupResolutionTime() throws Exception {
     conf = new Configuration();
     long defaultTimeout =
         CommonConfigurationKeys.
-            HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT;
+            HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;

     mapping =
         ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
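(Aside, not part of the diff: the expectations in the new test above — "1s" becoming 1000 ms, "1m" becoming 60000 ms, and a bare "1" staying 1 ms — match Hadoop's usual duration parsing, where an unsuffixed value is read in the caller's default unit. A small sketch; the property name below is invented for illustration:)

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TimeoutConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("example.command.timeout", "1s"); // hypothetical key
    long millis = conf.getTimeDuration(
        "example.command.timeout", 0L, TimeUnit.MILLISECONDS);
    System.out.println(millis); // prints 1000
  }
}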
@@ -21,7 +21,7 @@

 import com.fasterxml.jackson.databind.ObjectMapper;

-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.mutable.MutableBoolean;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -18,7 +18,7 @@

 package org.apache.hadoop.service.launcher;

-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.ServiceOperations;
@@ -40,7 +40,7 @@
 import java.util.regex.Pattern;

 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileUtil;
@@ -344,7 +344,7 @@ public static void assertExceptionContains(String expectedText,
       throw new AssertionError(E_NULL_THROWABLE_STRING, t);
     }
     if (expectedText != null && !msg.contains(expectedText)) {
-      String prefix = org.apache.commons.lang.StringUtils.isEmpty(message)
+      String prefix = org.apache.commons.lang3.StringUtils.isEmpty(message)
           ? "" : (message + ": ");
       throw new AssertionError(
           String.format("%s Expected to find '%s' %s: %s",
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.util;

-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.slf4j.LoggerFactory;
 import org.junit.Assert;
 import org.junit.Test;
@@ -18,7 +18,7 @@

 package org.apache.hadoop.util;

-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -161,6 +161,36 @@ int readDiskBlockInformation(String diskName, int defSector) {
       "DirectMap2M: 2027520 kB\n" +
       "DirectMap1G: 132120576 kB\n";

+  static final String MEMINFO_FORMAT3 =
+      "MemTotal: %d kB\n" +
+      "MemFree: %s kB\n" +
+      "Buffers: 138244 kB\n" +
+      "Cached: 947780 kB\n" +
+      "SwapCached: 142880 kB\n" +
+      "Active: 3229888 kB\n" +
+      "Inactive: %d kB\n" +
+      "SwapTotal: %d kB\n" +
+      "SwapFree: %s kB\n" +
+      "Dirty: 122012 kB\n" +
+      "Writeback: 0 kB\n" +
+      "AnonPages: 2710792 kB\n" +
+      "Mapped: 24740 kB\n" +
+      "Slab: 132528 kB\n" +
+      "SReclaimable: 105096 kB\n" +
+      "SUnreclaim: 27432 kB\n" +
+      "PageTables: 11448 kB\n" +
+      "NFS_Unstable: 0 kB\n" +
+      "Bounce: 0 kB\n" +
+      "CommitLimit: 4125904 kB\n" +
+      "Committed_AS: 4143556 kB\n" +
+      "VmallocTotal: 34359738367 kB\n" +
+      "VmallocUsed: 1632 kB\n" +
+      "VmallocChunk: 34359736375 kB\n" +
+      "HugePages_Total: %d\n" +
+      "HugePages_Free: 0\n" +
+      "HugePages_Rsvd: 0\n" +
+      "Hugepagesize: 2048 kB";
+
   static final String CPUINFO_FORMAT =
       "processor : %s\n" +
       "vendor_id : AuthenticAMD\n" +
@@ -384,6 +414,36 @@ public void parsingProcMemFile2() throws IOException {
             (nrHugePages * 2048) + swapTotal));
   }

+  /**
+   * Test parsing /proc/meminfo
+   * @throws IOException
+   */
+  @Test
+  public void parsingProcMemFileWithBadValues() throws IOException {
+    long memTotal = 4058864L;
+    long memFree = 0L; // bad value should return 0
+    long inactive = 567732L;
+    long swapTotal = 2096472L;
+    long swapFree = 0L; // bad value should return 0
+    int nrHugePages = 10;
+    String badFreeValue = "18446744073709551596";
+    File tempFile = new File(FAKE_MEMFILE);
+    tempFile.deleteOnExit();
+    FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+    fWriter.write(String.format(MEMINFO_FORMAT3,
+        memTotal, badFreeValue, inactive, swapTotal, badFreeValue, nrHugePages));
+
+    fWriter.close();
+    assertEquals(plugin.getAvailablePhysicalMemorySize(),
+        1024L * (memFree + inactive));
+    assertEquals(plugin.getAvailableVirtualMemorySize(),
+        1024L * (memFree + inactive + swapFree));
+    assertEquals(plugin.getPhysicalMemorySize(),
+        1024L * (memTotal - (nrHugePages * 2048)));
+    assertEquals(plugin.getVirtualMemorySize(),
+        1024L * (memTotal - (nrHugePages * 2048) + swapTotal));
+  }
+
   @Test
   public void testCoreCounts() throws IOException {

@@ -122,4 +122,9 @@
     <value>true</value>
   </property>

+  <property>
+    <name>fs.contract.supports-content-check</name>
+    <value>true</value>
+  </property>
+
 </configuration>
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+trap "echo SIGTERM trapped!" SIGTERM
+trap "echo SIGINT trapped!" SIGINT
+
+echo "$$" > "$1"
+
+while true; do
+  sleep 1.3
+done
@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;

-import com.fasterxml.jackson.databind.ObjectMapper;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.http.JettyUtils;
+import org.apache.hadoop.util.JsonSerialization;

 import javax.ws.rs.Produces;
 import javax.ws.rs.WebApplicationException;
@@ -67,8 +66,7 @@ public void writeTo(Object obj, Class<?> aClass, Type type,
       OutputStream outputStream) throws IOException, WebApplicationException {
     Writer writer = new OutputStreamWriter(outputStream, Charset
         .forName("UTF-8"));
-    ObjectMapper jsonMapper = new ObjectMapper();
-    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
+    JsonSerialization.writer().writeValue(writer, obj);
   }

 }
@@ -265,6 +265,11 @@
       <artifactId>hadoop-ozone-docs</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem</artifactId>
+      <scope>provided</scope>
+    </dependency>
   </dependencies>
   <build>
     <plugins>
@@ -16,18 +16,6 @@

 version: "3"
 services:
-  namenode:
-    image: apache/hadoop-runner
-    hostname: namenode
-    volumes:
-      - ../../ozone:/opt/hadoop
-    ports:
-      - 9870:9870
-    environment:
-      ENSURE_NAMENODE_DIR: /data/namenode
-    env_file:
-      - ./docker-config
-    command: ["/opt/hadoop/bin/hdfs","namenode"]
   datanode:
     image: apache/hadoop-runner
     volumes:
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
 OZONE-SITE.XML_ozone.ksm.address=ksm
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
|
|||||||
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
|
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
|
||||||
OZONE-SITE.XML_ozone.handler.type=distributed
|
OZONE-SITE.XML_ozone.handler.type=distributed
|
||||||
OZONE-SITE.XML_ozone.scm.client.address=scm
|
OZONE-SITE.XML_ozone.scm.client.address=scm
|
||||||
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
|
|
||||||
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
|
|
||||||
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
|
|
||||||
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
|
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
|
||||||
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
|
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
|
||||||
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
|
|
||||||
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
|
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
|
||||||
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||||
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||||
|
@@ -16,19 +16,6 @@

 version: "3"
 services:
-  namenode:
-    image: apache/hadoop-runner
-    hostname: namenode
-    volumes:
-      - ../../ozone:/opt/hadoop
-      - ./jmxpromo.jar:/opt/jmxpromo.jar
-    ports:
-      - 9870:9870
-    environment:
-      ENSURE_NAMENODE_DIR: /data/namenode
-    env_file:
-      - ./docker-config
-    command: ["/opt/hadoop/bin/hdfs","namenode"]
   datanode:
     image: apache/hadoop-runner
     volumes:
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
 OZONE-SITE.XML_ozone.ksm.address=ksm
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
|
|||||||
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
|
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
|
||||||
OZONE-SITE.XML_ozone.handler.type=distributed
|
OZONE-SITE.XML_ozone.handler.type=distributed
|
||||||
OZONE-SITE.XML_ozone.scm.client.address=scm
|
OZONE-SITE.XML_ozone.scm.client.address=scm
|
||||||
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
|
|
||||||
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
|
|
||||||
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
|
|
||||||
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
|
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
|
||||||
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
|
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
|
||||||
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
|
|
||||||
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
|
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
|
||||||
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||||
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||||
|
@@ -244,32 +244,6 @@ public final class ScmConfigKeys {
   public static final String
       OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";

-  /**
-   * Don't start processing a pool if we have not had a minimum number of
-   * seconds from the last processing.
-   */
-  public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
-      "ozone.scm.container.report.processing.interval";
-  public static final String
-      OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
-
-  /**
-   * This determines the total number of pools to be processed in parallel.
-   */
-  public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
-      "ozone.scm.max.nodepool.processing.threads";
-  public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
-  /**
-   * These 2 settings control the number of threads in executor pool and time
-   * outs for thw container reports from all nodes.
-   */
-  public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
-      "ozone.scm.max.container.report.threads";
-  public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
-  public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
-      "ozone.scm.container.reports.wait.timeout";
-  public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
-      "5m";

   public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
       "ozone.scm.block.deletion.max.retry";
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.container.common.helpers;

 import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.PropertyAccessor;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectWriter;
@@ -30,6 +31,7 @@
 import org.apache.hadoop.util.Time;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Comparator;

 import static java.lang.Math.max;
@@ -63,6 +65,13 @@ public class ContainerInfo
   private String owner;
   private long containerID;
   private long deleteTransactionId;
+  /**
+   * Allows you to maintain private data on ContainerInfo. This is not
+   * serialized via protobuf, just allows us to maintain some private data.
+   */
+  @JsonIgnore
+  private byte[] data;
+
   ContainerInfo(
       long containerID,
       HddsProtos.LifeCycleState state,
@@ -295,6 +304,29 @@ public String toJsonString() throws IOException {
     return WRITER.writeValueAsString(this);
   }

+  /**
+   * Returns private data that is set on this containerInfo.
+   *
+   * @return blob, the user can interpret it any way they like.
+   */
+  public byte[] getData() {
+    if (this.data != null) {
+      return Arrays.copyOf(this.data, this.data.length);
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Set private data on ContainerInfo object.
+   *
+   * @param data -- private data.
+   */
+  public void setData(byte[] data) {
+    if (data != null) {
+      this.data = Arrays.copyOf(data, data.length);
+    }
+  }
   /**
    * Builder class for ContainerInfo.
    */
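(Aside, not part of the diff: getData/setData above copy the byte array in both directions, so a caller cannot mutate the stored state through its own reference. A standalone sketch of the same defensive-copy pattern:)

import java.util.Arrays;

public class DefensiveCopyExample {
  private byte[] data;

  public void setData(byte[] incoming) {
    // Copy on the way in: the caller keeps ownership of its own array.
    this.data = (incoming == null) ? null : Arrays.copyOf(incoming, incoming.length);
  }

  public byte[] getData() {
    // Copy on the way out: callers cannot modify the stored bytes.
    return (data == null) ? null : Arrays.copyOf(data, data.length);
  }

  public static void main(String[] args) {
    DefensiveCopyExample holder = new DefensiveCopyExample();
    byte[] buf = {1, 2, 3};
    holder.setData(buf);
    buf[0] = 42;                             // does not affect the stored copy
    System.out.println(holder.getData()[0]); // prints 1
  }
}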
@@ -27,14 +27,14 @@
 import com.fasterxml.jackson.databind.ser.FilterProvider;
 import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
 import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Map;
+import java.util.TreeMap;
 import java.util.List;

 /**
@@ -46,7 +46,7 @@ public class Pipeline {

   static {
     ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"data"};
+    String[] ignorableFieldNames = {"leaderID", "datanodes"};
     FilterProvider filters = new SimpleFilterProvider()
         .addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter
             .serializeAllExcept(ignorableFieldNames));
@@ -57,38 +57,66 @@ public class Pipeline {
     WRITER = mapper.writer(filters);
   }

-  private PipelineChannel pipelineChannel;
-  /**
-   * Allows you to maintain private data on pipelines. This is not serialized
-   * via protobuf, just allows us to maintain some private data.
-   */
   @JsonIgnore
-  private byte[] data;
+  private String leaderID;
+  @JsonIgnore
+  private Map<String, DatanodeDetails> datanodes;
+  private HddsProtos.LifeCycleState lifeCycleState;
+  private HddsProtos.ReplicationType type;
+  private HddsProtos.ReplicationFactor factor;
+  private String name;
+  // TODO: change to long based id
+  //private long id;
+
   /**
    * Constructs a new pipeline data structure.
    *
-   * @param pipelineChannel - transport information for this container
+   * @param leaderID - Leader datanode id
+   * @param lifeCycleState - Pipeline State
+   * @param replicationType - Replication protocol
+   * @param replicationFactor - replication count on datanodes
+   * @param name - pipelineName
    */
-  public Pipeline(PipelineChannel pipelineChannel) {
-    this.pipelineChannel = pipelineChannel;
-    data = null;
+  public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState,
+      HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, String name) {
+    this.leaderID = leaderID;
+    this.lifeCycleState = lifeCycleState;
+    this.type = replicationType;
+    this.factor = replicationFactor;
+    this.name = name;
+    datanodes = new TreeMap<>();
   }

   /**
    * Gets pipeline object from protobuf.
    *
-   * @param pipeline - ProtoBuf definition for the pipeline.
+   * @param pipelineProto - ProtoBuf definition for the pipeline.
    * @return Pipeline Object
    */
-  public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
-    Preconditions.checkNotNull(pipeline);
-    PipelineChannel pipelineChannel =
-        PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
-    return new Pipeline(pipelineChannel);
+  public static Pipeline getFromProtoBuf(
+      HddsProtos.Pipeline pipelineProto) {
+    Preconditions.checkNotNull(pipelineProto);
+    Pipeline pipeline =
+        new Pipeline(pipelineProto.getLeaderID(),
+        pipelineProto.getState(),
+        pipelineProto.getType(),
+        pipelineProto.getFactor(),
+        pipelineProto.getName());
+
+    for (HddsProtos.DatanodeDetailsProto dataID :
+        pipelineProto.getMembersList()) {
+      pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID));
+    }
+    return pipeline;
   }

+  /**
+   * returns the replication count.
+   * @return Replication Factor
+   */
   public HddsProtos.ReplicationFactor getFactor() {
-    return pipelineChannel.getFactor();
+    return factor;
   }

   /**
@@ -98,19 +126,34 @@ public HddsProtos.ReplicationFactor getFactor() {
    */
   @JsonIgnore
   public DatanodeDetails getLeader() {
-    return pipelineChannel.getDatanodes().get(pipelineChannel.getLeaderID());
+    return getDatanodes().get(leaderID);
   }

+  public void addMember(DatanodeDetails datanodeDetails) {
+    datanodes.put(datanodeDetails.getUuid().toString(),
+        datanodeDetails);
+  }
+
+  public Map<String, DatanodeDetails> getDatanodes() {
+    return datanodes;
+  }
   /**
    * Returns the leader host.
    *
    * @return First Machine.
    */
   public String getLeaderHost() {
-    return pipelineChannel.getDatanodes()
-        .get(pipelineChannel.getLeaderID()).getHostName();
+    return getDatanodes()
+        .get(leaderID).getHostName();
   }

+  /**
+   *
+   * @return lead
+   */
+  public String getLeaderID() {
+    return leaderID;
+  }
   /**
    * Returns all machines that make up this pipeline.
    *
@@ -118,7 +161,7 @@ public String getLeaderHost() {
    */
   @JsonIgnore
   public List<DatanodeDetails> getMachines() {
-    return new ArrayList<>(pipelineChannel.getDatanodes().values());
+    return new ArrayList<>(getDatanodes().values());
   }

   /**
@@ -128,7 +171,7 @@ public List<DatanodeDetails> getMachines() {
    */
   public List<String> getDatanodeHosts() {
     List<String> dataHosts = new ArrayList<>();
-    for (DatanodeDetails id : pipelineChannel.getDatanodes().values()) {
+    for (DatanodeDetails id :getDatanodes().values()) {
       dataHosts.add(id.getHostName());
     }
     return dataHosts;
@@ -143,46 +186,31 @@ public List<String> getDatanodeHosts() {
   public HddsProtos.Pipeline getProtobufMessage() {
     HddsProtos.Pipeline.Builder builder =
         HddsProtos.Pipeline.newBuilder();
-    builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
+    for (DatanodeDetails datanode : datanodes.values()) {
+      builder.addMembers(datanode.getProtoBufMessage());
+    }
+    builder.setLeaderID(leaderID);
+
+    if (this.getLifeCycleState() != null) {
+      builder.setState(this.getLifeCycleState());
+    }
+    if (this.getType() != null) {
+      builder.setType(this.getType());
+    }
+
+    if (this.getFactor() != null) {
+      builder.setFactor(this.getFactor());
+    }
     return builder.build();
   }

-  /**
-   * Returns private data that is set on this pipeline.
-   *
-   * @return blob, the user can interpret it any way they like.
-   */
-  public byte[] getData() {
-    if (this.data != null) {
-      return Arrays.copyOf(this.data, this.data.length);
-    } else {
-      return null;
-    }
-  }
-
-  @VisibleForTesting
-  public PipelineChannel getPipelineChannel() {
-    return pipelineChannel;
-  }
-
-  /**
-   * Set private data on pipeline.
-   *
-   * @param data -- private data.
-   */
-  public void setData(byte[] data) {
-    if (data != null) {
-      this.data = Arrays.copyOf(data, data.length);
-    }
-  }
-
   /**
    * Gets the State of the pipeline.
    *
    * @return - LifeCycleStates.
    */
   public HddsProtos.LifeCycleState getLifeCycleState() {
-    return pipelineChannel.getLifeCycleState();
+    return lifeCycleState;
   }

   /**
@@ -191,7 +219,7 @@ public HddsProtos.LifeCycleState getLifeCycleState() {
    * @return - Name of the pipeline
    */
   public String getPipelineName() {
-    return pipelineChannel.getName();
+    return name;
   }

   /**
@@ -200,16 +228,16 @@ public String getPipelineName() {
    * @return type - Standalone, Ratis, Chained.
    */
   public HddsProtos.ReplicationType getType() {
-    return pipelineChannel.getType();
+    return type;
   }

   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())
         .append("[");
-    pipelineChannel.getDatanodes().keySet().stream()
+    getDatanodes().keySet().stream()
         .forEach(id -> b.
-            append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id));
+            append(id.endsWith(getLeaderID()) ? "*" + id : id));
     b.append(" name:").append(getPipelineName());
     if (getType() != null) {
       b.append(" type:").append(getType().toString());
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * PipelineChannel information for a {@link Pipeline}.
- */
-public class PipelineChannel {
-  @JsonIgnore
-  private String leaderID;
-  @JsonIgnore
-  private Map<String, DatanodeDetails> datanodes;
-  private LifeCycleState lifeCycleState;
-  private ReplicationType type;
-  private ReplicationFactor factor;
-  private String name;
-  // TODO: change to long based id
-  //private long id;
-
-  public PipelineChannel(String leaderID, LifeCycleState lifeCycleState,
-      ReplicationType replicationType, ReplicationFactor replicationFactor,
-      String name) {
-    this.leaderID = leaderID;
-    this.lifeCycleState = lifeCycleState;
-    this.type = replicationType;
-    this.factor = replicationFactor;
-    this.name = name;
-    datanodes = new TreeMap<>();
-  }
-
-  public String getLeaderID() {
-    return leaderID;
-  }
-
-  public Map<String, DatanodeDetails> getDatanodes() {
-    return datanodes;
-  }
-
-  public LifeCycleState getLifeCycleState() {
-    return lifeCycleState;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public void addMember(DatanodeDetails datanodeDetails) {
-    datanodes.put(datanodeDetails.getUuid().toString(),
-        datanodeDetails);
-  }
-
-  @JsonIgnore
-  public HddsProtos.PipelineChannel getProtobufMessage() {
-    HddsProtos.PipelineChannel.Builder builder =
-        HddsProtos.PipelineChannel.newBuilder();
-    for (DatanodeDetails datanode : datanodes.values()) {
-      builder.addMembers(datanode.getProtoBufMessage());
-    }
-    builder.setLeaderID(leaderID);
-
-    if (this.getLifeCycleState() != null) {
-      builder.setState(this.getLifeCycleState());
-    }
-    if (this.getType() != null) {
-      builder.setType(this.getType());
-    }
-
-    if (this.getFactor() != null) {
-      builder.setFactor(this.getFactor());
-    }
-    return builder.build();
-  }
-
-  public static PipelineChannel getFromProtoBuf(
-      HddsProtos.PipelineChannel transportProtos) {
-    Preconditions.checkNotNull(transportProtos);
-    PipelineChannel pipelineChannel =
-        new PipelineChannel(transportProtos.getLeaderID(),
-            transportProtos.getState(),
-            transportProtos.getType(),
-            transportProtos.getFactor(),
-            transportProtos.getName());
-
-    for (HddsProtos.DatanodeDetailsProto dataID :
-        transportProtos.getMembersList()) {
-      pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID));
-    }
-    return pipelineChannel;
-  }
-}
|
|||||||
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
|
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
|
||||||
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
|
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
|
||||||
public static final String BLOCK_DB = "block.db";
|
public static final String BLOCK_DB = "block.db";
|
||||||
public static final String NODEPOOL_DB = "nodepool.db";
|
|
||||||
public static final String OPEN_CONTAINERS_DB = "openContainers.db";
|
public static final String OPEN_CONTAINERS_DB = "openContainers.db";
|
||||||
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
|
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
|
||||||
public static final String KSM_DB_NAME = "ksm.db";
|
public static final String KSM_DB_NAME = "ksm.db";
|
||||||
|
@@ -126,7 +126,7 @@ public synchronized void run() {
       try {
         // Collect task results
         BackgroundTaskResult result = serviceTimeout > 0
-            ? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS)
+            ? taskResultFuture.get(serviceTimeout, unit)
            : taskResultFuture.get();
         if (LOG.isDebugEnabled()) {
           LOG.debug("task execution result size {}", result.getSize());
@@ -40,7 +40,7 @@ message Port {
     required uint32 value = 2;
 }

-message PipelineChannel {
+message Pipeline {
     required string leaderID = 1;
     repeated DatanodeDetailsProto members = 2;
     optional LifeCycleState state = 3 [default = OPEN];
@@ -49,12 +49,6 @@ message PipelineChannel {
     optional string name = 6;
 }

-// A pipeline is composed of PipelineChannel (Ratis/StandAlone) that back a
-// container.
-message Pipeline {
-    required PipelineChannel pipelineChannel = 2;
-}
-
 message KeyValue {
     required string key = 1;
     optional string value = 2;