Merge remote-tracking branch 'apache-commit/trunk' into HDDS-48

Bharat Viswanadham 2018-06-28 14:28:23 -07:00
commit c104525168
412 changed files with 5479 additions and 5278 deletions


@ -196,6 +196,14 @@ by Google Inc, which can be obtained at:
* HOMEPAGE:
* http://code.google.com/p/snappy/
This product contains a modified portion of UnsignedBytes LexicographicalComparator
from Guava v21 project by Google Inc, which can be obtained at:
* LICENSE:
* license/COPYING (Apache License 2.0)
* HOMEPAGE:
* https://github.com/google/guava
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:


@ -145,6 +145,8 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
mkdir -p "./share/hadoop/ozonefs"
cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
# Optional documentation, could be missing
cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/
cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
@ -153,5 +155,5 @@ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdd
mkdir -p ./share/hadoop/mapreduce
mkdir -p ./share/hadoop/yarn
echo
echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
echo


@ -166,10 +166,6 @@
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</exclusion>
<exclusion>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
@ -495,10 +491,6 @@
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>


@ -156,11 +156,6 @@
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>


@ -3189,25 +3189,25 @@ public void addTags(Properties prop) {
if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) {
String systemTags = prop.getProperty(CommonConfigurationKeys
.HADOOP_TAGS_SYSTEM);
Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
TAGS.addAll(Arrays.asList(systemTags.split(",")));
}
// Get all custom tags
if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) {
String customTags = prop.getProperty(CommonConfigurationKeys
.HADOOP_TAGS_CUSTOM);
Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
TAGS.addAll(Arrays.asList(customTags.split(",")));
}
if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) {
String systemTags = prop.getProperty(CommonConfigurationKeys
.HADOOP_SYSTEM_TAGS);
Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
TAGS.addAll(Arrays.asList(systemTags.split(",")));
}
// Get all custom tags
if (prop.containsKey(CommonConfigurationKeys.HADOOP_CUSTOM_TAGS)) {
String customTags = prop.getProperty(CommonConfigurationKeys
.HADOOP_CUSTOM_TAGS);
Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
TAGS.addAll(Arrays.asList(customTags.split(",")));
}
} catch (Exception ex) {
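
The hunks above replace per-element Arrays.stream(...).forEach(TAGS::add) calls with a single TAGS.addAll(Arrays.asList(...)). A minimal standalone sketch (hypothetical class and tag values, not part of the patch) showing that both forms load the same tags from a comma-separated property:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class TagSplitDemo {
  public static void main(String[] args) {
    String systemTags = "SECURITY,KERBEROS,PERFORMANCE";   // hypothetical property value

    Set<String> viaStream = new HashSet<>();
    Arrays.stream(systemTags.split(",")).forEach(viaStream::add);

    Set<String> viaAddAll = new HashSet<>(Arrays.asList(systemTags.split(",")));

    System.out.println(viaStream.equals(viaAddAll));       // true
  }
}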


@ -18,7 +18,7 @@
package org.apache.hadoop.conf;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringEscapeUtils;
import java.util.Collection;
import java.util.Enumeration;
@ -72,10 +72,10 @@ private Reconfigurable getReconfigurable(HttpServletRequest req) {
private void printHeader(PrintWriter out, String nodeName) {
out.print("<html><head>");
out.printf("<title>%s Reconfiguration Utility</title>%n",
StringEscapeUtils.escapeHtml(nodeName));
StringEscapeUtils.escapeHtml4(nodeName));
out.print("</head><body>\n");
out.printf("<h1>%s Reconfiguration Utility</h1>%n",
StringEscapeUtils.escapeHtml(nodeName));
StringEscapeUtils.escapeHtml4(nodeName));
}
private void printFooter(PrintWriter out) {
@ -103,20 +103,20 @@ private void printConf(PrintWriter out, Reconfigurable reconf) {
out.print("<tr><td>");
if (!reconf.isPropertyReconfigurable(c.prop)) {
out.print("<font color=\"red\">" +
StringEscapeUtils.escapeHtml(c.prop) + "</font>");
StringEscapeUtils.escapeHtml4(c.prop) + "</font>");
changeOK = false;
} else {
out.print(StringEscapeUtils.escapeHtml(c.prop));
out.print(StringEscapeUtils.escapeHtml4(c.prop));
out.print("<input type=\"hidden\" name=\"" +
StringEscapeUtils.escapeHtml(c.prop) + "\" value=\"" +
StringEscapeUtils.escapeHtml(c.newVal) + "\"/>");
StringEscapeUtils.escapeHtml4(c.prop) + "\" value=\"" +
StringEscapeUtils.escapeHtml4(c.newVal) + "\"/>");
}
out.print("</td><td>" +
(c.oldVal == null ? "<it>default</it>" :
StringEscapeUtils.escapeHtml(c.oldVal)) +
StringEscapeUtils.escapeHtml4(c.oldVal)) +
"</td><td>" +
(c.newVal == null ? "<it>default</it>" :
StringEscapeUtils.escapeHtml(c.newVal)) +
StringEscapeUtils.escapeHtml4(c.newVal)) +
"</td>");
out.print("</tr>\n");
}
@ -147,9 +147,9 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
synchronized(oldConf) {
while (params.hasMoreElements()) {
String rawParam = params.nextElement();
String param = StringEscapeUtils.unescapeHtml(rawParam);
String param = StringEscapeUtils.unescapeHtml4(rawParam);
String value =
StringEscapeUtils.unescapeHtml(req.getParameter(rawParam));
StringEscapeUtils.unescapeHtml4(req.getParameter(rawParam));
if (value != null) {
if (value.equals(newConf.getRaw(param)) || value.equals("default") ||
value.equals("null") || value.isEmpty()) {
@ -157,8 +157,8 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
value.isEmpty()) &&
oldConf.getRaw(param) != null) {
out.println("<p>Changed \"" +
StringEscapeUtils.escapeHtml(param) + "\" from \"" +
StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) +
StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
StringEscapeUtils.escapeHtml4(oldConf.getRaw(param)) +
"\" to default</p>");
reconf.reconfigureProperty(param, null);
} else if (!value.equals("default") && !value.equals("null") &&
@ -168,16 +168,16 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
// change from default or value to different value
if (oldConf.getRaw(param) == null) {
out.println("<p>Changed \"" +
StringEscapeUtils.escapeHtml(param) +
StringEscapeUtils.escapeHtml4(param) +
"\" from default to \"" +
StringEscapeUtils.escapeHtml(value) + "\"</p>");
StringEscapeUtils.escapeHtml4(value) + "\"</p>");
} else {
out.println("<p>Changed \"" +
StringEscapeUtils.escapeHtml(param) + "\" from \"" +
StringEscapeUtils.escapeHtml(oldConf.
StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
StringEscapeUtils.escapeHtml4(oldConf.
getRaw(param)) +
"\" to \"" +
StringEscapeUtils.escapeHtml(value) + "\"</p>");
StringEscapeUtils.escapeHtml4(value) + "\"</p>");
}
reconf.reconfigureProperty(param, value);
} else {
@ -185,10 +185,10 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
}
} else {
// parameter value != newConf value
out.println("<p>\"" + StringEscapeUtils.escapeHtml(param) +
out.println("<p>\"" + StringEscapeUtils.escapeHtml4(param) +
"\" not changed because value has changed from \"" +
StringEscapeUtils.escapeHtml(value) + "\" to \"" +
StringEscapeUtils.escapeHtml(newConf.getRaw(param)) +
StringEscapeUtils.escapeHtml4(value) + "\" to \"" +
StringEscapeUtils.escapeHtml4(newConf.getRaw(param)) +
"\" since approval</p>");
}
}
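
The servlet now escapes through commons-lang3, whose HTML helpers carry a 4 suffix (they target HTML 4.0 entities). A small standalone sketch of the migration pattern, assuming commons-lang3 is on the classpath; the node name is a hypothetical input, not taken from the patch:

// Before (commons-lang 2.x), as removed above:
//   import org.apache.commons.lang.StringEscapeUtils;
//   String safe = StringEscapeUtils.escapeHtml(nodeName);

// After (commons-lang3), as added above:
import org.apache.commons.lang3.StringEscapeUtils;

public class EscapeHtml4Demo {
  public static void main(String[] args) {
    String nodeName = "<namenode & friends>";
    String safe = StringEscapeUtils.escapeHtml4(nodeName);
    System.out.println(safe);                                   // &lt;namenode &amp; friends&gt;
    System.out.println(StringEscapeUtils.unescapeHtml4(safe));  // <namenode & friends>
  }
}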


@ -33,8 +33,8 @@
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;


@ -27,7 +27,7 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider.Metadata;


@ -32,7 +32,9 @@
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@ -40,6 +42,7 @@
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.KMSUtil;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
@ -77,7 +80,6 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
@ -130,9 +132,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
private static final ObjectWriter WRITER =
new ObjectMapper().writerWithDefaultPrettyPrinter();
private final Text dtService;
// Allow fallback to default kms server port 9600 for certain tests that do
@ -235,7 +234,7 @@ public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
private static void writeJson(Object obj, OutputStream os)
throws IOException {
Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
WRITER.writeValue(writer, obj);
JsonSerialization.writer().writeValue(writer, obj);
}
/**
@ -543,7 +542,9 @@ private <T> T call(HttpURLConnection conn, Object jsonOutput,
String requestMethod = conn.getRequestMethod();
URL url = conn.getURL();
conn = createConnection(url, requestMethod);
conn.setRequestProperty(CONTENT_TYPE, contentType);
if (contentType != null && !contentType.isEmpty()) {
conn.setRequestProperty(CONTENT_TYPE, contentType);
}
return call(conn, jsonOutput, expectedResponse, klass,
authRetryCount - 1);
}
@ -1087,8 +1088,7 @@ private UserGroupInformation getActualUgi() throws IOException {
actualUgi = currentUgi.getRealUser();
}
if (UserGroupInformation.isSecurityEnabled() &&
!containsKmsDt(actualUgi) &&
!actualUgi.hasKerberosCredentials()) {
!containsKmsDt(actualUgi) && !actualUgi.shouldRelogin()) {
// Use login user is only necessary when Kerberos is enabled
// but the actual user does not have either
// Kerberos credential or KMS delegation token for KMS operations


@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* Byte array backed part handle.
*/
public final class BBPartHandle implements PartHandle {
private static final long serialVersionUID = 0x23ce3eb1;
private final byte[] bytes;
private BBPartHandle(ByteBuffer byteBuffer){
this.bytes = byteBuffer.array();
}
public static PartHandle from(ByteBuffer byteBuffer) {
return new BBPartHandle(byteBuffer);
}
@Override
public ByteBuffer bytes() {
return ByteBuffer.wrap(bytes);
}
@Override
public int hashCode() {
return Arrays.hashCode(bytes);
}
@Override
public boolean equals(Object other) {
if (!(other instanceof PartHandle)) {
return false;
}
PartHandle o = (PartHandle) other;
return bytes().equals(o.bytes());
}
}


@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* Byte array backed upload handle.
*/
public final class BBUploadHandle implements UploadHandle {
private static final long serialVersionUID = 0x69d5509b;
private final byte[] bytes;
private BBUploadHandle(ByteBuffer byteBuffer){
this.bytes = byteBuffer.array();
}
public static UploadHandle from(ByteBuffer byteBuffer) {
return new BBUploadHandle(byteBuffer);
}
@Override
public int hashCode() {
return Arrays.hashCode(bytes);
}
@Override
public ByteBuffer bytes() {
return ByteBuffer.wrap(bytes);
}
@Override
public boolean equals(Object other) {
if (!(other instanceof UploadHandle)) {
return false;
}
UploadHandle o = (UploadHandle) other;
return bytes().equals(o.bytes());
}
}


@ -542,7 +542,7 @@ public class CommonConfigurationKeysPublic {
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY =
"hadoop.security.groups.shell.command.timeout";
/**
* @see
@ -550,7 +550,7 @@ public class CommonConfigurationKeysPublic {
* core-default.xml</a>
*/
public static final long
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT =
0L;
/**
* @see


@ -115,6 +115,27 @@ public abstract class FSDataOutputStreamBuilder
*/
protected abstract B getThisBuilder();
/**
* Construct from a {@link FileContext}.
*
* @param fc FileContext
* @param p path.
* @throws IOException
*/
FSDataOutputStreamBuilder(@Nonnull FileContext fc,
@Nonnull Path p) throws IOException {
Preconditions.checkNotNull(fc);
Preconditions.checkNotNull(p);
this.fs = null;
this.path = p;
AbstractFileSystem afs = fc.getFSofPath(p);
FsServerDefaults defaults = afs.getServerDefaults(p);
bufferSize = defaults.getFileBufferSize();
replication = defaults.getReplication();
blockSize = defaults.getBlockSize();
}
/**
* Constructor.
*/
@ -131,6 +152,7 @@ protected FSDataOutputStreamBuilder(@Nonnull FileSystem fileSystem,
}
protected FileSystem getFS() {
Preconditions.checkNotNull(fs);
return fs;
}


@ -24,6 +24,7 @@
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
@ -35,6 +36,8 @@
import java.util.TreeSet;
import java.util.Map.Entry;
import javax.annotation.Nonnull;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -694,6 +697,69 @@ public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
}.resolve(this, absF);
}
/**
* {@link FSDataOutputStreamBuilder} for {@link FileContext}.
*/
private static final class FCDataOutputStreamBuilder extends
FSDataOutputStreamBuilder<
FSDataOutputStream, FCDataOutputStreamBuilder> {
private final FileContext fc;
private FCDataOutputStreamBuilder(
@Nonnull FileContext fc, @Nonnull Path p) throws IOException {
super(fc, p);
this.fc = fc;
Preconditions.checkNotNull(fc);
}
@Override
protected FCDataOutputStreamBuilder getThisBuilder() {
return this;
}
@Override
public FSDataOutputStream build() throws IOException {
final EnumSet<CreateFlag> flags = getFlags();
List<CreateOpts> createOpts = new ArrayList<>(Arrays.asList(
CreateOpts.blockSize(getBlockSize()),
CreateOpts.bufferSize(getBufferSize()),
CreateOpts.repFac(getReplication()),
CreateOpts.perms(getPermission())
));
if (getChecksumOpt() != null) {
createOpts.add(CreateOpts.checksumParam(getChecksumOpt()));
}
if (getProgress() != null) {
createOpts.add(CreateOpts.progress(getProgress()));
}
if (isRecursive()) {
createOpts.add(CreateOpts.createParent());
}
return fc.create(getPath(), flags,
createOpts.toArray(new CreateOpts[0]));
}
}
/**
* Create a {@link FSDataOutputStreamBuilder} for creating or overwriting
* a file on indicated path.
*
* @param f the file path to create builder for.
* @return {@link FSDataOutputStreamBuilder} to build a
* {@link FSDataOutputStream}.
*
* Upon {@link FSDataOutputStreamBuilder#build()} being invoked,
* builder parameters will be verified by {@link FileContext} and
* {@link AbstractFileSystem#create}. And filesystem states will be modified.
*
* Client should expect {@link FSDataOutputStreamBuilder#build()} throw the
* same exceptions as create(Path, EnumSet, CreateOpts...).
*/
public FSDataOutputStreamBuilder<FSDataOutputStream, ?> create(final Path f)
throws IOException {
return new FCDataOutputStreamBuilder(this, f).create();
}
/**
* Make(create) a directory and all the non-existent parents.
*
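
A hedged usage sketch of the new FileContext builder entry point: create() returns a builder, and build() performs the actual create through AbstractFileSystem#create. The file path is hypothetical and a local FileContext is assumed to be available:

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FcBuilderDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getLocalFSFileContext();
    Path p = new Path("/tmp/fc-builder-demo.txt");        // hypothetical path

    // overwrite(true) adds the OVERWRITE flag next to the CREATE flag set by create().
    try (FSDataOutputStream out = fc.create(p)
        .overwrite(true)
        .build()) {
      out.writeBytes("hello via the FileContext builder\n");
    }
  }
}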


@ -0,0 +1,132 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import com.google.common.base.Charsets;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
/**
* A MultipartUploader that uses the basic FileSystem commands.
* This is done in three stages:
* Init - create a temp _multipart directory.
* PutPart - copying the individual parts of the file to the temp directory.
* Complete - use {@link FileSystem#concat} to merge the files; and then delete
* the temp directory.
*/
public class FileSystemMultipartUploader extends MultipartUploader {
private final FileSystem fs;
public FileSystemMultipartUploader(FileSystem fs) {
this.fs = fs;
}
@Override
public UploadHandle initialize(Path filePath) throws IOException {
Path collectorPath = createCollectorPath(filePath);
fs.mkdirs(collectorPath, FsPermission.getDirDefault());
ByteBuffer byteBuffer = ByteBuffer.wrap(
collectorPath.toString().getBytes(Charsets.UTF_8));
return BBUploadHandle.from(byteBuffer);
}
@Override
public PartHandle putPart(Path filePath, InputStream inputStream,
int partNumber, UploadHandle uploadId, long lengthInBytes)
throws IOException {
byte[] uploadIdByteArray = uploadId.toByteArray();
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
Path partPath =
Path.mergePaths(collectorPath, Path.mergePaths(new Path(Path.SEPARATOR),
new Path(Integer.toString(partNumber) + ".part")));
FSDataOutputStreamBuilder outputStream = fs.createFile(partPath);
FSDataOutputStream fsDataOutputStream = outputStream.build();
IOUtils.copy(inputStream, fsDataOutputStream, 4096);
fsDataOutputStream.close();
return BBPartHandle.from(ByteBuffer.wrap(
partPath.toString().getBytes(Charsets.UTF_8)));
}
private Path createCollectorPath(Path filePath) {
return Path.mergePaths(filePath.getParent(),
Path.mergePaths(new Path(filePath.getName().split("\\.")[0]),
Path.mergePaths(new Path("_multipart"),
new Path(Path.SEPARATOR))));
}
@Override
@SuppressWarnings("deprecation") // rename w/ OVERWRITE
public PathHandle complete(Path filePath,
List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
throws IOException {
handles.sort(Comparator.comparing(Pair::getKey));
List<Path> partHandles = handles
.stream()
.map(pair -> {
byte[] byteArray = pair.getValue().toByteArray();
return new Path(new String(byteArray, 0, byteArray.length,
Charsets.UTF_8));
})
.collect(Collectors.toList());
Path collectorPath = createCollectorPath(filePath);
Path filePathInsideCollector = Path.mergePaths(collectorPath,
new Path(Path.SEPARATOR + filePath.getName()));
fs.create(filePathInsideCollector).close();
fs.concat(filePathInsideCollector,
partHandles.toArray(new Path[handles.size()]));
fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
fs.delete(collectorPath, true);
FileStatus status = fs.getFileStatus(filePath);
return fs.getPathHandle(status);
}
@Override
public void abort(Path filePath, UploadHandle uploadId) throws IOException {
byte[] uploadIdByteArray = uploadId.toByteArray();
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
fs.delete(collectorPath, true);
}
/**
* Factory for creating MultipartUploader objects for file://
* filesystems.
*/
public static class Factory extends MultipartUploaderFactory {
protected MultipartUploader createMultipartUploader(FileSystem fs,
Configuration conf) {
if (fs.getScheme().equals("file")) {
return new FileSystemMultipartUploader(fs);
}
return null;
}
}
}


@ -23,7 +23,6 @@
import java.util.Arrays;
import java.util.LinkedList;
import org.apache.commons.lang.WordUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@ -275,7 +274,7 @@ private void printInstanceHelp(PrintStream out, Command instance) {
listing = null;
}
for (String descLine : WordUtils.wrap(
for (String descLine : StringUtils.wrap(
line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
out.println(prefix + descLine);
}


@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Objects;
import java.util.Optional;
/**
* Opaque handle to an entity in a FileSystem.
*/
public class LocalFileSystemPathHandle implements PathHandle {
private final String path;
private final Long mtime;
public LocalFileSystemPathHandle(String path, Optional<Long> mtime) {
this.path = path;
this.mtime = mtime.orElse(null);
}
public LocalFileSystemPathHandle(ByteBuffer bytes) throws IOException {
if (null == bytes) {
throw new IOException("Missing PathHandle");
}
LocalFileSystemPathHandleProto p =
LocalFileSystemPathHandleProto.parseFrom(ByteString.copyFrom(bytes));
path = p.hasPath() ? p.getPath() : null;
mtime = p.hasMtime() ? p.getMtime() : null;
}
public String getPath() {
return path;
}
public void verify(FileStatus stat) throws InvalidPathHandleException {
if (null == stat) {
throw new InvalidPathHandleException("Could not resolve handle");
}
if (mtime != null && mtime != stat.getModificationTime()) {
throw new InvalidPathHandleException("Content changed");
}
}
@Override
public ByteBuffer bytes() {
LocalFileSystemPathHandleProto.Builder b =
LocalFileSystemPathHandleProto.newBuilder();
b.setPath(path);
if (mtime != null) {
b.setMtime(mtime);
}
return b.build().toByteString().asReadOnlyByteBuffer();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LocalFileSystemPathHandle that = (LocalFileSystemPathHandle) o;
return Objects.equals(path, that.path) &&
Objects.equals(mtime, that.mtime);
}
@Override
public int hashCode() {
return Objects.hash(path, mtime);
}
@Override
public String toString() {
return "LocalFileSystemPathHandle{" +
"path='" + path + '\'' +
", mtime=" + mtime +
'}';
}
}


@ -0,0 +1,90 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* MultipartUploader is an interface for uploading a file in multiple parts,
* possibly across multiple nodes. Users should:
* 1. Initialize an upload
* 2. Upload parts in any order
* 3. Complete the upload in order to have it materialize in the destination FS.
*
* Implementers should ensure that 'complete' reorders the parts if the
* destination FS does not already do it for them.
*/
public abstract class MultipartUploader {
public static final Logger LOG =
LoggerFactory.getLogger(MultipartUploader.class);
/**
* Initialize a multipart upload.
* @param filePath Target path for upload.
* @return unique identifier associating part uploads.
* @throws IOException
*/
public abstract UploadHandle initialize(Path filePath) throws IOException;
/**
* Put part as part of a multipart upload. It should be possible to have
* parts uploaded in any order (or in parallel).
* @param filePath Target path for upload (same as {@link #initialize(Path)}).
* @param inputStream Data for this part.
* @param partNumber Index of the part relative to others.
* @param uploadId Identifier from {@link #initialize(Path)}.
* @param lengthInBytes Target length to read from the stream.
* @return unique PartHandle identifier for the uploaded part.
* @throws IOException
*/
public abstract PartHandle putPart(Path filePath, InputStream inputStream,
int partNumber, UploadHandle uploadId, long lengthInBytes)
throws IOException;
/**
* Complete a multipart upload.
* @param filePath Target path for upload (same as {@link #initialize(Path)}.
* @param handles Identifiers with associated part numbers from
* {@link #putPart(Path, InputStream, int, UploadHandle, long)}.
* Depending on the backend, the list order may be significant.
* @param multipartUploadId Identifier from {@link #initialize(Path)}.
* @return unique PathHandle identifier for the uploaded file.
* @throws IOException
*/
public abstract PathHandle complete(Path filePath,
List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
throws IOException;
/**
* Aborts a multipart upload.
* @param filePath Target path for upload (same as {@link #initialize(Path)}.
* @param multipartuploadId Identifier from {@link #initialize(Path)}.
* @throws IOException
*/
public abstract void abort(Path filePath, UploadHandle multipartuploadId)
throws IOException;
}
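
A hedged sketch of the intended call sequence (initialize, upload parts keyed by part number, complete), not part of the patch. The destination path is hypothetical; whether a given FileSystem supports the flow depends on a MultipartUploaderFactory being registered for its scheme and, for the local implementation earlier in this diff, on concat() and path-handle support:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.MultipartUploaderFactory;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public class MultipartUploadDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dest = new Path("file:///tmp/mpu-demo/target.bin");    // hypothetical destination
    FileSystem fs = dest.getFileSystem(conf);

    // Null when no factory is registered for this scheme.
    MultipartUploader mpu = MultipartUploaderFactory.get(fs, conf);
    if (mpu == null) {
      System.err.println("No multipart uploader for scheme " + fs.getScheme());
      return;
    }

    UploadHandle upload = mpu.initialize(dest);                 // 1. initialize

    // 2. put parts; the part numbers, not the call order, decide the final layout.
    List<Pair<Integer, PartHandle>> parts = new ArrayList<>();
    for (int partNumber = 1; partNumber <= 3; partNumber++) {
      byte[] data = ("part " + partNumber + "\n").getBytes(StandardCharsets.UTF_8);
      PartHandle handle = mpu.putPart(dest, new ByteArrayInputStream(data),
          partNumber, upload, data.length);
      parts.add(Pair.of(partNumber, handle));
    }

    PathHandle result = mpu.complete(dest, parts, upload);      // 3. complete
    System.out.println("Upload completed, path handle: " + result);
  }
}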


@ -0,0 +1,65 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Iterator;
import java.util.ServiceLoader;
/**
* {@link ServiceLoader}-driven uploader API for storage services supporting
* multipart uploads.
*/
public abstract class MultipartUploaderFactory {
public static final Logger LOG =
LoggerFactory.getLogger(MultipartUploaderFactory.class);
/**
* Multipart Uploaders listed as services.
*/
private static ServiceLoader<MultipartUploaderFactory> serviceLoader =
ServiceLoader.load(MultipartUploaderFactory.class,
MultipartUploaderFactory.class.getClassLoader());
// Iterate through the serviceLoader to avoid lazy loading.
// Lazy loading would require synchronization in concurrent use cases.
static {
Iterator<MultipartUploaderFactory> iterServices = serviceLoader.iterator();
while (iterServices.hasNext()) {
iterServices.next();
}
}
public static MultipartUploader get(FileSystem fs, Configuration conf)
throws IOException {
MultipartUploader mpu = null;
for (MultipartUploaderFactory factory : serviceLoader) {
mpu = factory.createMultipartUploader(fs, conf);
if (mpu != null) {
break;
}
}
return mpu;
}
protected abstract MultipartUploader createMultipartUploader(FileSystem fs,
Configuration conf) throws IOException;
}


@ -55,6 +55,9 @@ public static ChecksumParam checksumParam(
ChecksumOpt csumOpt) {
return new ChecksumParam(csumOpt);
}
public static Progress progress(Progressable prog) {
return new Progress(prog);
}
public static Perms perms(FsPermission perm) {
return new Perms(perm);
}


@ -0,0 +1,45 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.Serializable;
import java.nio.ByteBuffer;
/**
* Opaque, serializable reference to a part id for multipart uploads.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface PartHandle extends Serializable {
/**
* @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
byte[] ret = new byte[bb.remaining()];
bb.get(ret);
return ret;
}
ByteBuffer bytes();
@Override
boolean equals(Object other);
}


@ -27,7 +27,7 @@
import java.util.regex.Pattern;
import org.apache.avro.reflect.Stringable;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;


@ -40,6 +40,7 @@
import java.nio.file.attribute.FileTime;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Optional;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
@ -213,6 +214,18 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
new LocalFSFileInputStream(f), bufferSize));
}
@Override
public FSDataInputStream open(PathHandle fd, int bufferSize)
throws IOException {
if (!(fd instanceof LocalFileSystemPathHandle)) {
fd = new LocalFileSystemPathHandle(fd.bytes());
}
LocalFileSystemPathHandle id = (LocalFileSystemPathHandle) fd;
id.verify(getFileStatus(new Path(id.getPath())));
return new FSDataInputStream(new BufferedFSInputStream(
new LocalFSFileInputStream(new Path(id.getPath())), bufferSize));
}
/*********************************************************
* For create()'s FSOutputStream.
*********************************************************/
@ -350,6 +363,18 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
return out;
}
@Override
public void concat(final Path trg, final Path [] psrcs) throws IOException {
final int bufferSize = 4096;
try(FSDataOutputStream out = create(trg)) {
for (Path src : psrcs) {
try(FSDataInputStream in = open(src)) {
IOUtils.copyBytes(in, out, bufferSize, false);
}
}
}
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
// Attempt rename using Java API.
@ -863,6 +888,38 @@ public void setTimes(Path p, long mtime, long atime) throws IOException {
}
}
/**
* Hook to implement support for {@link PathHandle} operations.
* @param stat Referent in the target FileSystem
* @param opts Constraints that determine the validity of the
* {@link PathHandle} reference.
*/
protected PathHandle createPathHandle(FileStatus stat,
Options.HandleOpt... opts) {
if (stat.isDirectory() || stat.isSymlink()) {
throw new IllegalArgumentException("PathHandle only available for files");
}
String authority = stat.getPath().toUri().getAuthority();
if (authority != null && !authority.equals("file://")) {
throw new IllegalArgumentException("Wrong FileSystem: " + stat.getPath());
}
Options.HandleOpt.Data data =
Options.HandleOpt.getOpt(Options.HandleOpt.Data.class, opts)
.orElse(Options.HandleOpt.changed(false));
Options.HandleOpt.Location loc =
Options.HandleOpt.getOpt(Options.HandleOpt.Location.class, opts)
.orElse(Options.HandleOpt.moved(false));
if (loc.allowChange()) {
throw new UnsupportedOperationException("Tracking file movement in " +
"basic FileSystem is not supported");
}
final Path p = stat.getPath();
final Optional<Long> mtime = !data.allowChange()
? Optional.of(stat.getModificationTime())
: Optional.empty();
return new LocalFileSystemPathHandle(p.toString(), mtime);
}
@Override
public boolean supportsSymlinks() {
return true;
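
A hedged sketch of the new local path-handle support: write a file, capture a handle that pins its content via mtime, and reopen it by handle. The path and buffer size are hypothetical; moved(false) is required because movement tracking is rejected by createPathHandle above:

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class LocalPathHandleDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.initialize(URI.create("file:///"), conf);

    Path p = new Path("/tmp/handle-demo.txt");                  // hypothetical path
    try (FSDataOutputStream out = fs.create(p)) {
      out.write("hello handle\n".getBytes(StandardCharsets.UTF_8));
    }

    FileStatus stat = fs.getFileStatus(p);
    // changed(false): record mtime so later content changes invalidate the handle.
    PathHandle handle = fs.getPathHandle(stat,
        Options.HandleOpt.changed(false), Options.HandleOpt.moved(false));

    try (FSDataInputStream in = fs.open(handle, 4096)) {
      byte[] buf = new byte[32];
      int n = in.read(buf);
      System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8));
    }
  }
}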


@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* MultipartUploader for a given file system name/scheme is not supported.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UnsupportedMultipartUploaderException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
*
* @param message exception message.
*/
public UnsupportedMultipartUploaderException(final String message) {
super(message);
}
}


@ -0,0 +1,47 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.Serializable;
import java.nio.ByteBuffer;
/**
* Opaque, serializable reference to an uploadId for multipart uploads.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface UploadHandle extends Serializable {
/**
* @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
byte[] ret = new byte[bb.remaining()];
bb.get(ret);
return ret;
}
ByteBuffer bytes();
@Override
boolean equals(Object other);
}


@ -23,7 +23,7 @@
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;


@ -18,7 +18,7 @@
package org.apache.hadoop.io;
import com.google.common.collect.ComparisonChain;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import java.nio.ByteBuffer;
import java.util.Map;


@ -26,7 +26,6 @@
import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
/**
@ -195,52 +194,43 @@ public int compareTo(byte[] buffer1, int offset1, int length1,
length1 == length2) {
return 0;
}
final int stride = 8;
int minLength = Math.min(length1, length2);
int minWords = minLength / Longs.BYTES;
int strideLimit = minLength & ~(stride - 1);
int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
int i;
/*
* Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
* time is no slower than comparing 4 bytes at a time even on 32-bit.
* On the other hand, it is substantially faster on 64-bit.
*/
for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
for (i = 0; i < strideLimit; i += stride) {
long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
long diff = lw ^ rw;
if (diff != 0) {
if (lw != rw) {
if (!littleEndian) {
return lessThanUnsigned(lw, rw) ? -1 : 1;
}
// Use binary search
int n = 0;
int y;
int x = (int) diff;
if (x == 0) {
x = (int) (diff >>> 32);
n = 32;
}
y = x << 16;
if (y == 0) {
n += 16;
} else {
x = y;
}
y = x << 8;
if (y == 0) {
n += 8;
}
return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
/*
* We want to compare only the first index where left[index] !=
* right[index]. This corresponds to the least significant nonzero
* byte in lw ^ rw, since lw and rw are little-endian.
* Long.numberOfTrailingZeros(diff) tells us the least significant
* nonzero bit, and zeroing out the first three bits of L.nTZ gives
* us the shift to get that least significant nonzero byte. This
* comparison logic is based on UnsignedBytes from Guava v21
*/
int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
}
}
// The epilogue to cover the last (minLength % 8) elements.
for (int i = minWords * Longs.BYTES; i < minLength; i++) {
for (; i < minLength; i++) {
int result = UnsignedBytes.compare(
buffer1[offset1 + i],
buffer2[offset2 + i]);
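
A small standalone sketch (hypothetical word values, not from the patch) of the little-endian byte-extraction trick described in the comment above: the trailing-zero count of lw ^ rw, rounded down to a multiple of 8, selects the first differing byte:

public class LexCompareDemo {
  public static void main(String[] args) {
    // Two words whose in-memory (little-endian) bytes first differ at index 2.
    long lw = 0x0000000000AA0201L;   // bytes, LSB first: 01 02 AA 00 00 00 00 00
    long rw = 0x0000000000BB0201L;   // bytes, LSB first: 01 02 BB 00 00 00 00 00

    long diff = lw ^ rw;                               // 0x0000000000110000
    int n = Long.numberOfTrailingZeros(diff) & ~0x7;   // 16: shift down to the differing byte

    int left  = (int) ((lw >>> n) & 0xFF);             // 0xAA
    int right = (int) ((rw >>> n) & 0xFF);             // 0xBB
    System.out.println(n + " " + (left - right));      // "16 -17" -> left sorts first
  }
}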


@ -22,8 +22,8 @@
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;


@ -46,7 +46,7 @@ public final class ErasureCodeNative {
loadLibrary();
} catch (Throwable t) {
problem = "Loading ISA-L failed: " + t.getMessage();
LOG.error("Loading ISA-L failed", t);
LOG.warn(problem);
}
LOADING_FAILURE_REASON = problem;
}


@ -21,7 +21,7 @@
import java.io.IOException;
import java.io.FileDescriptor;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;


@ -17,8 +17,8 @@
*/
package org.apache.hadoop.ipc;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;


@ -39,7 +39,7 @@
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AtomicDoubleArray;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.metrics2.MetricsCollector;
@ -429,7 +429,7 @@ private void decayCurrentCounts() {
updateAverageResponseTime(true);
} catch (Exception ex) {
LOG.error("decayCurrentCounts exception: " +
ExceptionUtils.getFullStackTrace(ex));
ExceptionUtils.getStackTrace(ex));
throw ex;
}
}


@ -32,7 +32,7 @@
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
import org.apache.hadoop.metrics2.util.MBeans;
@ -286,7 +286,7 @@ public int size() {
*/
@Override
public Iterator<E> iterator() {
throw new NotImplementedException();
throw new NotImplementedException("Code is not implemented");
}
/**


@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;


@ -37,10 +37,8 @@
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.commons.configuration2.builder.fluent.Configurations;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.apache.commons.configuration2.io.FileHandler;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsPlugin;
import org.apache.hadoop.metrics2.filter.GlobFilter;
@ -112,12 +110,11 @@ static MetricsConfig create(String prefix, String... fileNames) {
static MetricsConfig loadFirst(String prefix, String... fileNames) {
for (String fname : fileNames) {
try {
Configuration cf = new Configurations().propertiesBuilder(fname)
.configure(new Parameters().properties()
.setFileName(fname)
.setListDelimiterHandler(new DefaultListDelimiterHandler(',')))
.getConfiguration()
.interpolatedConfiguration();
PropertiesConfiguration pcf = new PropertiesConfiguration();
FileHandler fh = new FileHandler(pcf);
fh.setFileName(fname);
fh.load();
Configuration cf = pcf.interpolatedConfiguration();
LOG.info("Loaded properties from {}", fname);
if (LOG.isDebugEnabled()) {
LOG.debug("Properties: {}", toString(cf));


@ -21,7 +21,7 @@
import java.lang.reflect.Method;
import static com.google.common.base.Preconditions.*;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;


@ -21,7 +21,7 @@
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;


@ -26,7 +26,7 @@
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;


@ -32,7 +32,7 @@
import java.util.function.Function;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;


@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;


@ -37,7 +37,7 @@
import java.util.regex.Pattern;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.commons.lang.time.FastDateFormat;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;


@ -28,7 +28,7 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;


@ -28,7 +28,7 @@
import java.nio.channels.ReadableByteChannel;
import java.nio.ByteBuffer;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.CloseableReferenceCount;


@ -32,7 +32,7 @@
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;


@ -18,7 +18,6 @@
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.StringTokenizer;
@ -26,7 +25,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -52,7 +51,8 @@ public class ShellBasedUnixGroupsMapping extends Configured
protected static final Logger LOG =
LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);
private long timeout = 0L;
private long timeout = CommonConfigurationKeys.
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;
private static final List<String> EMPTY_GROUPS = new LinkedList<>();
@Override
@ -61,10 +61,10 @@ public void setConf(Configuration conf) {
if (conf != null) {
timeout = conf.getTimeDuration(
CommonConfigurationKeys.
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
CommonConfigurationKeys.
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT,
TimeUnit.SECONDS);
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
}
}
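
With the rename, the timeout constant now reads as milliseconds rather than seconds; Configuration#getTimeDuration still converts suffixed values to the requested unit. A minimal sketch (hypothetical 30s setting, not from the patch):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class ShellTimeoutConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("hadoop.security.groups.shell.command.timeout", "30s");

    // Suffixed values are converted; a bare number would be taken as milliseconds.
    long millis = conf.getTimeDuration(
        "hadoop.security.groups.shell.command.timeout", 0L, TimeUnit.MILLISECONDS);
    System.out.println(millis);   // 30000
  }
}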


@ -831,7 +831,9 @@ private long getRefreshTime(KerberosTicket tgt) {
return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
}
private boolean shouldRelogin() {
@InterfaceAudience.Private
@InterfaceStability.Unstable
public boolean shouldRelogin() {
return hasKerberosCredentials() && isHadoopLogin();
}


@ -27,7 +27,7 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.CommandShell;


@ -34,7 +34,7 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import java.util.stream.Collectors;


@ -26,7 +26,7 @@
import java.util.Date;
import java.util.ServiceLoader;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;


@ -17,8 +17,6 @@
*/
package org.apache.hadoop.security.token.delegation.web;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.net.NetUtils;
@ -31,6 +29,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.JsonSerialization;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -56,9 +55,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final ObjectReader READER =
new ObjectMapper().readerFor(Map.class);
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
@ -328,7 +324,7 @@ private Map doDelegationTokenOperation(URL url,
if (contentType != null &&
contentType.contains(APPLICATION_JSON_MIME)) {
try {
ret = READER.readValue(conn.getInputStream());
ret = JsonSerialization.mapReader().readValue(conn.getInputStream());
} catch (Exception ex) {
throw new AuthenticationException(String.format(
"'%s' did not handle the '%s' delegation token operation: %s",


@ -20,8 +20,7 @@
import java.util.ArrayList;
import java.util.LinkedList;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.WordUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
/**
@ -103,7 +102,8 @@ String[] getRow(int idx) {
// Line-wrap if it's too long
String[] lines = new String[] {raw};
if (wrap) {
lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n");
lines = org.apache.hadoop.util.StringUtils.wrap(lines[0], wrapWidth,
"\n", true).split("\n");
}
for (int i=0; i<lines.length; i++) {
if (justification == Justification.LEFT) {


@ -17,9 +17,6 @@
*/
package org.apache.hadoop.util;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -56,11 +53,6 @@ public class HttpExceptionUtils {
private static final String ENTER = System.getProperty("line.separator");
private static final ObjectReader READER =
new ObjectMapper().readerFor(Map.class);
private static final ObjectWriter WRITER =
new ObjectMapper().writerWithDefaultPrettyPrinter();
/**
* Creates a HTTP servlet response serializing the exception in it as JSON.
*
@ -82,7 +74,7 @@ public static void createServletExceptionResponse(
Map<String, Object> jsonResponse = new LinkedHashMap<String, Object>();
jsonResponse.put(ERROR_JSON, json);
Writer writer = response.getWriter();
WRITER.writeValue(writer, jsonResponse);
JsonSerialization.writer().writeValue(writer, jsonResponse);
writer.flush();
}
@ -150,7 +142,7 @@ public static void validateResponse(HttpURLConnection conn,
InputStream es = null;
try {
es = conn.getErrorStream();
Map json = READER.readValue(es);
Map json = JsonSerialization.mapReader().readValue(es);
json = (Map) json.get(ERROR_JSON);
String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
String exMsg = (String) json.get(ERROR_MESSAGE_JSON);
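
Several hunks in this commit replace per-class Jackson ObjectReader/ObjectWriter statics with the shared helpers added to JsonSerialization (its hunk appears next in this diff). A minimal round-trip sketch using those helpers; the payload is hypothetical:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.util.JsonSerialization;

public class JsonHelpersDemo {
  public static void main(String[] args) throws Exception {
    Map<String, Object> error = new LinkedHashMap<>();
    error.put("exception", "IllegalArgumentException");    // hypothetical payload
    error.put("message", "bad request");

    String json = JsonSerialization.writer().writeValueAsString(error);
    Map parsed = JsonSerialization.mapReader().readValue(json);

    System.out.println(json);
    System.out.println(parsed.get("message"));              // bad request
  }
}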


@ -25,14 +25,18 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -65,6 +69,26 @@ public class JsonSerialization<T> {
private final Class<T> classType;
private final ObjectMapper mapper;
private static final ObjectWriter WRITER =
new ObjectMapper().writerWithDefaultPrettyPrinter();
private static final ObjectReader MAP_READER =
new ObjectMapper().readerFor(Map.class);
/**
* @return an ObjectWriter which pretty-prints its output
*/
public static ObjectWriter writer() {
return WRITER;
}
/**
* @return an ObjectReader which returns simple Maps.
*/
public static ObjectReader mapReader() {
return MAP_READER;
}
/**
* Create an instance bound to a specific type.
* @param classType class to marshall
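The two statics above give the rest of the code base a shared, pre-built Jackson writer and Map reader, which is what the HttpExceptionUtils, DelegationTokenAuthenticator and KMS changes elsewhere in this diff switch to. A minimal usage sketch, assuming nothing beyond what the hunk shows; the class name and sample payload are illustrative only:

    import java.io.StringWriter;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.util.JsonSerialization;

    public class JsonSerializationUsageSketch {
      public static void main(String[] args) throws Exception {
        // Serialize a simple map with the shared pretty-printing writer.
        Map<String, Object> payload = new LinkedHashMap<>();
        payload.put("operation", "GETDELEGATIONTOKEN");
        payload.put("renewer", "yarn");
        StringWriter out = new StringWriter();
        JsonSerialization.writer().writeValue(out, payload);

        // Read it back as an untyped Map with the shared map reader.
        Map<?, ?> parsed = JsonSerialization.mapReader().readValue(out.toString());
        System.out.println(parsed.get("operation")); // GETDELEGATIONTOKEN
      }
    }

Reusing one ObjectMapper-backed reader/writer pair also avoids re-creating mappers on error-handling paths, which is presumably why the per-class READER/WRITER fields are deleted elsewhere in this diff.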

View File

@ -1191,7 +1191,7 @@ public ShellCommandExecutor(String[] execString, File dir,
/**
* Returns the timeout value set for the executor's sub-commands.
* @return The timeout value in seconds
* @return The timeout value in milliseconds
*/
@VisibleForTesting
public long getTimeoutInterval() {

View File

@ -35,7 +35,7 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -987,7 +987,7 @@ public static String camelize(String s) {
String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR, '_');
for (String word : words)
sb.append(org.apache.commons.lang.StringUtils.capitalize(word));
sb.append(org.apache.commons.lang3.StringUtils.capitalize(word));
return sb.toString();
}
@ -1183,4 +1183,64 @@ public static boolean isAlpha(String str) {
return true;
}
/**
* Same as WordUtils#wrap in commons-lang 2.6. Unlike commons-lang3, leading
* spaces on the first line are NOT stripped.
*
* @param str the String to be word wrapped, may be null
* @param wrapLength the column to wrap the words at, less than 1 is treated
* as 1
* @param newLineStr the string to insert for a new line,
* <code>null</code> uses the system property line separator
* @param wrapLongWords true if long words (such as URLs) should be wrapped
* @return a line with newlines inserted, <code>null</code> if null input
*/
public static String wrap(String str, int wrapLength, String newLineStr,
boolean wrapLongWords) {
if(str == null) {
return null;
} else {
if(newLineStr == null) {
newLineStr = System.lineSeparator();
}
if(wrapLength < 1) {
wrapLength = 1;
}
int inputLineLength = str.length();
int offset = 0;
StringBuffer wrappedLine = new StringBuffer(inputLineLength + 32);
while(inputLineLength - offset > wrapLength) {
if(str.charAt(offset) == 32) {
++offset;
} else {
int spaceToWrapAt = str.lastIndexOf(32, wrapLength + offset);
if(spaceToWrapAt >= offset) {
wrappedLine.append(str.substring(offset, spaceToWrapAt));
wrappedLine.append(newLineStr);
offset = spaceToWrapAt + 1;
} else if(wrapLongWords) {
wrappedLine.append(str.substring(offset, wrapLength + offset));
wrappedLine.append(newLineStr);
offset += wrapLength;
} else {
spaceToWrapAt = str.indexOf(32, wrapLength + offset);
if(spaceToWrapAt >= 0) {
wrappedLine.append(str.substring(offset, spaceToWrapAt));
wrappedLine.append(newLineStr);
offset = spaceToWrapAt + 1;
} else {
wrappedLine.append(str.substring(offset));
offset = inputLineLength;
}
}
}
}
wrappedLine.append(str.substring(offset));
return wrappedLine.toString();
}
}
}
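A short driver, not part of the patch, that exercises the ported method above; the expected output in the comments follows directly from the loop logic, and the last case restates the javadoc's point about leading spaces relative to commons-lang3:

    import org.apache.hadoop.util.StringUtils;

    public class WrapBehaviourSketch {
      public static void main(String[] args) {
        // Plain word wrapping at column 6.
        System.out.println(StringUtils.wrap("hello world foo", 6, "\n", true));
        // -> hello
        //    world
        //    foo

        // Long words are hard-split when wrapLongWords is true.
        System.out.println(StringUtils.wrap("abcdefghij", 4, "\n", true));
        // -> abcd
        //    efgh
        //    ij

        // Input that already fits within wrapLength is returned untouched,
        // so leading spaces on the first line survive.
        System.out.println("[" + StringUtils.wrap("  hi", 10, "\n", true) + "]");
        // -> [  hi]
      }
    }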

View File

@ -216,6 +216,21 @@ private void readProcMemInfoFile() {
readProcMemInfoFile(false);
}
/**
*
* Wrapper for Long.parseLong() that returns zero if the value is
* invalid. Under some circumstances, swapFree in /proc/meminfo can
* go negative, reported as a very large decimal value.
*/
private long safeParseLong(String strVal) {
long parsedVal;
try {
parsedVal = Long.parseLong(strVal);
} catch (NumberFormatException nfe) {
parsedVal = 0;
}
return parsedVal;
}
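An illustrative standalone check, not part of the patch, of the failure mode this helper guards against: a negative SwapFree is reported by the kernel as a wrapped-around unsigned 64-bit number, which overflows long and makes Long.parseLong throw:

    public class SafeParseSketch {
      // Mirrors the private helper above: fall back to 0 on unparseable values.
      private static long safeParseLong(String strVal) {
        try {
          return Long.parseLong(strVal);
        } catch (NumberFormatException nfe) {
          return 0;
        }
      }

      public static void main(String[] args) {
        // Larger than Long.MAX_VALUE, so parsing fails and 0 is returned.
        System.out.println(safeParseLong("18446744073709551596")); // 0
        // Ordinary values parse as before.
        System.out.println(safeParseLong("2096472"));              // 2096472
      }
    }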
/**
* Read /proc/meminfo, parse and compute memory information.
* @param readAgain if false, read only on the first time
@ -252,9 +267,9 @@ private void readProcMemInfoFile(boolean readAgain) {
} else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
swapSize = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(MEMFREE_STRING)) {
ramSizeFree = Long.parseLong(mat.group(2));
ramSizeFree = safeParseLong(mat.group(2));
} else if (mat.group(1).equals(SWAPFREE_STRING)) {
swapSizeFree = Long.parseLong(mat.group(2));
swapSizeFree = safeParseLong(mat.group(2));
} else if (mat.group(1).equals(INACTIVE_STRING)) {
inactiveSize = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(INACTIVEFILE_STRING)) {

View File

@ -68,3 +68,11 @@ message FileStatusProto {
optional bytes ec_data = 17;
optional uint32 flags = 18 [default = 0];
}
/**
* Placeholder type for consistent basic FileSystem operations.
*/
message LocalFileSystemPathHandleProto {
optional uint64 mtime = 1;
optional string path = 2;
}

View File

@ -0,0 +1,16 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.hadoop.fs.FileSystemMultipartUploader$Factory

View File

@ -62,7 +62,7 @@
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.conf;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

View File

@ -20,7 +20,7 @@
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.junit.Assume;

View File

@ -0,0 +1,143 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public abstract class AbstractSystemMultipartUploaderTest {
abstract FileSystem getFS() throws IOException;
abstract Path getBaseTestPath();
@Test
public void testMultipartUpload() throws Exception {
FileSystem fs = getFS();
Path file = new Path(getBaseTestPath(), "some-file");
MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
UploadHandle uploadHandle = mpu.initialize(file);
List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
StringBuilder sb = new StringBuilder();
for (int i = 1; i <= 100; ++i) {
String contents = "ThisIsPart" + i + "\n";
sb.append(contents);
int len = contents.getBytes().length;
InputStream is = IOUtils.toInputStream(contents, "UTF-8");
PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
partHandles.add(Pair.of(i, partHandle));
}
PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
byte[] fdData = IOUtils.toByteArray(fs.open(fd));
byte[] fileData = IOUtils.toByteArray(fs.open(file));
String readString = new String(fdData);
assertEquals(sb.toString(), readString);
assertArrayEquals(fdData, fileData);
}
@Test
public void testMultipartUploadReverseOrder() throws Exception {
FileSystem fs = getFS();
Path file = new Path(getBaseTestPath(), "some-file");
MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
UploadHandle uploadHandle = mpu.initialize(file);
List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
StringBuilder sb = new StringBuilder();
for (int i = 1; i <= 100; ++i) {
String contents = "ThisIsPart" + i + "\n";
sb.append(contents);
}
for (int i = 100; i > 0; --i) {
String contents = "ThisIsPart" + i + "\n";
int len = contents.getBytes().length;
InputStream is = IOUtils.toInputStream(contents, "UTF-8");
PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
partHandles.add(Pair.of(i, partHandle));
}
PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
byte[] fdData = IOUtils.toByteArray(fs.open(fd));
byte[] fileData = IOUtils.toByteArray(fs.open(file));
String readString = new String(fdData);
assertEquals(sb.toString(), readString);
assertArrayEquals(fdData, fileData);
}
@Test
public void testMultipartUploadReverseOrderNoNContiguousPartNumbers()
throws Exception {
FileSystem fs = getFS();
Path file = new Path(getBaseTestPath(), "some-file");
MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
UploadHandle uploadHandle = mpu.initialize(file);
List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
StringBuilder sb = new StringBuilder();
for (int i = 2; i <= 200; i += 2) {
String contents = "ThisIsPart" + i + "\n";
sb.append(contents);
}
for (int i = 200; i > 0; i -= 2) {
String contents = "ThisIsPart" + i + "\n";
int len = contents.getBytes().length;
InputStream is = IOUtils.toInputStream(contents, "UTF-8");
PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
partHandles.add(Pair.of(i, partHandle));
}
PathHandle fd = mpu.complete(file, partHandles, uploadHandle);
byte[] fdData = IOUtils.toByteArray(fs.open(fd));
byte[] fileData = IOUtils.toByteArray(fs.open(file));
String readString = new String(fdData);
assertEquals(sb.toString(), readString);
assertArrayEquals(fdData, fileData);
}
@Test
public void testMultipartUploadAbort() throws Exception {
FileSystem fs = getFS();
Path file = new Path(getBaseTestPath(), "some-file");
MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
UploadHandle uploadHandle = mpu.initialize(file);
for (int i = 100; i >= 50; --i) {
String contents = "ThisIsPart" + i + "\n";
int len = contents.getBytes().length;
InputStream is = IOUtils.toInputStream(contents, "UTF-8");
PartHandle partHandle = mpu.putPart(file, is, i, uploadHandle, len);
}
mpu.abort(file, uploadHandle);
String contents = "ThisIsPart49\n";
int len = contents.getBytes().length;
InputStream is = IOUtils.toInputStream(contents, "UTF-8");
try {
mpu.putPart(file, is, 49, uploadHandle, len);
fail("putPart should have thrown an exception");
} catch (IOException ok) {
// ignore
}
}
}

View File

@ -19,7 +19,7 @@
import java.io.IOException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.test.GenericTestUtils;

View File

@ -811,6 +811,48 @@ public void testCreateFlagAppendCreateOverwrite() throws IOException {
Assert.fail("Excepted exception not thrown");
}
@Test
public void testBuilderCreateNonExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testBuilderCreateNonExistingFile");
FSDataOutputStream out = fc.create(p).build();
writeData(fc, p, out, data, data.length);
}
@Test
public void testBuilderCreateExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testBuilderCreateExistingFile");
createFile(p);
FSDataOutputStream out = fc.create(p).overwrite(true).build();
writeData(fc, p, out, data, data.length);
}
@Test
public void testBuilderCreateAppendNonExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testBuilderCreateAppendNonExistingFile");
FSDataOutputStream out = fc.create(p).append().build();
writeData(fc, p, out, data, data.length);
}
@Test
public void testBuilderCreateAppendExistingFile() throws IOException {
Path p = getTestRootPath(fc, "test/testBuilderCreateAppendExistingFile");
createFile(p);
FSDataOutputStream out = fc.create(p).append().build();
writeData(fc, p, out, data, 2 * data.length);
}
@Test
public void testBuilderCreateRecursive() throws IOException {
Path p = getTestRootPath(fc, "test/parent/no/exist/file1");
try (FSDataOutputStream out = fc.create(p).build()) {
fail("Should throw FileNotFoundException on non-exist directory");
} catch (FileNotFoundException e) {
}
FSDataOutputStream out = fc.create(p).recursive().build();
writeData(fc, p, out, data, data.length);
}
private static void writeData(FileContext fc, Path p, FSDataOutputStream out,
byte[] data, long expectedLen) throws IOException {
out.write(data, 0, data.length);

View File

@ -22,7 +22,6 @@
import java.io.FileNotFoundException;
import java.util.EnumSet;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
import org.apache.hadoop.io.IOUtils;

View File

@ -17,7 +17,7 @@
*/
package org.apache.hadoop.fs;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.fs;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
import org.junit.Before;
@ -67,15 +67,15 @@ public class TestFileSystemStorageStatistics {
@Before
public void setup() {
statistics.incrementBytesRead(RandomUtils.nextInt(100));
statistics.incrementBytesWritten(RandomUtils.nextInt(100));
statistics.incrementLargeReadOps(RandomUtils.nextInt(100));
statistics.incrementWriteOps(RandomUtils.nextInt(100));
statistics.incrementBytesRead(RandomUtils.nextInt(0, 100));
statistics.incrementBytesWritten(RandomUtils.nextInt(0, 100));
statistics.incrementLargeReadOps(RandomUtils.nextInt(0, 100));
statistics.incrementWriteOps(RandomUtils.nextInt(0, 100));
statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100));
statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100));
statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100));
statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100));
statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(0, 100));
statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100));
statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100));
statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100));
}
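For reference, the signature change behind these edits, sketched outside the patch: commons-lang 2's RandomUtils.nextInt(int n) drew from [0, n), while commons-lang3's RandomUtils.nextInt(int startInclusive, int endExclusive) takes both bounds, so the 0 lower bound has to be spelled out:

    import org.apache.commons.lang3.RandomUtils;

    public class RandomUtilsMigrationSketch {
      public static void main(String[] args) {
        // commons-lang 2.x:  RandomUtils.nextInt(100)     -> value in [0, 100)
        // commons-lang3:     RandomUtils.nextInt(0, 100)  -> value in [0, 100)
        int sample = RandomUtils.nextInt(0, 100);
        System.out.println("sample in [0, 100): " + sample);
      }
    }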
@Test

View File

@ -689,17 +689,18 @@ public void testFSOutputStreamBuilder() throws Exception {
// and permission
FSDataOutputStreamBuilder builder =
fileSys.createFile(path);
builder.build();
Assert.assertEquals("Should be default block size",
builder.getBlockSize(), fileSys.getDefaultBlockSize());
Assert.assertEquals("Should be default replication factor",
builder.getReplication(), fileSys.getDefaultReplication());
Assert.assertEquals("Should be default buffer size",
builder.getBufferSize(),
fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
IO_FILE_BUFFER_SIZE_DEFAULT));
Assert.assertEquals("Should be default permission",
builder.getPermission(), FsPermission.getFileDefault());
try (FSDataOutputStream stream = builder.build()) {
Assert.assertEquals("Should be default block size",
builder.getBlockSize(), fileSys.getDefaultBlockSize());
Assert.assertEquals("Should be default replication factor",
builder.getReplication(), fileSys.getDefaultReplication());
Assert.assertEquals("Should be default buffer size",
builder.getBufferSize(),
fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
IO_FILE_BUFFER_SIZE_DEFAULT));
Assert.assertEquals("Should be default permission",
builder.getPermission(), FsPermission.getFileDefault());
}
// Test set 0 to replication, block size and buffer size
builder = fileSys.createFile(path);

View File

@ -0,0 +1,65 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.test.GenericTestUtils.getRandomizedTestDir;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.File;
import java.io.IOException;
/**
* Test the FileSystemMultipartUploader on local file system.
*/
public class TestLocalFileSystemMultipartUploader
extends AbstractSystemMultipartUploaderTest {
private static FileSystem fs;
private File tmp;
@BeforeClass
public static void init() throws IOException {
fs = LocalFileSystem.getLocal(new Configuration());
}
@Before
public void setup() throws IOException {
tmp = getRandomizedTestDir();
tmp.mkdirs();
}
@After
public void tearDown() throws IOException {
tmp.delete();
}
@Override
public FileSystem getFS() {
return fs;
}
@Override
public Path getBaseTestPath() {
return new Path(tmp.getAbsolutePath());
}
}

View File

@ -123,6 +123,12 @@ public void testChanged() throws IOException {
HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts)
.orElseThrow(IllegalArgumentException::new);
FileStatus stat = testFile(B1);
try {
// Temporary workaround while RawLocalFS supports only second precision
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new IOException(e);
}
// modify the file by appending data
appendFile(getFileSystem(), stat.getPath(), B2);
byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length);

View File

@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.rawlocal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract;
public class TestRawlocalContractPathHandle
extends AbstractContractPathHandleTest {
public TestRawlocalContractPathHandle(String testname,
Options.HandleOpt[] opts, boolean serialized) {
super(testname, opts, serialized);
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RawlocalFSContract(conf);
}
}

View File

@ -17,8 +17,8 @@
*/
package org.apache.hadoop.fs.shell;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
@ -56,11 +56,11 @@ public static int initialize(Path dir) throws Exception {
fs.mkdirs(toDirPath);
int numTotalFiles = 0;
int numDirs = RandomUtils.nextInt(5);
int numDirs = RandomUtils.nextInt(0, 5);
for (int dirCount = 0; dirCount < numDirs; ++dirCount) {
Path subDirPath = new Path(fromDirPath, "subdir" + dirCount);
fs.mkdirs(subDirPath);
int numFiles = RandomUtils.nextInt(10);
int numFiles = RandomUtils.nextInt(0, 10);
for (int fileCount = 0; fileCount < numFiles; ++fileCount) {
numTotalFiles++;
Path subFile = new Path(subDirPath, "file" + fileCount);
@ -115,7 +115,7 @@ public void testCopyFromLocalWithThreads() throws Exception {
Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
int numFiles = TestCopyFromLocal.initialize(dir);
int maxThreads = Runtime.getRuntime().availableProcessors() * 2;
int randThreads = RandomUtils.nextInt(maxThreads - 1) + 1;
int randThreads = RandomUtils.nextInt(0, maxThreads - 1) + 1;
String numThreads = Integer.toString(randThreads);
run(new TestMultiThreadedCopy(randThreads,
randThreads == 1 ? 0 : numFiles), "-t", numThreads,

View File

@ -26,7 +26,7 @@
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;

View File

@ -20,7 +20,7 @@
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.metrics.RpcMetrics;

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.ipc;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

View File

@ -38,7 +38,7 @@
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.KerberosAuthException;

View File

@ -43,7 +43,7 @@
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket.DomainChannel;
import org.apache.hadoop.test.GenericTestUtils;

View File

@ -173,6 +173,37 @@ public void testGetNumericGroupsResolvable() throws Exception {
assertTrue(groups.contains("zzz"));
}
public long getTimeoutInterval(String timeout) {
Configuration conf = new Configuration();
String userName = "foobarnonexistinguser";
conf.set(
CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
timeout);
TestDelayedGroupCommand mapping = ReflectionUtils
.newInstance(TestDelayedGroupCommand.class, conf);
ShellCommandExecutor executor = mapping.createGroupExecutor(userName);
return executor.getTimeoutInterval();
}
@Test
public void testShellTimeOutConf() {
// Test a 1 second max-runtime timeout
assertEquals(
"Expected the group names executor to carry the configured timeout",
1000L, getTimeoutInterval("1s"));
// Test a 1 minute max-runtime timeout
assertEquals(
"Expected the group names executor to carry the configured timeout",
60000L, getTimeoutInterval("1m"));
// Test a 1 millisecond max-runtime timeout
assertEquals(
"Expected the group names executor to carry the configured timeout",
1L, getTimeoutInterval("1"));
}
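A minimal sketch, not part of the patch, of the duration parsing that the renamed *_TIMEOUT_KEY implies; it assumes the value is read via Configuration.getTimeDuration with milliseconds as the default unit, which is what the 1s/1m/1 expectations above suggest:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class GroupShellTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(
            CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
            "1m");
        // "1s" -> 1000, "1m" -> 60000, and a bare "1" stays 1 (already ms).
        long timeoutMs = conf.getTimeDuration(
            CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
            0L, TimeUnit.MILLISECONDS);
        System.out.println(timeoutMs); // 60000
      }
    }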
private class TestGroupResolvable
extends ShellBasedUnixGroupsMapping {
/**
@ -222,7 +253,7 @@ public void testGetGroupsResolvable() throws Exception {
private static class TestDelayedGroupCommand
extends ShellBasedUnixGroupsMapping {
private Long timeoutSecs = 2L;
private Long timeoutSecs = 1L;
TestDelayedGroupCommand() {
super();
@ -249,12 +280,12 @@ public void testFiniteGroupResolutionTime() throws Exception {
String userName = "foobarnonexistinguser";
String commandTimeoutMessage =
"ran longer than the configured timeout limit";
long testTimeout = 1L;
long testTimeout = 500L;
// Test a 500 millisecond max-runtime timeout
conf.setLong(
CommonConfigurationKeys.
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
testTimeout);
TestDelayedGroupCommand mapping =
@ -306,7 +337,7 @@ public void testFiniteGroupResolutionTime() throws Exception {
conf = new Configuration();
long defaultTimeout =
CommonConfigurationKeys.
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT;
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT;
mapping =
ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);

View File

@ -21,7 +21,7 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.service.launcher;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;

View File

@ -40,7 +40,7 @@
import java.util.regex.Pattern;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileUtil;
@ -344,7 +344,7 @@ public static void assertExceptionContains(String expectedText,
throw new AssertionError(E_NULL_THROWABLE_STRING, t);
}
if (expectedText != null && !msg.contains(expectedText)) {
String prefix = org.apache.commons.lang.StringUtils.isEmpty(message)
String prefix = org.apache.commons.lang3.StringUtils.isEmpty(message)
? "" : (message + ": ");
throw new AssertionError(
String.format("%s Expected to find '%s' %s: %s",

View File

@ -17,7 +17,7 @@
*/
package org.apache.hadoop.util;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.LoggerFactory;
import org.junit.Assert;
import org.junit.Test;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.util;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang3.SystemUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;

View File

@ -161,6 +161,36 @@ int readDiskBlockInformation(String diskName, int defSector) {
"DirectMap2M: 2027520 kB\n" +
"DirectMap1G: 132120576 kB\n";
static final String MEMINFO_FORMAT3 =
"MemTotal: %d kB\n" +
"MemFree: %s kB\n" +
"Buffers: 138244 kB\n" +
"Cached: 947780 kB\n" +
"SwapCached: 142880 kB\n" +
"Active: 3229888 kB\n" +
"Inactive: %d kB\n" +
"SwapTotal: %d kB\n" +
"SwapFree: %s kB\n" +
"Dirty: 122012 kB\n" +
"Writeback: 0 kB\n" +
"AnonPages: 2710792 kB\n" +
"Mapped: 24740 kB\n" +
"Slab: 132528 kB\n" +
"SReclaimable: 105096 kB\n" +
"SUnreclaim: 27432 kB\n" +
"PageTables: 11448 kB\n" +
"NFS_Unstable: 0 kB\n" +
"Bounce: 0 kB\n" +
"CommitLimit: 4125904 kB\n" +
"Committed_AS: 4143556 kB\n" +
"VmallocTotal: 34359738367 kB\n" +
"VmallocUsed: 1632 kB\n" +
"VmallocChunk: 34359736375 kB\n" +
"HugePages_Total: %d\n" +
"HugePages_Free: 0\n" +
"HugePages_Rsvd: 0\n" +
"Hugepagesize: 2048 kB";
static final String CPUINFO_FORMAT =
"processor : %s\n" +
"vendor_id : AuthenticAMD\n" +
@ -384,6 +414,36 @@ public void parsingProcMemFile2() throws IOException {
(nrHugePages * 2048) + swapTotal));
}
/**
* Test parsing /proc/meminfo
* @throws IOException
*/
@Test
public void parsingProcMemFileWithBadValues() throws IOException {
long memTotal = 4058864L;
long memFree = 0L; // bad value should return 0
long inactive = 567732L;
long swapTotal = 2096472L;
long swapFree = 0L; // bad value should return 0
int nrHugePages = 10;
String badFreeValue = "18446744073709551596";
File tempFile = new File(FAKE_MEMFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
fWriter.write(String.format(MEMINFO_FORMAT3,
memTotal, badFreeValue, inactive, swapTotal, badFreeValue, nrHugePages));
fWriter.close();
assertEquals(plugin.getAvailablePhysicalMemorySize(),
1024L * (memFree + inactive));
assertEquals(plugin.getAvailableVirtualMemorySize(),
1024L * (memFree + inactive + swapFree));
assertEquals(plugin.getPhysicalMemorySize(),
1024L * (memTotal - (nrHugePages * 2048)));
assertEquals(plugin.getVirtualMemorySize(),
1024L * (memTotal - (nrHugePages * 2048) + swapTotal));
}
@Test
public void testCoreCounts() throws IOException {

View File

@ -122,4 +122,9 @@
<value>true</value>
</property>
<property>
<name>fs.contract.supports-content-check</name>
<value>true</value>
</property>
</configuration>

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
trap "echo SIGTERM trapped!" SIGTERM
trap "echo SIGINT trapped!" SIGINT
echo "$$" > "$1"
while true; do
sleep 1.3
done

View File

@ -17,10 +17,9 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.http.JettyUtils;
import org.apache.hadoop.util.JsonSerialization;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
@ -67,8 +66,7 @@ public void writeTo(Object obj, Class<?> aClass, Type type,
OutputStream outputStream) throws IOException, WebApplicationException {
Writer writer = new OutputStreamWriter(outputStream, Charset
.forName("UTF-8"));
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
JsonSerialization.writer().writeValue(writer, obj);
}
}

View File

@ -265,6 +265,11 @@
<artifactId>hadoop-ozone-docs</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-filesystem</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>
<build>
<plugins>

View File

@ -16,18 +16,6 @@
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../../ozone:/opt/hadoop
ports:
- 9870:9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:

View File

@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

View File

@ -16,19 +16,6 @@
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../../ozone:/opt/hadoop
- ./jmxpromo.jar:/opt/jmxpromo.jar
ports:
- 9870:9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:

View File

@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

View File

@ -244,32 +244,6 @@ public final class ScmConfigKeys {
public static final String
OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
/**
* Don't start processing a pool if we have not had a minimum number of
* seconds from the last processing.
*/
public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
"ozone.scm.container.report.processing.interval";
public static final String
OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
/**
* This determines the total number of pools to be processed in parallel.
*/
public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
"ozone.scm.max.nodepool.processing.threads";
public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
/**
* These 2 settings control the number of threads in executor pool and time
* outs for thw container reports from all nodes.
*/
public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
"ozone.scm.max.container.report.threads";
public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
"ozone.scm.container.reports.wait.timeout";
public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
"5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry";

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
@ -30,6 +31,7 @@
import org.apache.hadoop.util.Time;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import static java.lang.Math.max;
@ -63,6 +65,13 @@ public class ContainerInfo
private String owner;
private long containerID;
private long deleteTransactionId;
/**
* Allows you to maintain private data on ContainerInfo. This is not
* serialized via protobuf, just allows us to maintain some private data.
*/
@JsonIgnore
private byte[] data;
ContainerInfo(
long containerID,
HddsProtos.LifeCycleState state,
@ -295,6 +304,29 @@ public String toJsonString() throws IOException {
return WRITER.writeValueAsString(this);
}
/**
* Returns private data that is set on this containerInfo.
*
* @return blob, the user can interpret it any way they like.
*/
public byte[] getData() {
if (this.data != null) {
return Arrays.copyOf(this.data, this.data.length);
} else {
return null;
}
}
/**
* Set private data on ContainerInfo object.
*
* @param data -- private data.
*/
public void setData(byte[] data) {
if (data != null) {
this.data = Arrays.copyOf(data, data.length);
}
}
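The accessors above copy the byte array in both directions. A small standalone sketch of why, deliberately not using ContainerInfo itself so it does not depend on its builder; the class name is made up:

    import java.util.Arrays;

    public class PrivateDataCopySketch {
      private byte[] data;

      // Mirrors ContainerInfo.setData: store a copy, not the caller's array.
      public void setData(byte[] data) {
        if (data != null) {
          this.data = Arrays.copyOf(data, data.length);
        }
      }

      // Mirrors ContainerInfo.getData: hand back a copy, never the internal array.
      public byte[] getData() {
        return data == null ? null : Arrays.copyOf(data, data.length);
      }

      public static void main(String[] args) {
        PrivateDataCopySketch info = new PrivateDataCopySketch();
        byte[] blob = {1, 2, 3};
        info.setData(blob);
        blob[0] = 99;                          // caller-side mutation is invisible...
        System.out.println(info.getData()[0]); // 1
        info.getData()[1] = 99;                // ...and so is mutating a returned copy
        System.out.println(info.getData()[1]); // 2
      }
    }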
/**
* Builder class for ContainerInfo.
*/

View File

@ -27,14 +27,14 @@
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;
import java.util.List;
/**
@ -46,7 +46,7 @@ public class Pipeline {
static {
ObjectMapper mapper = new ObjectMapper();
String[] ignorableFieldNames = {"data"};
String[] ignorableFieldNames = {"leaderID", "datanodes"};
FilterProvider filters = new SimpleFilterProvider()
.addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter
.serializeAllExcept(ignorableFieldNames));
@ -57,38 +57,66 @@ public class Pipeline {
WRITER = mapper.writer(filters);
}
private PipelineChannel pipelineChannel;
/**
* Allows you to maintain private data on pipelines. This is not serialized
* via protobuf, just allows us to maintain some private data.
*/
@JsonIgnore
private byte[] data;
private String leaderID;
@JsonIgnore
private Map<String, DatanodeDetails> datanodes;
private HddsProtos.LifeCycleState lifeCycleState;
private HddsProtos.ReplicationType type;
private HddsProtos.ReplicationFactor factor;
private String name;
// TODO: change to long based id
//private long id;
/**
* Constructs a new pipeline data structure.
*
* @param pipelineChannel - transport information for this container
* @param leaderID - Leader datanode id
* @param lifeCycleState - Pipeline State
* @param replicationType - Replication protocol
* @param replicationFactor - replication count on datanodes
* @param name - pipelineName
*/
public Pipeline(PipelineChannel pipelineChannel) {
this.pipelineChannel = pipelineChannel;
data = null;
public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState,
HddsProtos.ReplicationType replicationType,
HddsProtos.ReplicationFactor replicationFactor, String name) {
this.leaderID = leaderID;
this.lifeCycleState = lifeCycleState;
this.type = replicationType;
this.factor = replicationFactor;
this.name = name;
datanodes = new TreeMap<>();
}
/**
* Gets pipeline object from protobuf.
*
* @param pipeline - ProtoBuf definition for the pipeline.
* @param pipelineProto - ProtoBuf definition for the pipeline.
* @return Pipeline Object
*/
public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
Preconditions.checkNotNull(pipeline);
PipelineChannel pipelineChannel =
PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
return new Pipeline(pipelineChannel);
public static Pipeline getFromProtoBuf(
HddsProtos.Pipeline pipelineProto) {
Preconditions.checkNotNull(pipelineProto);
Pipeline pipeline =
new Pipeline(pipelineProto.getLeaderID(),
pipelineProto.getState(),
pipelineProto.getType(),
pipelineProto.getFactor(),
pipelineProto.getName());
for (HddsProtos.DatanodeDetailsProto dataID :
pipelineProto.getMembersList()) {
pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID));
}
return pipeline;
}
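A hedged construction sketch, not part of the patch: with PipelineChannel folded in, callers build a Pipeline directly from the leader id plus state/type/factor/name and attach datanodes with addMember. The enum constants used here (OPEN, STAND_ALONE, ONE) are assumed from the HddsProtos definitions referenced later in this diff, and the leader id string is a placeholder:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    public class PipelineConstructionSketch {
      public static void main(String[] args) {
        // The leader id would normally be a datanode UUID string.
        Pipeline pipeline = new Pipeline(
            "leader-uuid",
            HddsProtos.LifeCycleState.OPEN,
            HddsProtos.ReplicationType.STAND_ALONE,
            HddsProtos.ReplicationFactor.ONE,
            "pipeline-1");
        // Datanodes are attached afterwards via addMember(DatanodeDetails).
        System.out.println(pipeline.getPipelineName()); // pipeline-1
        System.out.println(pipeline.getFactor());       // ONE

        // Round-tripping through the flattened protobuf message.
        HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
        Pipeline restored = Pipeline.getFromProtoBuf(proto);
        System.out.println(restored.getLeaderID());     // leader-uuid
      }
    }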
/**
* returns the replication count.
* @return Replication Factor
*/
public HddsProtos.ReplicationFactor getFactor() {
return pipelineChannel.getFactor();
return factor;
}
/**
@ -98,19 +126,34 @@ public HddsProtos.ReplicationFactor getFactor() {
*/
@JsonIgnore
public DatanodeDetails getLeader() {
return pipelineChannel.getDatanodes().get(pipelineChannel.getLeaderID());
return getDatanodes().get(leaderID);
}
public void addMember(DatanodeDetails datanodeDetails) {
datanodes.put(datanodeDetails.getUuid().toString(),
datanodeDetails);
}
public Map<String, DatanodeDetails> getDatanodes() {
return datanodes;
}
/**
* Returns the leader host.
*
* @return First Machine.
*/
public String getLeaderHost() {
return pipelineChannel.getDatanodes()
.get(pipelineChannel.getLeaderID()).getHostName();
return getDatanodes()
.get(leaderID).getHostName();
}
/**
*
* @return the leader datanode ID
*/
public String getLeaderID() {
return leaderID;
}
/**
* Returns all machines that make up this pipeline.
*
@ -118,7 +161,7 @@ public String getLeaderHost() {
*/
@JsonIgnore
public List<DatanodeDetails> getMachines() {
return new ArrayList<>(pipelineChannel.getDatanodes().values());
return new ArrayList<>(getDatanodes().values());
}
/**
@ -128,7 +171,7 @@ public List<DatanodeDetails> getMachines() {
*/
public List<String> getDatanodeHosts() {
List<String> dataHosts = new ArrayList<>();
for (DatanodeDetails id : pipelineChannel.getDatanodes().values()) {
for (DatanodeDetails id : getDatanodes().values()) {
dataHosts.add(id.getHostName());
}
return dataHosts;
@ -143,46 +186,31 @@ public List<String> getDatanodeHosts() {
public HddsProtos.Pipeline getProtobufMessage() {
HddsProtos.Pipeline.Builder builder =
HddsProtos.Pipeline.newBuilder();
builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
for (DatanodeDetails datanode : datanodes.values()) {
builder.addMembers(datanode.getProtoBufMessage());
}
builder.setLeaderID(leaderID);
if (this.getLifeCycleState() != null) {
builder.setState(this.getLifeCycleState());
}
if (this.getType() != null) {
builder.setType(this.getType());
}
if (this.getFactor() != null) {
builder.setFactor(this.getFactor());
}
return builder.build();
}
/**
* Returns private data that is set on this pipeline.
*
* @return blob, the user can interpret it any way they like.
*/
public byte[] getData() {
if (this.data != null) {
return Arrays.copyOf(this.data, this.data.length);
} else {
return null;
}
}
@VisibleForTesting
public PipelineChannel getPipelineChannel() {
return pipelineChannel;
}
/**
* Set private data on pipeline.
*
* @param data -- private data.
*/
public void setData(byte[] data) {
if (data != null) {
this.data = Arrays.copyOf(data, data.length);
}
}
/**
* Gets the State of the pipeline.
*
* @return - LifeCycleStates.
*/
public HddsProtos.LifeCycleState getLifeCycleState() {
return pipelineChannel.getLifeCycleState();
return lifeCycleState;
}
/**
@ -191,7 +219,7 @@ public HddsProtos.LifeCycleState getLifeCycleState() {
* @return - Name of the pipeline
*/
public String getPipelineName() {
return pipelineChannel.getName();
return name;
}
/**
@ -200,16 +228,16 @@ public String getPipelineName() {
* @return type - Standalone, Ratis, Chained.
*/
public HddsProtos.ReplicationType getType() {
return pipelineChannel.getType();
return type;
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append("[");
pipelineChannel.getDatanodes().keySet().stream()
getDatanodes().keySet().stream()
.forEach(id -> b.
append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id));
append(id.endsWith(getLeaderID()) ? "*" + id : id));
b.append(" name:").append(getPipelineName());
if (getType() != null) {
b.append(" type:").append(getType().toString());

View File

@ -1,124 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import java.util.Map;
import java.util.TreeMap;
/**
* PipelineChannel information for a {@link Pipeline}.
*/
public class PipelineChannel {
@JsonIgnore
private String leaderID;
@JsonIgnore
private Map<String, DatanodeDetails> datanodes;
private LifeCycleState lifeCycleState;
private ReplicationType type;
private ReplicationFactor factor;
private String name;
// TODO: change to long based id
//private long id;
public PipelineChannel(String leaderID, LifeCycleState lifeCycleState,
ReplicationType replicationType, ReplicationFactor replicationFactor,
String name) {
this.leaderID = leaderID;
this.lifeCycleState = lifeCycleState;
this.type = replicationType;
this.factor = replicationFactor;
this.name = name;
datanodes = new TreeMap<>();
}
public String getLeaderID() {
return leaderID;
}
public Map<String, DatanodeDetails> getDatanodes() {
return datanodes;
}
public LifeCycleState getLifeCycleState() {
return lifeCycleState;
}
public ReplicationType getType() {
return type;
}
public ReplicationFactor getFactor() {
return factor;
}
public String getName() {
return name;
}
public void addMember(DatanodeDetails datanodeDetails) {
datanodes.put(datanodeDetails.getUuid().toString(),
datanodeDetails);
}
@JsonIgnore
public HddsProtos.PipelineChannel getProtobufMessage() {
HddsProtos.PipelineChannel.Builder builder =
HddsProtos.PipelineChannel.newBuilder();
for (DatanodeDetails datanode : datanodes.values()) {
builder.addMembers(datanode.getProtoBufMessage());
}
builder.setLeaderID(leaderID);
if (this.getLifeCycleState() != null) {
builder.setState(this.getLifeCycleState());
}
if (this.getType() != null) {
builder.setType(this.getType());
}
if (this.getFactor() != null) {
builder.setFactor(this.getFactor());
}
return builder.build();
}
public static PipelineChannel getFromProtoBuf(
HddsProtos.PipelineChannel transportProtos) {
Preconditions.checkNotNull(transportProtos);
PipelineChannel pipelineChannel =
new PipelineChannel(transportProtos.getLeaderID(),
transportProtos.getState(),
transportProtos.getType(),
transportProtos.getFactor(),
transportProtos.getName());
for (HddsProtos.DatanodeDetailsProto dataID :
transportProtos.getMembersList()) {
pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID));
}
return pipelineChannel;
}
}

View File

@ -96,7 +96,6 @@ public final class OzoneConsts {
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
public static final String BLOCK_DB = "block.db";
public static final String NODEPOOL_DB = "nodepool.db";
public static final String OPEN_CONTAINERS_DB = "openContainers.db";
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
public static final String KSM_DB_NAME = "ksm.db";

View File

@ -126,7 +126,7 @@ public synchronized void run() {
try {
// Collect task results
BackgroundTaskResult result = serviceTimeout > 0
? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS)
? taskResultFuture.get(serviceTimeout, unit)
: taskResultFuture.get();
if (LOG.isDebugEnabled()) {
LOG.debug("task execution result size {}", result.getSize());

View File

@ -40,7 +40,7 @@ message Port {
required uint32 value = 2;
}
message PipelineChannel {
message Pipeline {
required string leaderID = 1;
repeated DatanodeDetailsProto members = 2;
optional LifeCycleState state = 3 [default = OPEN];
@ -49,12 +49,6 @@ message PipelineChannel {
optional string name = 6;
}
// A pipeline is composed of PipelineChannel (Ratis/StandAlone) that back a
// container.
message Pipeline {
required PipelineChannel pipelineChannel = 2;
}
message KeyValue {
required string key = 1;
optional string value = 2;

Some files were not shown because too many files have changed in this diff Show More