- * This class is a tool for migrating data from an older to a newer version
- * of an S3 filesystem.
- *
- * All files in the filesystem are migrated by re-writing the block metadata
- * - no datafiles are touched.
- *
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MigrationTool extends Configured implements Tool {
-
- private S3Service s3Service;
- private S3Bucket bucket;
-
- public static void main(String[] args) throws Exception {
- int res = ToolRunner.run(new MigrationTool(), args);
- System.exit(res);
- }
-
- @Override
- public int run(String[] args) throws Exception {
-
- if (args.length == 0) {
- System.err.println("Usage: MigrationTool
- * Extracts AWS credentials from the filesystem URI or configuration.
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3Credentials {
-
- private String accessKey;
- private String secretAccessKey;
-
- /**
- * @param uri bucket URI optionally containing username and password.
- * @param conf configuration
- * @throws IllegalArgumentException if credentials for S3 cannot be
- * determined.
- * @throws IOException if credential providers are misconfigured and we have
- * to talk to them.
- */
- public void initialize(URI uri, Configuration conf) throws IOException {
- if (uri.getHost() == null) {
- throw new IllegalArgumentException("Invalid hostname in URI " + uri);
- }
- S3xLoginHelper.Login login =
- S3xLoginHelper.extractLoginDetailsWithWarnings(uri);
- if (login.hasLogin()) {
- accessKey = login.getUser();
- secretAccessKey = login.getPassword();
- }
- String scheme = uri.getScheme();
- String accessKeyProperty = String.format("fs.%s.awsAccessKeyId", scheme);
- String secretAccessKeyProperty =
- String.format("fs.%s.awsSecretAccessKey", scheme);
- if (accessKey == null) {
- accessKey = conf.getTrimmed(accessKeyProperty);
- }
- if (secretAccessKey == null) {
- final char[] pass = conf.getPassword(secretAccessKeyProperty);
- if (pass != null) {
- secretAccessKey = (new String(pass)).trim();
- }
- }
- if (accessKey == null && secretAccessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Access Key ID and Secret Access " +
- "Key must be specified " +
- "by setting the " +
- accessKeyProperty + " and " +
- secretAccessKeyProperty +
- " properties (respectively).");
- } else if (accessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Access Key ID must be specified " +
- "by setting the " +
- accessKeyProperty + " property.");
- } else if (secretAccessKey == null) {
- throw new IllegalArgumentException("AWS " +
- "Secret Access Key must be " +
- "specified by setting the " +
- secretAccessKeyProperty +
- " property.");
- }
-
- }
-
- public String getAccessKey() {
- return accessKey;
- }
-
- public String getSecretAccessKey() {
- return secretAccessKey;
- }
-}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
deleted file mode 100644
index 6a49d1a083..0000000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ /dev/null
@@ -1,516 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
-import org.apache.hadoop.fs.s3native.S3xLoginHelper;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
-import org.apache.hadoop.util.Progressable;
-
-/**
- * A block-based {@link FileSystem} backed by
- * Amazon S3.
- *
- * @see NativeS3FileSystem
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class S3FileSystem extends FileSystem {
-
- private URI uri;
-
- private FileSystemStore store;
-
- private Path workingDir;
-
- public S3FileSystem() {
- // set store in initialize()
- }
-
- public S3FileSystem(FileSystemStore store) {
- this.store = store;
- }
-
- /**
- * Return the protocol scheme for the FileSystem.
- *
- * @return <code>s3</code>
- */
- @Override
- public String getScheme() {
- return "s3";
- }
-
- @Override
- public URI getUri() {
- return uri;
- }
-
- @Override
- public void initialize(URI uri, Configuration conf) throws IOException {
- super.initialize(uri, conf);
- if (store == null) {
- store = createDefaultStore(conf);
- }
- store.initialize(uri, conf);
- setConf(conf);
- this.uri = S3xLoginHelper.buildFSURI(uri);
- this.workingDir =
- new Path("/user", System.getProperty("user.name")).makeQualified(this);
- }
-
- private static FileSystemStore createDefaultStore(Configuration conf) {
- FileSystemStore store = new Jets3tFileSystemStore();
-
- RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- conf.getInt("fs.s3.maxRetries", 4),
- conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
- Map
-A distributed, block-based implementation of {@link
-org.apache.hadoop.fs.FileSystem} that uses Amazon S3
-as a backing store.
-
-Files are stored in S3 as blocks (represented by
-{@link org.apache.hadoop.fs.s3.Block}), which have an ID and a length.
-Block metadata is stored in S3 as a small record (represented by
-{@link org.apache.hadoop.fs.s3.INode}) using the URL-encoded
-path string as a key. Inodes record the file type (regular file or directory) and the list of blocks.
-This design makes it easy to seek to any given position in a file by reading the inode data to compute
-which block to access, then using S3's support for
-HTTP Range headers
-to start streaming from the correct position.
-Renames are also efficient since only the inode is moved (by a DELETE followed by a PUT since
-S3 does not support renames).
-
-For a single file /dir1/file1 which takes two blocks of storage, the file structure in S3
-would be something like this:
-
-/
-/dir1
-/dir1/file1
-block-6415776850131549260
-block-3026438247347758425
-
-Inodes start with a leading /, while blocks are prefixed with block-.
-
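As a rough, self-contained illustration of the seek arithmetic described above (the Block and INode types are deleted by this patch, so this sketch uses a plain list of block lengths rather than the real classes):

    import java.util.List;

    /**
     * Illustration only: given the block lengths recorded in an inode,
     * find which block holds an absolute file offset.
     */
    public class BlockLocator {

      /** Returns {blockIndex, offsetWithinBlock} for an absolute file position. */
      static long[] locate(List<Long> blockLengths, long pos) {
        long remaining = pos;
        for (int i = 0; i < blockLengths.size(); i++) {
          long len = blockLengths.get(i);
          if (remaining < len) {
            return new long[] {i, remaining};
          }
          remaining -= len;
        }
        throw new IllegalArgumentException("position " + pos + " is past end of file");
      }

      public static void main(String[] args) {
        // A file made of two blocks, as in the /dir1/file1 example above.
        List<Long> blocks = List.of(67108864L, 1048576L);
        long[] loc = locate(blocks, 67200000L);
        // A reader would then issue "Range: bytes=<offsetWithinBlock>-" against that block's object.
        System.out.println("block index " + loc[0] + ", offset within block " + loc[1]);
      }
    }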
* A note about directories. S3 of course has no "native" support for them.
* The idiom we choose then is: for any directory created by this class,
@@ -85,8 +91,6 @@
* is never returned.
*
*
- *
- * @see org.apache.hadoop.fs.s3.S3FileSystem
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
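To make the directory idiom from the javadoc above concrete, here is a minimal sketch assuming the marker convention follows the FOLDER_SUFFIX constant kept in the hunk below; the pathToKey helper is a simplification introduced for illustration, not the method from this class:

    /**
     * Sketch: a directory is represented by a 0-byte object whose key is the
     * directory path plus a marker suffix.
     */
    public class DirMarkerSketch {
      private static final String FOLDER_SUFFIX = "_$folder$";

      /** Simplified: an absolute path maps to an S3 key by dropping the leading '/'. */
      static String pathToKey(String absolutePath) {
        return absolutePath.startsWith("/") ? absolutePath.substring(1) : absolutePath;
      }

      public static void main(String[] args) {
        // mkdirs("/dir1") would create an empty object under this key:
        System.out.println(pathToKey("/dir1") + FOLDER_SUFFIX);   // prints dir1_$folder$
      }
    }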
@@ -98,7 +102,12 @@ public class NativeS3FileSystem extends FileSystem {
private static final String FOLDER_SUFFIX = "_$folder$";
static final String PATH_DELIMITER = Path.SEPARATOR;
private static final int S3_MAX_LISTING_LENGTH = 1000;
-
+
+ static {
+ // Add the deprecated config keys
+ addDeprecatedConfigKeys();
+ }
+
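The body of addDeprecatedConfigKeys() is not part of this hunk. As a hedged sketch of how such a hook is usually wired up in Hadoop, using Configuration.addDeprecations (the old-to-new key names shown are assumptions for illustration, not necessarily the mappings this patch registers):

    import org.apache.hadoop.conf.Configuration;

    final class DeprecatedKeysSketch {
      private DeprecatedKeysSketch() {
      }

      /** Register old fs.s3.* property names as deprecated aliases of the new keys. */
      static void addDeprecatedConfigKeys() {
        Configuration.addDeprecations(new Configuration.DeprecationDelta[] {
            // Old key -> new key; the concrete names here are assumed, not taken from the patch.
            new Configuration.DeprecationDelta("fs.s3.buffer.dir", "fs.s3n.buffer.dir"),
            new Configuration.DeprecationDelta("fs.s3.maxRetries", "fs.s3n.maxRetries"),
            new Configuration.DeprecationDelta("fs.s3.sleepTimeSeconds", "fs.s3n.sleepTimeSeconds")
        });
      }
    }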
static class NativeS3FsInputStream extends FSInputStream {
private NativeFileSystemStore store;
@@ -257,8 +266,10 @@ public NativeS3FsOutputStream(Configuration conf,
}
private File newBackupFile() throws IOException {
- if (lDirAlloc == null) {
- lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
+ if (conf.get(S3_NATIVE_BUFFER_DIR_KEY, null) != null) {
+ lDirAlloc = new LocalDirAllocator(S3_NATIVE_BUFFER_DIR_KEY);
+ } else {
+ lDirAlloc = new LocalDirAllocator(S3_NATIVE_BUFFER_DIR_DEFAULT);
}
File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
result.deleteOnExit();
@@ -342,8 +353,9 @@ private static NativeFileSystemStore createDefaultStore(Configuration conf) {
NativeFileSystemStore store = new Jets3tNativeFileSystemStore();
RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- conf.getInt("fs.s3.maxRetries", 4),
- conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
+ conf.getInt(S3_NATIVE_MAX_RETRIES_KEY, S3_NATIVE_MAX_RETRIES_DEFAUL),
+ conf.getLong(S3_NATIVE_SLEEP_TIME_KEY, S3_NATIVE_SLEEP_TIME_DEFAULT),
+ TimeUnit.SECONDS);
Map
+ * Extracts AWS credentials from the filesystem URI or configuration.
+ *
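For context on the createDefaultStore() hunk above: only the hard-coded property names are swapped for config-key constants; the retry wiring around them is unchanged. A self-contained sketch of that general pattern, with a toy Store interface standing in for NativeFileSystemStore (the names here are placeholders, not the patch's code):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryWiringSketch {

      /** Toy stand-in for NativeFileSystemStore, to keep the sketch self-contained. */
      public interface Store {
        void storeFile(String key) throws IOException;
      }

      static Store withRetries(Store store, int maxRetries, long sleepSeconds) {
        // Retry a failed call up to maxRetries times, sleeping a fixed interval between attempts.
        RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            maxRetries, sleepSeconds, TimeUnit.SECONDS);
        // Only storeFile() is retried here; methods not in the map use the default policy.
        Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
        methodNameToPolicyMap.put("storeFile", basePolicy);
        return (Store) RetryProxy.create(Store.class, store, methodNameToPolicyMap);
      }
    }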