HADOOP-4952. Add new improved file system interface FileContext for the application writer. Contributed by Sanjay Radia.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@816398 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
7ee09e0939
commit
0294c49df6
@ -193,6 +193,9 @@ Trunk (unreleased changes)
|
||||
HADOOP-6246. Add backward compatibility support to use deprecated decimal
|
||||
umask from old configuration. (Jakob Homan via suresh)
|
||||
|
||||
HADOOP-4952. Add new improved file system interface FileContext for the
|
||||
application writer (Sanjay Radia via suresh)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-4565. Added CombineFileInputFormat to use data locality information
|
||||
|
1423
src/java/org/apache/hadoop/fs/FileContext.java
Normal file
1423
src/java/org/apache/hadoop/fs/FileContext.java
Normal file
File diff suppressed because it is too large
Load Diff
@ -23,7 +23,6 @@
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
@ -244,7 +243,7 @@ public static void closeAll() throws IOException {
|
||||
/** Make sure that a path specifies a FileSystem. */
|
||||
public Path makeQualified(Path path) {
|
||||
checkPath(path);
|
||||
return path.makeQualified(this);
|
||||
return path.makeQualified(this.getUri(), this.getWorkingDirectory());
|
||||
}
|
||||
|
||||
/** create a file with the provided permission
|
||||
@ -363,6 +362,26 @@ public BlockLocation[] getFileBlockLocations(FileStatus file,
|
||||
String[] host = { "localhost" };
|
||||
return new BlockLocation[] { new BlockLocation(name, host, 0, file.getLen()) };
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Return an array containing hostnames, offset and size of
|
||||
* portions of the given file. For a nonexistent
|
||||
* file or regions, null will be returned.
|
||||
*
|
||||
* This call is most helpful with DFS, where it returns
|
||||
* hostnames of machines that contain the given file.
|
||||
*
|
||||
* The FileSystem will simply return an elt containing 'localhost'.
|
||||
*/
|
||||
public BlockLocation[] getFileBlockLocations(Path p,
|
||||
long start, long len) throws IOException {
|
||||
if (p == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
FileStatus file = getFileStatus(p);
|
||||
return getFileBlockLocations(file, start, len);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a set of server default configuration values
|
||||
@ -555,7 +574,7 @@ public FSDataOutputStream create(Path f,
|
||||
* Opens an FSDataOutputStream at the indicated Path with write-progress
|
||||
* reporting.
|
||||
* @param f the file name to open.
|
||||
* @param permission
|
||||
* @param permission - applied against umask
|
||||
* @param flag determines the semantic of this create.
|
||||
* @param bufferSize the size of the buffer to be used.
|
||||
* @param replication required block replication for the file.
|
||||
@ -569,6 +588,46 @@ public abstract FSDataOutputStream create(Path f, FsPermission permission,
|
||||
EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
|
||||
Progressable progress) throws IOException ;
|
||||
|
||||
/*
|
||||
* This version of the create method assumes that the permission
|
||||
* of create does not matter.
|
||||
* It has been added to support the FileContext that processes the permission
|
||||
* with umask before calling this method.
|
||||
* This a temporary method added to support the transition from FileSystem
|
||||
* to FileContext for user applications.
|
||||
*/
|
||||
@Deprecated
|
||||
protected FSDataOutputStream primitiveCreate(Path f,
|
||||
FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
|
||||
short replication, long blockSize, Progressable progress,
|
||||
int bytesPerChecksum) throws IOException {
|
||||
|
||||
// Default impl assumes that permissions do not matter and
|
||||
// nor does the bytesPerChecksum hence
|
||||
// calling the regular create is good enough.
|
||||
// FSs that implement permissions should override this.
|
||||
|
||||
return this.create(f, absolutePermission, flag, bufferSize, replication,
|
||||
blockSize, progress);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This version of the mkdirs method assumes that the permission.
|
||||
* It has been added to support the FileContext that processes the the permission
|
||||
* with umask before calling this method.
|
||||
* This a temporary method added to support the transition from FileSystem
|
||||
* to FileContext for user applications.
|
||||
*/
|
||||
@Deprecated
|
||||
protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
|
||||
throws IOException {
|
||||
// Default impl is to assume that permissions do not matter and hence
|
||||
// calling the regular mkdirs is good enough.
|
||||
// FSs that implement permissions should override this.
|
||||
return this.mkdirs(f, absolutePermission);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Creates the given Path as a brand-new zero-length file. If
|
||||
@ -1142,8 +1201,8 @@ private void error(String s, String pattern, int pos) throws IOException {
|
||||
* The default implementation returns "/user/$USER/".
|
||||
*/
|
||||
public Path getHomeDirectory() {
|
||||
return new Path("/user/"+System.getProperty("user.name"))
|
||||
.makeQualified(this);
|
||||
return this.makeQualified(
|
||||
new Path("/user/"+System.getProperty("user.name")));
|
||||
}
|
||||
|
||||
|
||||
@ -1160,6 +1219,23 @@ public Path getHomeDirectory() {
|
||||
* @return the directory pathname
|
||||
*/
|
||||
public abstract Path getWorkingDirectory();
|
||||
|
||||
|
||||
/**
|
||||
* Note: with the new FilesContext class, getWorkingDirectory()
|
||||
* will be removed.
|
||||
* The working directory is implemented in FilesContext.
|
||||
*
|
||||
* Some file systems like LocalFileSystem have an initial workingDir
|
||||
* that we use as the starting workingDir. For other file systems
|
||||
* like HDFS there is no built in notion of an inital workingDir.
|
||||
*
|
||||
* @return if there is built in notion of workingDir then it
|
||||
* is returned; else a null is returned.
|
||||
*/
|
||||
protected Path getInitialWorkingDirectory() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Call {@link #mkdirs(Path, FsPermission)} with default permission.
|
||||
|
@ -167,7 +167,11 @@ public void setWorkingDirectory(Path newDir) {
|
||||
public Path getWorkingDirectory() {
|
||||
return fs.getWorkingDirectory();
|
||||
}
|
||||
|
||||
|
||||
protected Path getInitialWorkingDirectory() {
|
||||
return fs.getInitialWorkingDirectory();
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
@Override
|
||||
public FsStatus getStatus(Path p) throws IOException {
|
||||
@ -276,4 +280,19 @@ public void setPermission(Path p, FsPermission permission
|
||||
) throws IOException {
|
||||
fs.setPermission(p, permission);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected FSDataOutputStream primitiveCreate(Path f,
|
||||
FsPermission absolutePermission, EnumSet<CreateFlag> flag,
|
||||
int bufferSize, short replication, long blockSize, Progressable progress, int bytesPerChecksum)
|
||||
throws IOException {
|
||||
return fs.primitiveCreate(f, absolutePermission, flag,
|
||||
bufferSize, replication, blockSize, progress, bytesPerChecksum);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean primitiveMkdir(Path f, FsPermission abdolutePermission)
|
||||
throws IOException {
|
||||
return fs.primitiveMkdir(f, abdolutePermission);
|
||||
}
|
||||
}
|
||||
|
113
src/java/org/apache/hadoop/fs/FsConfig.java
Normal file
113
src/java/org/apache/hadoop/fs/FsConfig.java
Normal file
@ -0,0 +1,113 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.net.URI;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
||||
/**
|
||||
* This class is thin layer to manage the FS related keys in
|
||||
* a configuration object.
|
||||
* It provides convenience static method to set and get the keys from a
|
||||
* a configuration.
|
||||
*
|
||||
*/
|
||||
|
||||
final class FsConfig {
|
||||
private FsConfig() {}
|
||||
|
||||
// Configuration keys and default values in the config file
|
||||
// TBD note we should deprecate the keys constants elsewhere
|
||||
|
||||
|
||||
// The Keys
|
||||
static final String FS_DEFAULT_NAME_KEY = "fs.default.name";
|
||||
static final String FS_HOME_DIR_ROOT_KEY = "fs.homeDir";
|
||||
static final String FS_REPLICATION_FACTOR_KEY = "dfs.replication";
|
||||
static final String FS_BLOCK_SIZE_KEY = "dfs.block.size";
|
||||
static final String IO_BUFFER_SIZE_KEY ="io.file.buffer.size";
|
||||
|
||||
|
||||
// The default values
|
||||
// Default values of SERVER_DEFAULT(-1) implies use the ones from
|
||||
// the target file system where files are created.
|
||||
static final String FS_DEFAULT_NAME = "file:///";
|
||||
static final String FS_HOME_DIR_ROOT = "/user"; // relative to FS_DEFAULT
|
||||
static final short FS_DEFAULT_REPLICATION_FACTOR = 3;
|
||||
static final long FS_DEFAULT_BLOCK_SIZE = 32 * 1024 * 1024;
|
||||
static final int IO_BUFFER_SIZE =4096;
|
||||
|
||||
|
||||
|
||||
public static String getDefaultFsURI(final Configuration conf) {
|
||||
return conf.get(FS_DEFAULT_NAME_KEY, FS_DEFAULT_NAME);
|
||||
}
|
||||
|
||||
public static String getHomeDir(final Configuration conf) {
|
||||
return conf.get(FS_HOME_DIR_ROOT_KEY, FS_HOME_DIR_ROOT);
|
||||
}
|
||||
|
||||
public static short getDefaultReplicationFactor(final Configuration conf) {
|
||||
return (short)
|
||||
conf.getInt(FS_REPLICATION_FACTOR_KEY, FS_DEFAULT_REPLICATION_FACTOR);
|
||||
}
|
||||
|
||||
public static long getDefaultBlockSize(final Configuration conf) {
|
||||
return conf.getLong(FS_BLOCK_SIZE_KEY, FS_DEFAULT_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
|
||||
public static int getDefaultIOBuffersize(final Configuration conf) {
|
||||
return conf.getInt(IO_BUFFER_SIZE_KEY, IO_BUFFER_SIZE);
|
||||
}
|
||||
|
||||
public static Class<?> getImplClass(URI uri, Configuration conf) {
|
||||
String scheme = uri.getScheme();
|
||||
if (scheme == null) {
|
||||
throw new IllegalArgumentException("No scheme");
|
||||
}
|
||||
return conf.getClass("fs." + uri.getScheme() + ".impl", null);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* The Setters: see the note on the javdoc for the class above.
|
||||
*/
|
||||
|
||||
public static void setDefaultFS(final Configuration conf, String uri) {
|
||||
conf.set(FS_DEFAULT_NAME_KEY, uri);
|
||||
}
|
||||
|
||||
public static void setHomeDir(final Configuration conf, String path) {
|
||||
conf.set(FS_HOME_DIR_ROOT_KEY, path);
|
||||
}
|
||||
|
||||
public static void setDefaultReplicationFactor(final Configuration conf,
|
||||
short rf) {
|
||||
conf.setInt(FS_REPLICATION_FACTOR_KEY, rf);
|
||||
}
|
||||
|
||||
public static void setDefaultBlockSize(final Configuration conf, long bs) {
|
||||
conf.setLong(FS_BLOCK_SIZE_KEY, bs);
|
||||
}
|
||||
|
||||
public static void setDefaultIOBuffersize(final Configuration conf, int bs) {
|
||||
conf.setInt(IO_BUFFER_SIZE_KEY, bs);
|
||||
}
|
||||
}
|
@ -126,6 +126,13 @@ public Path(String pathString) {
|
||||
initialize(scheme, authority, path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct a path from a URI
|
||||
*/
|
||||
public Path(URI aUri) {
|
||||
uri = aUri;
|
||||
}
|
||||
|
||||
/** Construct a Path from components. */
|
||||
public Path(String scheme, String authority, String path) {
|
||||
checkPathArg( path );
|
||||
@ -175,10 +182,23 @@ public FileSystem getFileSystem(Configuration conf) throws IOException {
|
||||
return FileSystem.get(this.toUri(), conf);
|
||||
}
|
||||
|
||||
/** True if the directory of this path is absolute. */
|
||||
public boolean isAbsolute() {
|
||||
/**
|
||||
* True if the path component (i.e. directory) of this URI is absolute.
|
||||
*/
|
||||
public boolean isUriPathAbsolute() {
|
||||
int start = hasWindowsDrive(uri.getPath(), true) ? 3 : 0;
|
||||
return uri.getPath().startsWith(SEPARATOR, start);
|
||||
}
|
||||
|
||||
/** True if the directory of this path is absolute. */
|
||||
/**
|
||||
* There is some ambiguity here. An absolute path is a slash
|
||||
* relative name without a scheme or an authority.
|
||||
* So either this method was incorrectly named or its
|
||||
* implementation is incorrect.
|
||||
*/
|
||||
public boolean isAbsolute() {
|
||||
return isUriPathAbsolute();
|
||||
}
|
||||
|
||||
/** Returns the final component of this path.*/
|
||||
@ -265,29 +285,41 @@ public int depth() {
|
||||
return depth;
|
||||
}
|
||||
|
||||
/** Returns a qualified path object. */
|
||||
|
||||
/**
|
||||
* Returns a qualified path object.
|
||||
*
|
||||
* Deprecated - use {@link #makeQualified(URI, Path)}
|
||||
*/
|
||||
|
||||
@Deprecated
|
||||
public Path makeQualified(FileSystem fs) {
|
||||
return makeQualified(fs.getUri(), fs.getWorkingDirectory());
|
||||
}
|
||||
|
||||
|
||||
/** Returns a qualified path object. */
|
||||
public Path makeQualified(URI defaultUri, Path workingDir ) {
|
||||
Path path = this;
|
||||
if (!isAbsolute()) {
|
||||
path = new Path(fs.getWorkingDirectory(), this);
|
||||
path = new Path(workingDir, this);
|
||||
}
|
||||
|
||||
URI pathUri = path.toUri();
|
||||
URI fsUri = fs.getUri();
|
||||
|
||||
String scheme = pathUri.getScheme();
|
||||
String authority = pathUri.getAuthority();
|
||||
|
||||
if (scheme != null &&
|
||||
(authority != null || fsUri.getAuthority() == null))
|
||||
(authority != null || defaultUri.getAuthority() == null))
|
||||
return path;
|
||||
|
||||
if (scheme == null) {
|
||||
scheme = fsUri.getScheme();
|
||||
scheme = defaultUri.getScheme();
|
||||
}
|
||||
|
||||
if (authority == null) {
|
||||
authority = fsUri.getAuthority();
|
||||
authority = defaultUri.getAuthority();
|
||||
if (authority == null) {
|
||||
authority = "";
|
||||
}
|
||||
|
@ -46,7 +46,7 @@ public class RawLocalFileSystem extends FileSystem {
|
||||
private Path workingDir;
|
||||
|
||||
public RawLocalFileSystem() {
|
||||
workingDir = new Path(System.getProperty("user.dir")).makeQualified(this);
|
||||
workingDir = getInitialWorkingDirectory();
|
||||
}
|
||||
|
||||
/** Convert a path to a File. */
|
||||
@ -96,10 +96,10 @@ public int read(byte[] data, int offset, int length) throws IOException {
|
||||
}
|
||||
|
||||
/*******************************************************
|
||||
* For open()'s FSInputStream
|
||||
* For open()'s FSInputStream.
|
||||
*******************************************************/
|
||||
class LocalFSFileInputStream extends FSInputStream {
|
||||
FileInputStream fis;
|
||||
private FileInputStream fis;
|
||||
private long position;
|
||||
|
||||
public LocalFSFileInputStream(Path f) throws IOException {
|
||||
@ -181,7 +181,7 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
|
||||
* For create()'s FSOutputStream.
|
||||
*********************************************************/
|
||||
class LocalFSFileOutputStream extends OutputStream implements Syncable {
|
||||
FileOutputStream fos;
|
||||
private FileOutputStream fos;
|
||||
|
||||
private LocalFSFileOutputStream(Path f, boolean append) throws IOException {
|
||||
this.fos = new FileOutputStream(pathToFile(f), append);
|
||||
@ -229,7 +229,7 @@ public FSDataOutputStream append(Path f, int bufferSize,
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
|
||||
short replication, long blockSize, Progressable progress)
|
||||
short replication, long blockSize, Progressable progress)
|
||||
throws IOException {
|
||||
if (exists(f) && !overwrite) {
|
||||
throw new IOException("File already exists:"+f);
|
||||
@ -245,23 +245,38 @@ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
|
||||
/** {@inheritDoc} */
|
||||
@Override
|
||||
public FSDataOutputStream create(Path f, FsPermission permission,
|
||||
EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
|
||||
Progressable progress) throws IOException {
|
||||
EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
|
||||
Progressable progress) throws IOException {
|
||||
return primitiveCreate(f,
|
||||
permission.applyUMask(FsPermission.getUMask(getConf())), flag,
|
||||
bufferSize, replication, blockSize, progress, -1);
|
||||
|
||||
if(flag.contains(CreateFlag.APPEND)){
|
||||
if (!exists(f)){
|
||||
if(flag.contains(CreateFlag.CREATE))
|
||||
return create(f, false, bufferSize, replication, blockSize, progress);
|
||||
}
|
||||
return append(f, bufferSize, progress);
|
||||
}
|
||||
|
||||
FSDataOutputStream out = create(f,
|
||||
flag.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize, progress);
|
||||
setPermission(f, permission);
|
||||
return out;
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected FSDataOutputStream primitiveCreate(Path f,
|
||||
FsPermission absolutePermission, EnumSet<CreateFlag> flag,
|
||||
int bufferSize, short replication, long blockSize, Progressable progress,
|
||||
int bytesPerChecksum) throws IOException {
|
||||
|
||||
if(flag.contains(CreateFlag.APPEND)){
|
||||
if (!exists(f)){
|
||||
if(flag.contains(CreateFlag.CREATE)) {
|
||||
return create(f, false, bufferSize, replication, blockSize, null);
|
||||
}
|
||||
}
|
||||
return append(f, bufferSize, null);
|
||||
}
|
||||
|
||||
FSDataOutputStream out = create(f, flag.contains(CreateFlag.OVERWRITE),
|
||||
bufferSize, replication, blockSize, progress);
|
||||
setPermission(f, absolutePermission);
|
||||
return out;
|
||||
}
|
||||
|
||||
public boolean rename(Path src, Path dst) throws IOException {
|
||||
if (pathToFile(src).renameTo(pathToFile(dst))) {
|
||||
return true;
|
||||
@ -289,7 +304,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
|
||||
}
|
||||
if (localf.isFile()) {
|
||||
return new FileStatus[] {
|
||||
new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
|
||||
new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
|
||||
}
|
||||
|
||||
String[] names = localf.list();
|
||||
@ -328,14 +343,25 @@ public boolean mkdirs(Path f) throws IOException {
|
||||
@Override
|
||||
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
|
||||
boolean b = mkdirs(f);
|
||||
if(b)
|
||||
if(b) {
|
||||
setPermission(f, permission);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
|
||||
throws IOException {
|
||||
boolean b = mkdirs(f);
|
||||
setPermission(f, absolutePermission);
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Path getHomeDirectory() {
|
||||
return new Path(System.getProperty("user.home")).makeQualified(this);
|
||||
return this.makeQualified(new Path(System.getProperty("user.home")));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -350,6 +376,11 @@ public void setWorkingDirectory(Path newDir) {
|
||||
public Path getWorkingDirectory() {
|
||||
return workingDir;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Path getInitialWorkingDirectory() {
|
||||
return this.makeQualified(new Path(System.getProperty("user.dir")));
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
@Override
|
||||
@ -391,7 +422,7 @@ public FileStatus getFileStatus(Path f) throws IOException {
|
||||
if (path.exists()) {
|
||||
return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
|
||||
} else {
|
||||
throw new FileNotFoundException( "File " + f + " does not exist.");
|
||||
throw new FileNotFoundException("File " + f + " does not exist.");
|
||||
}
|
||||
}
|
||||
|
||||
@ -406,7 +437,7 @@ private boolean isPermissionLoaded() {
|
||||
|
||||
RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
|
||||
super(f.length(), f.isDirectory(), 1, defaultBlockSize,
|
||||
f.lastModified(), new Path(f.getPath()).makeQualified(fs));
|
||||
f.lastModified(), fs.makeQualified(new Path(f.getPath())));
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -482,8 +513,8 @@ public void write(DataOutput out) throws IOException {
|
||||
* Use the command chown to set owner.
|
||||
*/
|
||||
@Override
|
||||
public void setOwner(Path p, String username, String groupname
|
||||
) throws IOException {
|
||||
public void setOwner(Path p, String username, String groupname)
|
||||
throws IOException {
|
||||
if (username == null && groupname == null) {
|
||||
throw new IOException("username == null && groupname == null");
|
||||
}
|
||||
@ -501,8 +532,8 @@ public void setOwner(Path p, String username, String groupname
|
||||
* Use the command chmod to set permission.
|
||||
*/
|
||||
@Override
|
||||
public void setPermission(Path p, FsPermission permission
|
||||
) throws IOException {
|
||||
public void setPermission(Path p, FsPermission permission)
|
||||
throws IOException {
|
||||
execCommand(pathToFile(p), Shell.SET_PERMISSION_COMMAND,
|
||||
String.format("%05o", permission.toShort()));
|
||||
}
|
||||
@ -514,4 +545,5 @@ private static String execCommand(File f, String... cmd) throws IOException {
|
||||
String output = Shell.execCommand(args);
|
||||
return output;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -0,0 +1,572 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.EnumSet;
|
||||
import java.util.Random;
|
||||
|
||||
import org.junit.*;
|
||||
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.FileContext.CreateOpts;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* A collection of tests for the {@link FileContext}.
|
||||
* This test should be used for testing an instance of FileContext
|
||||
* that has been initialized to a specific default FileSystem such a
|
||||
* LocalFileSystem, HDFS,S3, etc.
|
||||
* </p>
|
||||
* <p>
|
||||
* To test a given {@link FileSystem} implementation create a subclass of this
|
||||
* test and override {@link #setUp()} to initialize the <code>fc</code>
|
||||
* {@link FileContext} instance variable.
|
||||
*
|
||||
* Since this a junit 4 you can also do a single setup before
|
||||
* the start of any tests.
|
||||
* E.g.
|
||||
* @BeforeClass public static void clusterSetupAtBegining()
|
||||
* @AfterClass public static void ClusterShutdownAtEnd()
|
||||
* </p>
|
||||
*/
|
||||
public abstract class FileContextMainOperationsBaseTest {
|
||||
|
||||
|
||||
private static String TEST_ROOT_DIR =
|
||||
System.getProperty("test.build.data", "/tmp");
|
||||
|
||||
protected Path getTestRootPath(String pathString) {
|
||||
return fc.makeQualified(new Path(TEST_ROOT_DIR, pathString));
|
||||
}
|
||||
|
||||
|
||||
protected static FileContext fc;
|
||||
private static byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
|
||||
{
|
||||
for (int i = 0; i < data.length; i++) {
|
||||
data[i] = (byte) (i % 10);
|
||||
}
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
fc.mkdirs(getTestRootPath("test"), FileContext.DEFAULT_PERM);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
fc.delete(getTestRootPath("test"), true);
|
||||
}
|
||||
|
||||
protected static int getBlockSize() {
|
||||
return 1024;
|
||||
}
|
||||
|
||||
protected Path getDefaultWorkingDirectory() throws IOException {
|
||||
return getTestRootPath("/user/" + System.getProperty("user.name")).makeQualified(
|
||||
fc.getDefaultFileSystem().getUri(), fc.getWorkingDirectory());
|
||||
}
|
||||
|
||||
protected boolean renameSupported() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFsStatus() throws Exception {
|
||||
FsStatus fsStatus = fc.getFsStatus(null);
|
||||
Assert.assertNotNull(fsStatus);
|
||||
//used, free and capacity are non-negative longs
|
||||
Assert.assertTrue(fsStatus.getUsed() >= 0);
|
||||
Assert.assertTrue(fsStatus.getRemaining() >= 0);
|
||||
Assert.assertTrue(fsStatus.getCapacity() >= 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testWorkingDirectory() throws Exception {
|
||||
|
||||
Path workDir = getDefaultWorkingDirectory();
|
||||
Assert.assertEquals(workDir, fc.getWorkingDirectory());
|
||||
|
||||
fc.setWorkingDirectory(new Path("."));
|
||||
Assert.assertEquals(workDir, fc.getWorkingDirectory());
|
||||
|
||||
fc.setWorkingDirectory(new Path(".."));
|
||||
Assert.assertEquals(workDir.getParent(), fc.getWorkingDirectory());
|
||||
|
||||
// cd using a relative path
|
||||
Path relativeDir = new Path("existingDir1");
|
||||
Path absoluteDir = new Path(workDir.getParent(),"existingDir1");
|
||||
fc.mkdirs(absoluteDir, FileContext.DEFAULT_PERM);
|
||||
fc.setWorkingDirectory(relativeDir);
|
||||
Assert.assertEquals(absoluteDir,
|
||||
fc.getWorkingDirectory());
|
||||
// cd using a absolute path
|
||||
absoluteDir = getTestRootPath("test/existingDir2");
|
||||
fc.mkdirs(absoluteDir, FileContext.DEFAULT_PERM);
|
||||
fc.setWorkingDirectory(absoluteDir);
|
||||
Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
|
||||
|
||||
// Now open a file relative to the wd we just set above.
|
||||
Path absolutePath = new Path(absoluteDir, "foo");
|
||||
fc.create(absolutePath, EnumSet.of(CreateFlag.CREATE)).close();
|
||||
fc.open(new Path("foo")).close();
|
||||
|
||||
absoluteDir = getTestRootPath("nonexistingPath");
|
||||
try {
|
||||
fc.setWorkingDirectory(absoluteDir);
|
||||
Assert.fail("cd to non existing dir should have failed");
|
||||
} catch (Exception e) {
|
||||
// Exception as expected
|
||||
}
|
||||
|
||||
// Try a URI
|
||||
absoluteDir = new Path("file:///tmp/existingDir");
|
||||
fc.mkdirs(absoluteDir, FileContext.DEFAULT_PERM);
|
||||
fc.setWorkingDirectory(absoluteDir);
|
||||
Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMkdirs() throws Exception {
|
||||
Path testDir = getTestRootPath("test/hadoop");
|
||||
Assert.assertFalse(fc.exists(testDir));
|
||||
Assert.assertFalse(fc.isFile(testDir));
|
||||
|
||||
Assert.assertTrue(fc.mkdirs(testDir, FsPermission.getDefault()));
|
||||
|
||||
Assert.assertTrue(fc.exists(testDir));
|
||||
Assert.assertFalse(fc.isFile(testDir));
|
||||
|
||||
Assert.assertTrue(fc.mkdirs(testDir, FsPermission.getDefault()));
|
||||
|
||||
Assert.assertTrue(fc.exists(testDir));
|
||||
Assert.assertFalse(fc.isFile(testDir));
|
||||
|
||||
Path parentDir = testDir.getParent();
|
||||
Assert.assertTrue(fc.exists(parentDir));
|
||||
Assert.assertFalse(fc.isFile(parentDir));
|
||||
|
||||
Path grandparentDir = parentDir.getParent();
|
||||
Assert.assertTrue(fc.exists(grandparentDir));
|
||||
Assert.assertFalse(fc.isFile(grandparentDir));
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
|
||||
Path testDir = getTestRootPath("test/hadoop");
|
||||
Assert.assertFalse(fc.exists(testDir));
|
||||
Assert.assertTrue(fc.mkdirs(testDir, FsPermission.getDefault()));
|
||||
Assert.assertTrue(fc.exists(testDir));
|
||||
|
||||
createFile(getTestRootPath("test/hadoop/file"));
|
||||
|
||||
Path testSubDir = getTestRootPath("test/hadoop/file/subdir");
|
||||
try {
|
||||
fc.mkdirs(testSubDir, FsPermission.getDefault());
|
||||
Assert.fail("Should throw IOException.");
|
||||
} catch (IOException e) {
|
||||
// expected
|
||||
}
|
||||
Assert.assertFalse(fc.exists(testSubDir));
|
||||
|
||||
Path testDeepSubDir = getTestRootPath("test/hadoop/file/deep/sub/dir");
|
||||
try {
|
||||
fc.mkdirs(testDeepSubDir, FsPermission.getDefault());
|
||||
Assert.fail("Should throw IOException.");
|
||||
} catch (IOException e) {
|
||||
// expected
|
||||
}
|
||||
Assert.assertFalse(fc.exists(testDeepSubDir));
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFileStatusThrowsExceptionForNonExistentFile()
|
||||
throws Exception {
|
||||
try {
|
||||
fc.getFileStatus(getTestRootPath("test/hadoop/file"));
|
||||
Assert.fail("Should throw FileNotFoundException");
|
||||
} catch (FileNotFoundException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
public void testListStatusThrowsExceptionForNonExistentFile()
|
||||
throws Exception {
|
||||
try {
|
||||
fc.listStatus(getTestRootPath("test/hadoop/file"));
|
||||
Assert.fail("Should throw FileNotFoundException");
|
||||
} catch (FileNotFoundException fnfe) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListStatus() throws Exception {
|
||||
Path[] testDirs = { getTestRootPath("test/hadoop/a"),
|
||||
getTestRootPath("test/hadoop/b"),
|
||||
getTestRootPath("test/hadoop/c/1"), };
|
||||
Assert.assertFalse(fc.exists(testDirs[0]));
|
||||
|
||||
for (Path path : testDirs) {
|
||||
Assert.assertTrue(fc.mkdirs(path, FsPermission.getDefault()));
|
||||
}
|
||||
|
||||
FileStatus[] paths = fc.listStatus(getTestRootPath("test"));
|
||||
Assert.assertEquals(1, paths.length);
|
||||
Assert.assertEquals(getTestRootPath("test/hadoop"), paths[0].getPath());
|
||||
|
||||
paths = fc.listStatus(getTestRootPath("test/hadoop"));
|
||||
Assert.assertEquals(3, paths.length);
|
||||
|
||||
|
||||
Assert.assertTrue(getTestRootPath("test/hadoop/a").equals(paths[0].getPath()) ||
|
||||
getTestRootPath("test/hadoop/a").equals(paths[1].getPath()) ||
|
||||
getTestRootPath("test/hadoop/a").equals(paths[2].getPath()));
|
||||
Assert.assertTrue(getTestRootPath("test/hadoop/b").equals(paths[0].getPath()) ||
|
||||
getTestRootPath("test/hadoop/b").equals(paths[1].getPath()) ||
|
||||
getTestRootPath("test/hadoop/b").equals(paths[2].getPath()));
|
||||
Assert.assertTrue(getTestRootPath("test/hadoop/c").equals(paths[0].getPath()) ||
|
||||
getTestRootPath("test/hadoop/c").equals(paths[1].getPath()) ||
|
||||
getTestRootPath("test/hadoop/c").equals(paths[2].getPath()));
|
||||
|
||||
|
||||
paths = fc.listStatus(getTestRootPath("test/hadoop/a"));
|
||||
Assert.assertEquals(0, paths.length);
|
||||
}
|
||||
|
||||
  @Test
  public void testWriteReadAndDeleteEmptyFile() throws Exception {
    // Zero-length file: exercises the empty-input edge of writeReadAndDelete.
    writeReadAndDelete(0);
  }
|
||||
|
||||
  @Test
  public void testWriteReadAndDeleteHalfABlock() throws Exception {
    // File smaller than one block.
    writeReadAndDelete(getBlockSize() / 2);
  }
|
||||
|
||||
  @Test
  public void testWriteReadAndDeleteOneBlock() throws Exception {
    // File exactly one block long.
    writeReadAndDelete(getBlockSize());
  }
|
||||
|
||||
  @Test
  public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
    // File spanning a block boundary.
    writeReadAndDelete(getBlockSize() + (getBlockSize() / 2));
  }
|
||||
|
||||
  @Test
  public void testWriteReadAndDeleteTwoBlocks() throws Exception {
    // File of exactly two full blocks.
    writeReadAndDelete(getBlockSize() * 2);
  }
|
||||
|
||||
private void writeReadAndDelete(int len) throws IOException {
|
||||
Path path = getTestRootPath("test/hadoop/file");
|
||||
|
||||
fc.mkdirs(path.getParent(), FsPermission.getDefault());
|
||||
|
||||
FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.CREATE),
|
||||
CreateOpts.repFac((short) 1), CreateOpts.blockSize(getBlockSize()));
|
||||
out.write(data, 0, len);
|
||||
out.close();
|
||||
|
||||
Assert.assertTrue("Exists", fc.exists(path));
|
||||
Assert.assertEquals("Length", len, fc.getFileStatus(path).getLen());
|
||||
|
||||
FSDataInputStream in = fc.open(path);
|
||||
byte[] buf = new byte[len];
|
||||
in.readFully(0, buf);
|
||||
in.close();
|
||||
|
||||
Assert.assertEquals(len, buf.length);
|
||||
for (int i = 0; i < buf.length; i++) {
|
||||
Assert.assertEquals("Position " + i, data[i], buf[i]);
|
||||
}
|
||||
|
||||
Assert.assertTrue("Deleted", fc.delete(path, false));
|
||||
|
||||
Assert.assertFalse("No longer exists", fc.exists(path));
|
||||
|
||||
}
|
||||
|
||||
  @Test
  public void testOverwrite() throws IOException {
    Path path = getTestRootPath("test/hadoop/file");

    fc.mkdirs(path.getParent(), FsPermission.getDefault());

    createFile(path);

    Assert.assertTrue("Exists", fc.exists(path));
    Assert.assertEquals("Length", data.length, fc.getFileStatus(path).getLen());

    // CREATE without OVERWRITE on an existing file must fail.
    try {
      fc.create(path, EnumSet.of(CreateFlag.CREATE));
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // Expected
    }

    // With the OVERWRITE flag the existing file is replaced.
    FSDataOutputStream out = fc.create(path,EnumSet.of(CreateFlag.OVERWRITE));
    out.write(data, 0, data.length);
    out.close();

    Assert.assertTrue("Exists", fc.exists(path));
    Assert.assertEquals("Length", data.length, fc.getFileStatus(path).getLen());

  }
|
||||
|
||||
  @Test
  public void testWriteInNonExistentDirectory() throws IOException {
    Path path = getTestRootPath("test/hadoop/file");
    Assert.assertFalse("Parent doesn't exist", fc.exists(path.getParent()));
    // create() is expected to make missing parent directories implicitly.
    createFile(path);

    Assert.assertTrue("Exists", fc.exists(path));
    Assert.assertEquals("Length", data.length, fc.getFileStatus(path).getLen());
    Assert.assertTrue("Parent exists", fc.exists(path.getParent()));
  }
|
||||
|
||||
@Test
|
||||
public void testDeleteNonExistentFile() throws IOException {
|
||||
Path path = getTestRootPath("test/hadoop/file");
|
||||
Assert.assertFalse("Doesn't exist", fc.exists(path));
|
||||
Assert.assertFalse("No deletion", fc.delete(path, true));
|
||||
}
|
||||
|
||||
  @Test
  public void testDeleteRecursively() throws IOException {
    Path dir = getTestRootPath("test/hadoop");
    Path file = getTestRootPath("test/hadoop/file");
    Path subdir = getTestRootPath("test/hadoop/subdir");

    createFile(file);
    Assert.assertTrue("Created subdir", fc.mkdirs(subdir, FsPermission.getDefault()));

    Assert.assertTrue("File exists", fc.exists(file));
    Assert.assertTrue("Dir exists", fc.exists(dir));
    Assert.assertTrue("Subdir exists", fc.exists(subdir));

    // Non-recursive delete of a non-empty directory must fail ...
    try {
      fc.delete(dir, false);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    // ... and must leave the whole tree untouched.
    Assert.assertTrue("File still exists", fc.exists(file));
    Assert.assertTrue("Dir still exists", fc.exists(dir));
    Assert.assertTrue("Subdir still exists", fc.exists(subdir));

    // Recursive delete removes the directory and everything below it.
    Assert.assertTrue("Deleted", fc.delete(dir, true));
    Assert.assertFalse("File doesn't exist", fc.exists(file));
    Assert.assertFalse("Dir doesn't exist", fc.exists(dir));
    Assert.assertFalse("Subdir doesn't exist", fc.exists(subdir));
  }
|
||||
|
||||
@Test
|
||||
public void testDeleteEmptyDirectory() throws IOException {
|
||||
Path dir = getTestRootPath("test/hadoop");
|
||||
Assert.assertTrue(fc.mkdirs(dir, FsPermission.getDefault()));
|
||||
Assert.assertTrue("Dir exists", fc.exists(dir));
|
||||
Assert.assertTrue("Deleted", fc.delete(dir, false));
|
||||
Assert.assertFalse("Dir doesn't exist", fc.exists(dir));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRenameNonExistentPath() throws Exception {
|
||||
if (!renameSupported()) return;
|
||||
Path src = getTestRootPath("test/hadoop/NonExistingPath");
|
||||
Path dst = getTestRootPath("test/new/newpath");
|
||||
try {
|
||||
fc.rename(src, dst);
|
||||
Assert.assertTrue("rename of non existing path should have Assert.failed",
|
||||
false);
|
||||
} catch (Exception e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
  @Test
  public void testRenameFileMoveToNonExistentDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/file");
    createFile(src);
    Path dst = getTestRootPath("test/NonExisting/foo");
    // Rename into a missing parent dir: must fail; src stays, dst absent.
    rename(src, dst, false, true, false);
  }
|
||||
|
||||
  @Test
  public void testRenameFileMoveToExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/file");
    createFile(src);
    Path dst = getTestRootPath("test/Existing/newfile");
    fc.mkdirs(dst.getParent(), FsPermission.getDefault());
    // Rename with an existing parent dir: succeeds; src gone, dst present.
    rename(src, dst, true, false, true);
  }
|
||||
|
||||
  @Test
  public void testRenameFileAsExistingFile() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/file");
    createFile(src);
    Path dst = getTestRootPath("test/existing/existingFile");
    createFile(dst);
    // Rename onto an existing file: succeeds; src gone, dst present.
    rename(src, dst, true, false, true);
  }
|
||||
|
||||
  @Test
  public void testRenameFileAsExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/file");
    createFile(src);
    Path dst = getTestRootPath("test/existing/existingDir");
    fc.mkdirs(dst, FsPermission.getDefault());
    // Renaming a file onto an existing directory moves the file INTO it.
    rename(src, dst, true, false, true);
    Assert.assertTrue("Destination changed",
        fc.exists(getTestRootPath("test/existing/existingDir/file")));
  }
|
||||
|
||||
  @Test
  public void testRenameDirectoryMoveToNonExistentDirectory()
    throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/dir");
    fc.mkdirs(src, FsPermission.getDefault());
    Path dst = getTestRootPath("test/nonExisting/newdir");
    // Rename into a missing parent dir: must fail; src stays, dst absent.
    rename(src, dst, false, true, false);
  }
|
||||
|
||||
  @Test
  public void testRenameDirectoryMoveToExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/dir");
    fc.mkdirs(src, FsPermission.getDefault());
    createFile(getTestRootPath("test/hadoop/dir/file1"));
    createFile(getTestRootPath("test/hadoop/dir/subdir/file2"));

    Path dst = getTestRootPath("test/new/newdir");
    fc.mkdirs(dst.getParent(), FsPermission.getDefault());
    // Rename succeeds: src is gone and dst holds the renamed tree.
    rename(src, dst, true, false, true);

    // Nested entries must move along with the directory.
    Assert.assertFalse("Nested file1 exists",
        fc.exists(getTestRootPath("test/hadoop/dir/file1")));
    Assert.assertFalse("Nested file2 exists",
        fc.exists(getTestRootPath("test/hadoop/dir/subdir/file2")));
    Assert.assertTrue("Renamed nested file1 exists",
        fc.exists(getTestRootPath("test/new/newdir/file1")));
    Assert.assertTrue("Renamed nested exists",
        fc.exists(getTestRootPath("test/new/newdir/subdir/file2")));
  }
|
||||
|
||||
  @Test
  public void testRenameDirectoryAsExistingFile() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/dir");
    fc.mkdirs(src, FsPermission.getDefault());
    Path dst = getTestRootPath("test/new/newfile");
    createFile(dst);
    // Renaming a directory onto an existing file must fail; both remain.
    rename(src, dst, false, true, true);
  }
|
||||
|
||||
  @Test
  public void testRenameDirectoryAsExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = getTestRootPath("test/hadoop/dir");
    fc.mkdirs(src, FsPermission.getDefault());
    createFile(getTestRootPath("test/hadoop/dir/file1"));
    createFile(getTestRootPath("test/hadoop/dir/subdir/file2"));

    Path dst = getTestRootPath("test/new/newdir");
    fc.mkdirs(dst, FsPermission.getDefault());
    // Renaming onto an existing directory moves src UNDER the destination.
    rename(src, dst, true, false, true);
    Assert.assertTrue("Destination changed",
        fc.exists(getTestRootPath("test/new/newdir/dir")));
    // The nested tree moves with it.
    Assert.assertFalse("Nested file1 exists",
        fc.exists(getTestRootPath("test/hadoop/dir/file1")));
    Assert.assertFalse("Nested file2 exists",
        fc.exists(getTestRootPath("test/hadoop/dir/subdir/file2")));
    Assert.assertTrue("Renamed nested file1 exists",
        fc.exists(getTestRootPath("test/new/newdir/dir/file1")));
    Assert.assertTrue("Renamed nested exists",
        fc.exists(getTestRootPath("test/new/newdir/dir/subdir/file2")));
  }
|
||||
|
||||
  /** Closing an already-closed input stream must be a no-op (HADOOP-4760). */
  @Test
  public void testInputStreamClosedTwice() throws IOException {
    //HADOOP-4760 according to Closeable#close() closing already-closed
    //streams should have no effect.
    Path src = getTestRootPath("test/hadoop/file");
    createFile(src);
    FSDataInputStream in = fc.open(src);
    in.close();
    in.close();
  }
|
||||
|
||||
  /** Closing an already-closed output stream must be a no-op (HADOOP-4760). */
  @Test
  public void testOutputStreamClosedTwice() throws IOException {
    //HADOOP-4760 according to Closeable#close() closing already-closed
    //streams should have no effect.
    Path src = getTestRootPath("test/hadoop/file");
    FSDataOutputStream out = fc.create(src, EnumSet.of(CreateFlag.CREATE));
    out.writeChar('H'); //write some data
    out.close();
    out.close();
  }
|
||||
|
||||
protected void createFile(Path path) throws IOException {
|
||||
FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.CREATE));
|
||||
out.write(data, 0, data.length);
|
||||
out.close();
|
||||
}
|
||||
|
||||
private void rename(Path src, Path dst, boolean renameShouldSucceed,
|
||||
boolean srcExists, boolean dstExists) throws IOException {
|
||||
try {
|
||||
fc.rename(src, dst);
|
||||
if (!renameShouldSucceed)
|
||||
Assert.fail("rename should have thrown exception");
|
||||
} catch (Exception e) {
|
||||
if (renameShouldSucceed)
|
||||
Assert.fail("rename should have suceeded, but threw exception");
|
||||
}
|
||||
|
||||
Assert.assertEquals("Source exists", srcExists, fc.exists(src));
|
||||
Assert.assertEquals("Destination exists", dstExists, fc.exists(dst));
|
||||
}
|
||||
}
|
@ -0,0 +1,59 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestLocalFSFileContextMainOperations extends FileContextMainOperationsBaseTest {
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
fc = FileContext.getLocalFSFileContext();
|
||||
super.setUp();
|
||||
}
|
||||
|
||||
static Path wd = null;
|
||||
protected Path getDefaultWorkingDirectory() throws IOException {
|
||||
if (wd == null)
|
||||
wd = FileSystem.getLocal(new Configuration()).getWorkingDirectory();
|
||||
return wd;
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test
|
||||
public void testRenameFileMoveToNonExistentDirectory() throws Exception {
|
||||
// ignore base class test till hadoop-6240 is fixed
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test
|
||||
public void testRenameDirectoryMoveToNonExistentDirectory()
|
||||
throws Exception {
|
||||
// ignore base class test till hadoop-6240 is fixed
|
||||
}
|
||||
@Override
|
||||
@Test
|
||||
public void testRenameDirectoryAsExistingDirectory() throws Exception {
|
||||
// ignore base class test till hadoop-6240 is fixed
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue
Block a user