HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefault methods that don't take a Path argument. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1342495 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-05-25 04:10:15 +00:00
parent 39661c06f7
commit 22cb0ec82a
10 changed files with 29 additions and 19 deletions

View File

@@ -173,6 +173,9 @@ Release 2.0.1-alpha - UNRELEASED
     HADOOP-8398. Cleanup BlockLocation. (eli)
 
+    HADOOP-8422. Deprecate FileSystem#getDefault* and getServerDefault
+    methods that don't take a Path argument. (eli)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname

View File

@@ -615,7 +615,9 @@ public BlockLocation[] getFileBlockLocations(Path p,
    * Return a set of server default configuration values
    * @return server default configuration values
    * @throws IOException
+   * @deprecated use {@link #getServerDefaults(Path)} instead
    */
+  @Deprecated
   public FsServerDefaults getServerDefaults() throws IOException {
     Configuration conf = getConf();
     return new FsServerDefaults(getDefaultBlockSize(),
@@ -1939,8 +1941,12 @@ public long getBlockSize(Path f) throws IOException {
     return getFileStatus(f).getBlockSize();
   }
 
-  /** Return the number of bytes that large input files should be optimally
-   * be split into to minimize i/o time. */
+  /**
+   * Return the number of bytes that large input files should be optimally
+   * be split into to minimize i/o time.
+   * @deprecated use {@link #getDefaultBlockSize(Path)} instead
+   */
+  @Deprecated
   public long getDefaultBlockSize() {
     // default to 32MB: large enough to minimize the impact of seeks
     return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
@@ -1958,7 +1964,9 @@ public long getDefaultBlockSize(Path f) {
 
   /**
    * Get the default replication.
+   * @deprecated use {@link #getDefaultReplication(Path)} instead
    */
+  @Deprecated
   public short getDefaultReplication() { return 1; }
 
   /**
/** /**

View File

@@ -316,7 +316,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
     }
     if (localf.isFile()) {
       return new FileStatus[] {
-        new RawLocalFileStatus(localf, getDefaultBlockSize(), this) };
+        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
     }
 
     String[] names = localf.list();
@@ -444,7 +444,7 @@ public String toString() {
   public FileStatus getFileStatus(Path f) throws IOException {
     File path = pathToFile(f);
     if (path.exists()) {
-      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
+      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this);
     } else {
       throw new FileNotFoundException("File " + f + " does not exist");
     }

View File

@@ -113,7 +113,7 @@ public static long createFile(FileSystem fSys, Path path, int numBlocks,
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, boolean createParent) throws IOException {
-    return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(), true);
+    return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,

View File

@@ -47,11 +47,9 @@ protected void tearDown() throws Exception {
   }
 
   public void testBlockSize() throws Exception {
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
-    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
-
     Path file = path("/test/hadoop/file");
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
+    fs.getConf().setLong("fs.s3.block.size", newBlockSize);
     createFile(file);
     assertEquals("Double default block size", newBlockSize,
         fs.getFileStatus(file).getBlockSize());

View File

@@ -141,11 +141,11 @@ public void testEmptyFile() throws Exception {
   public void testBlockSize() throws Exception {
     Path file = path("/test/hadoop/file");
     createFile(file);
-    assertEquals("Default block size", fs.getDefaultBlockSize(),
+    assertEquals("Default block size", fs.getDefaultBlockSize(file),
         fs.getFileStatus(file).getBlockSize());
 
     // Block size is determined at read time
-    long newBlockSize = fs.getDefaultBlockSize() * 2;
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
     fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
     assertEquals("Double default block size", newBlockSize,
         fs.getFileStatus(file).getBlockSize());

View File

@@ -288,10 +288,10 @@ public FSCreate(InputStream is, String path, String perm, boolean override, shor
     @Override
     public Void execute(FileSystem fs) throws IOException {
       if (replication == -1) {
-        replication = fs.getDefaultReplication();
+        replication = fs.getDefaultReplication(path);
       }
       if (blockSize == -1) {
-        blockSize = fs.getDefaultBlockSize();
+        blockSize = fs.getDefaultBlockSize(path);
       }
       FsPermission fsPermission = getPermission(permission);
       int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);

View File

@@ -202,7 +202,7 @@ private void create(OpType op, Path name, short umask,
       case CREATE:
         FSDataOutputStream out = fs.create(name, permission, true,
             conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+            fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
         out.close();
         break;
       case MKDIRS:

View File

@@ -875,8 +875,8 @@ public void testMultipleFilesSmallerThanOneBlock() throws Exception {
     // 6kb block
     // 192kb quota
     final int FILE_SIZE = 1024;
-    final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize();
-    assertEquals(6 * 1024, fs.getDefaultBlockSize());
+    final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir);
+    assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
     assertEquals(192 * 1024, QUOTA_SIZE);
 
     // Create the dir and set the quota. We need to enable the quota before
@@ -903,7 +903,7 @@ public void testMultipleFilesSmallerThanOneBlock() throws Exception {
     assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
         c.getSpaceConsumed());
     assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),
-        3 * (fs.getDefaultBlockSize() - FILE_SIZE));
+        3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
 
     // Now check that trying to create another file violates the quota
     try {

View File

@@ -123,9 +123,10 @@ public void testCreate() throws Exception {
     checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
 
     FsPermission filePerm = new FsPermission((short)0444);
-    FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
+    Path p = new Path("/b1/b2/b3.txt");
+    FSDataOutputStream out = fs.create(p, filePerm,
         true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+        fs.getDefaultReplication(p), fs.getDefaultBlockSize(p), null);
     out.write(123);
     out.close();
     checkPermission(fs, "/b1", inheritPerm);