diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 2f22ea0925..8575439f8b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -172,7 +172,7 @@ public long getLen() {
    * @return true if this is a file
    */
   public boolean isFile() {
-    return !isdir && !isSymlink();
+    return !isDirectory() && !isSymlink();
   }
 
   /**
@@ -182,20 +182,20 @@ public boolean isFile() {
   public boolean isDirectory() {
     return isdir;
   }
-  
+
   /**
-   * Old interface, instead use the explicit {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * Old interface, instead use the explicit {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
    * @return true if this is a directory.
-   * @deprecated Use {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * @deprecated Use {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
    * instead.
    */
   @Deprecated
-  public boolean isDir() {
-    return isdir;
+  public final boolean isDir() {
+    return isDirectory();
   }
-  
+
   /**
    * Is this a symbolic link?
    * @return true if this is a symbolic link
@@ -448,7 +448,6 @@ public void readFields(DataInput in) throws IOException {
     FileStatus other = PBHelper.convert(proto);
     isdir = other.isDirectory();
     length = other.getLen();
-    isdir = other.isDirectory();
     block_replication = other.getReplication();
     blocksize = other.getBlockSize();
     modification_time = other.getModificationTime();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
index e0f62e453b..ce03ced7b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
@@ -61,13 +61,7 @@ public boolean isFile() {
   public boolean isDirectory() {
     return myFs.isDirectory();
   }
-  
-  @Override
-  @SuppressWarnings("deprecation")
-  public boolean isDir() {
-    return myFs.isDirectory();
-  }
-  
+
   @Override
   public boolean isSymlink() {
     return myFs.isSymlink();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
index 4e681a7217..c7c8b2874b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
@@ -49,12 +49,6 @@ public boolean isDirectory() {
     return myFs.isDirectory();
   }
 
-  @Override
-  @SuppressWarnings("deprecation")
-  public boolean isDir() {
-    return myFs.isDirectory();
-  }
-
   @Override
   public boolean isSymlink() {
     return myFs.isSymlink();
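With `isDir()` now final and delegating to `isDirectory()`, wrapper classes such as the two ViewFs statuses above no longer need their own `isDir()` overrides: overriding `isDirectory()` alone keeps every predicate consistent. A minimal sketch of the resulting pattern (hypothetical `DelegatingFileStatus`, not part of this patch):

```java
import org.apache.hadoop.fs.FileStatus;

// Hypothetical wrapper in the style of ViewFsFileStatus: after this
// patch, overriding isDirectory() is enough, because the base class
// defines both the final isDir() and isFile() in terms of it.
class DelegatingFileStatus extends FileStatus {
  private final FileStatus inner;

  DelegatingFileStatus(FileStatus inner) {
    this.inner = inner;
  }

  @Override
  public boolean isDirectory() {
    return inner.isDirectory();
  }
}
```

Callers that still invoke the deprecated `isDir()` get the delegating behavior for free.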
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index a0d2c5a35b..5b1a68767e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -2095,7 +2095,7 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
     if (fs == null)
       return null;
     FileType fType = FileType.IS_FILE;
-    if (fs.isDir()) {
+    if (fs.isDirectory()) {
       fType = FileType.IS_DIR;
     } else if (fs.isSymlink()) {
       fType = FileType.IS_SYMLINK;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index cc17394197..abaa5cad64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -65,7 +65,9 @@ public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
      * client takes only the lower 32bit of the fileId and treats it as signed
      * int. When the 32th bit is 1, the client considers it invalid.
      */
-    NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
+    NfsFileType fileType = fs.isDirectory()
+        ? NfsFileType.NFSDIR
+        : NfsFileType.NFSREG;
     fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
     int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
     long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
@@ -98,7 +100,7 @@ public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
       return null;
     }
 
-    long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
+    long size = fstat.isDirectory() ? getDirSize(fstat.getChildrenNum()) : fstat
         .getLen();
     return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
         new NfsTime(fstat.getModificationTime()));
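The NFS attribute code above maps one `FileStatus` to one NFS file type, with the symlink test applied last so it takes precedence. A hedged sketch of the same dispatch as a standalone helper (hypothetical `EntryKind` names; the real code uses `NfsFileType`):

```java
import org.apache.hadoop.fs.FileStatus;

public class EntryKinds {
  enum EntryKind { DIRECTORY, SYMLINK, REGULAR }

  // Mirrors the Nfs3Utils ordering: the symlink check runs after the
  // directory check in the real code, so it wins here as well.
  static EntryKind kindOf(FileStatus status) {
    if (status.isSymlink()) {
      return EntryKind.SYMLINK;
    }
    return status.isDirectory() ? EntryKind.DIRECTORY : EntryKind.REGULAR;
  }
}
```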
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index d6bb71d0d9..7a6aa89fde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -1208,7 +1208,7 @@ REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler,
       if (fstat == null) {
         return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
       }
-      if (fstat.isDir()) {
+      if (fstat.isDirectory()) {
         return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
       }
 
@@ -1289,7 +1289,7 @@ RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler,
       if (fstat == null) {
         return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
       }
-      if (!fstat.isDir()) {
+      if (!fstat.isDirectory()) {
         return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
       }
 
@@ -1565,7 +1565,7 @@ public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
         LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
-      if (!dirStatus.isDir()) {
+      if (!dirStatus.isDirectory()) {
         LOG.error("Can't readdir for regular file, fileId: "
             + handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
@@ -1732,7 +1732,7 @@ READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler,
         LOG.info("Can't get path for fileId: " + handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
-      if (!dirStatus.isDir()) {
+      if (!dirStatus.isDirectory()) {
         LOG.error("Can't readdirplus for regular file, fileId: "
             + handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 1a2c889e41..b653f4fccc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -348,7 +348,7 @@ private void processPath(String fullPath, Result result) {
     private void processRecursively(String parent, HdfsFileStatus status,
         Result result) {
       String fullPath = status.getFullName(parent);
-      if (status.isDir()) {
+      if (status.isDirectory()) {
         if (!fullPath.endsWith(Path.SEPARATOR)) {
           fullPath = fullPath + Path.SEPARATOR;
         }
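The Mover call site above is the usual traversal shape: branch on `isDirectory()`, recurse into children, and handle plain files in the else arm. A minimal sketch against the public `FileSystem` API (hypothetical `walk` helper; the Mover's internal code works on `HdfsFileStatus` instead):

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class Walker {
  // Hypothetical recursive walk: directories recurse, files print.
  static void walk(FileSystem fs, Path path) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    if (status.isDirectory()) {
      for (FileStatus child : fs.listStatus(path)) {
        walk(fs, child.getPath());
      }
    } else {
      System.out.println("file: " + status.getPath());
    }
  }
}
```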
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index eddab3fa22..5872955fc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -471,7 +471,7 @@ private void listCorruptFileBlocks() throws IOException {
   void check(String parent, HdfsFileStatus file, Result replRes, Result ecRes)
       throws IOException {
     String path = file.getFullName(parent);
-    if (file.isDir()) {
+    if (file.isDirectory()) {
       checkDir(path, replRes, ecRes);
       return;
     }
@@ -1115,7 +1115,7 @@ private void lostFoundInit(DFSClient dfs) {
       if (lfStatus == null) { // not exists
         lfInitedOk = dfs.mkdirs(lfName, null, true);
         lostFound = lfName;
-      } else if (!lfStatus.isDir()) { // exists but not a directory
+      } else if (!lfStatus.isDirectory()) { // exists but not a directory
         LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
         lfInitedOk = false;
       } else { // exists and is a directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 5dee6e0f08..e42e08cf77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -51,7 +51,6 @@
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
-import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
 import static org.junit.Assert.*;
 
 /**
@@ -572,7 +571,7 @@ static void recoverAllLeases(DFSClient dfs,
       Path path) throws IOException {
     String pathStr = path.toString();
     HdfsFileStatus status = dfs.getFileInfo(pathStr);
-    if (!status.isDir()) {
+    if (!status.isDirectory()) {
       for (int retries = 10; retries > 0; retries--) {
         if (dfs.recoverLease(pathStr)) {
           return;
new Path("/") : status.getFullPath(parent); DirectoryListing children = dfs.getClient().listPaths( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 29a6064c70..94172bbe69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -450,7 +450,7 @@ public void testCompression() throws IOException { namenode.getNamesystem().mkdirs("/test", new PermissionStatus("hairong", null, FsPermission.getDefault()), true); NamenodeProtocols nnRpc = namenode.getRpcServer(); - assertTrue(nnRpc.getFileInfo("/test").isDir()); + assertTrue(nnRpc.getFileInfo("/test").isDirectory()); nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); nnRpc.saveNamespace(0, 0); namenode.stop(); @@ -481,7 +481,7 @@ public void testCompression() throws IOException { private void checkNameSpace(Configuration conf) throws IOException { NameNode namenode = new NameNode(conf); NamenodeProtocols nnRpc = namenode.getRpcServer(); - assertTrue(nnRpc.getFileInfo("/test").isDir()); + assertTrue(nnRpc.getFileInfo("/test").isDirectory()); nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); nnRpc.saveNamespace(0, 0); namenode.stop(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index 5a2aff90a7..b3bb3dda20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -124,7 +124,7 @@ public void testTailer() throws IOException, InterruptedException, for (int i = 0; i < DIRS_TO_MAKE / 2; i++) { assertTrue(NameNodeAdapter.getFileInfo(nn2, - getDirPath(i), false).isDir()); + getDirPath(i), false).isDirectory()); } for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) { @@ -137,7 +137,7 @@ public void testTailer() throws IOException, InterruptedException, for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) { assertTrue(NameNodeAdapter.getFileInfo(nn2, - getDirPath(i), false).isDir()); + getDirPath(i), false).isDirectory()); } } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java index 38c2b2da4a..93c717c3eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java @@ -205,7 +205,7 @@ public void testFailuretoReadEdits() throws Exception { TEST_DIR1, false)); // Should have been successfully created. assertTrue(NameNodeAdapter.getFileInfo(nn1, - TEST_DIR2, false).isDir()); + TEST_DIR2, false).isDirectory()); // Null because it hasn't been created yet. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 38c2b2da4a..93c717c3eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -205,7 +205,7 @@ public void testFailuretoReadEdits() throws Exception {
         TEST_DIR1, false));
     // Should have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR2, false).isDir());
+        TEST_DIR2, false).isDirectory());
     // Null because it hasn't been created yet.
     assertNull(NameNodeAdapter.getFileInfo(nn1,
         TEST_DIR3, false));
@@ -219,10 +219,10 @@ public void testFailuretoReadEdits() throws Exception {
         TEST_DIR1, false));
     // Should have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR2, false).isDir());
+        TEST_DIR2, false).isDirectory());
     // Should now have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR3, false).isDir());
+        TEST_DIR3, false).isDirectory());
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
index 856ed8fbc8..8eeb853c77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
@@ -128,7 +128,7 @@ private void assertCanStartHaNameNodes(String pathSuffix)
       HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
           cluster.getNameNode(1));
       assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-          newPath.toString(), false).isDir());
+          newPath.toString(), false).isDirectory());
     } finally {
       if (fs != null) {
         fs.close();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
index 1fca5c982b..cc97a14d3b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
@@ -154,7 +154,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
     @Override
     public BlockLocation[] getFileBlockLocations(
         FileStatus stat, long start, long len) throws IOException {
-      if (stat.isDir()) {
+      if (stat.isDirectory()) {
         return null;
       }
       System.out.println("File " + stat.getPath());
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
index 9d5d6a22c3..544d6ab4fc 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java
@@ -73,20 +73,20 @@ public void testImplicitFolderListed() throws Exception {
     FileStatus[] obtained = fs.listStatus(new Path("/root/b"));
     assertNotNull(obtained);
     assertEquals(1, obtained.length);
-    assertFalse(obtained[0].isDir());
+    assertFalse(obtained[0].isDirectory());
     assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
 
     // List the directory
     obtained = fs.listStatus(new Path("/root"));
     assertNotNull(obtained);
     assertEquals(1, obtained.length);
-    assertFalse(obtained[0].isDir());
+    assertFalse(obtained[0].isDirectory());
     assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
 
     // Get the directory's file status
     FileStatus dirStatus = fs.getFileStatus(new Path("/root"));
     assertNotNull(dirStatus);
-    assertTrue(dirStatus.isDir());
+    assertTrue(dirStatus.isDirectory());
     assertEquals("/root", dirStatus.getPath().toUri().getPath());
   }
 
@@ -114,7 +114,7 @@ public void testFileAndImplicitFolderSameName() throws Exception {
     FileStatus[] listResult = fs.listStatus(new Path("/root/b"));
     // File should win.
     assertEquals(1, listResult.length);
-    assertFalse(listResult[0].isDir());
+    assertFalse(listResult[0].isDirectory());
     try {
       // Trying to delete root/b/c would cause a dilemma for WASB, so
       // it should throw.
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java
index d010d08cdc..725cae1e3b 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java
@@ -71,7 +71,7 @@ public SwiftFileStatus(long length,
    * @return true if the status is considered to be a file
    */
   @Override
-  public boolean isDir() {
+  public boolean isDirectory() {
     return super.isDirectory() || getLen() == 0;
   }
 
@@ -79,19 +79,11 @@ public boolean isDir() {
    * A entry is a file if it is not a directory.
    * By implementing it and not marking as an override this
    * subclass builds and runs in both Hadoop versions.
-   * @return the opposite value to {@link #isDir()}
+   * @return the opposite value to {@link #isDirectory()}
    */
   @Override
   public boolean isFile() {
-    return !isDir();
-  }
-
-  /**
-   * Directory test
-   * @return true if the file is considered to be a directory
-   */
-  public boolean isDirectory() {
-    return isDir();
+    return !this.isDirectory();
   }
 
   @Override
@@ -100,7 +92,7 @@ public String toString() {
     sb.append(getClass().getSimpleName());
     sb.append("{ ");
     sb.append("path=").append(getPath());
-    sb.append("; isDirectory=").append(isDir());
+    sb.append("; isDirectory=").append(isDirectory());
     sb.append("; length=").append(getLen());
     sb.append("; blocksize=").append(getBlockSize());
     sb.append("; modification_time=").append(getModificationTime());
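SwiftFileStatus is the one subclass with semantics of its own: a zero-length object can be a directory marker in the object store, so the override moves from `isDir()` to `isDirectory()` and keeps that rule, while the redundant `isDirectory()`/`isFile()` pair collapses. A hedged sketch of just that rule (hypothetical class, simplified from the Swift code above):

```java
import org.apache.hadoop.fs.FileStatus;

// Hypothetical illustration of the Swift rule: zero-length entries
// count as directory markers, everything else as regular entries.
class ZeroLengthAsDirStatus extends FileStatus {
  @Override
  public boolean isDirectory() {
    return super.isDirectory() || getLen() == 0;
  }
}
```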
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
index f2ecb0f1da..a44051ac56 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
@@ -578,7 +578,7 @@ public void rename(Path src, Path dst)
 
     //enum the child entries and everything underneath
     List<FileStatus> childStats = listDirectory(srcObject, true, true);
-    boolean srcIsFile = !srcMetadata.isDir();
+    boolean srcIsFile = !srcMetadata.isDirectory();
     if (srcIsFile) {
 
       //source is a simple file OR a partitioned file
@@ -945,7 +945,7 @@ public boolean delete(Path absolutePath, boolean recursive) throws IOException {
       //>1 entry implies directory with children. Run through them,
       // but first check for the recursive flag and reject it *unless it looks
       // like a partitioned file (len > 0 && has children)
-      if (!fileStatus.isDir()) {
+      if (!fileStatus.isDirectory()) {
        LOG.debug("Multiple child entries but entry has data: assume partitioned");
      } else if (!recursive) {
        //if there are children, unless this is a recursive operation, fail immediately
diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java
index 21fe918e7f..9b4ba5e8c6 100644
--- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java
+++ b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java
@@ -87,7 +87,7 @@ public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
     assertEquals("Wrong number of elements in file status " + statusString, 1,
         statuses.length);
     SwiftFileStatus stat = (SwiftFileStatus) statuses[0];
-    assertTrue("isDir(): Not a directory: " + stat, stat.isDir());
+    assertTrue("isDir(): Not a directory: " + stat, stat.isDirectory());
     extraStatusAssertions(stat);
   }
 
@@ -135,7 +135,7 @@ public void testMultiByteFilesAreFiles() throws Exception {
     SwiftTestUtils.writeTextFile(fs, src, "testMultiByteFilesAreFiles", false);
     assertIsFile(src);
     FileStatus status = fs.getFileStatus(src);
-    assertFalse(status.isDir());
+    assertFalse(status.isDirectory());
   }
 
 }
diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java
index b42abcddf5..419d0303a0 100644
--- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java
+++ b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java
@@ -228,7 +228,7 @@ private FileStatus validatePathLen(Path path, int len) throws IOException {
         status.getLen());
     String fileInfo = qualifiedPath + " " + status;
     assertFalse("File claims to be a directory " + fileInfo,
-                status.isDir());
+                status.isDirectory());
     FileStatus listedFileStat = resolveChild(parentDirListing, qualifiedPath);
     assertNotNull("Did not find " + path + " in " + parentDirLS,