From 5af572b6443715b7a741296c1bd520a1840f9a7c Mon Sep 17 00:00:00 2001
From: Mingliang Liu
Date: Wed, 9 Nov 2016 14:48:56 -0800
Subject: [PATCH] HADOOP-13427. Eliminate needless uses of
 FileSystem#{exists(), isFile(), isDirectory()}. Contributed by Steve
 Loughran and Mingliang Liu

---
 .../crypto/key/JavaKeyStoreProvider.java      | 31 +++++++++----------
 .../java/org/apache/hadoop/fs/FileUtil.java   | 19 ++++++++----
 .../apache/hadoop/fs/RawLocalFileSystem.java  | 12 +++----
 .../apache/hadoop/fs/TrashPolicyDefault.java  |  5 ++-
 .../hadoop/util/GenericOptionsParser.java     | 17 +++++-----
 .../hadoop/hdfs/DistributedFileSystem.java    |  3 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java  |  9 ++++--
 .../jobhistory/JobHistoryEventHandler.java    |  8 +----
 .../hadoop/mapreduce/JobResourceUploader.java | 14 ++++-----
 .../hadoop/mapreduce/JobSubmissionFiles.java  |  5 +--
 .../lib/output/FileOutputCommitter.java       | 12 +++----
 .../output/PartialFileOutputCommitter.java    |  2 +-
 .../mapreduce/lib/partition/InputSampler.java |  6 ++--
 .../TestPreemptableFileOutputCommitter.java   |  2 +-
 ...toryServerFileSystemStateStoreService.java |  2 +-
 .../examples/terasort/TeraOutputFormat.java   |  4 ++-
 .../hadoop/tools/HadoopArchiveLogs.java       |  4 +--
 .../apache/hadoop/tools/HadoopArchives.java   | 17 +++------
 .../org/apache/hadoop/fs/azure/WasbFsck.java  |  8 +++--
 .../org/apache/hadoop/tools/DistCpSync.java   |  4 +--
 .../hadoop/tools/SimpleCopyListing.java       | 16 ++++++----
 .../hadoop/tools/mapred/CopyCommitter.java    |  4 +--
 .../hadoop/tools/mapred/CopyMapper.java       |  5 +--
 .../mapred/RetriableFileCopyCommand.java      |  2 +-
 .../apache/hadoop/tools/util/DistCpUtils.java |  4 +--
 .../org/apache/hadoop/tools/DistTool.java     | 10 +++---
 .../hadoop/fs/swift/util/SwiftTestUtils.java  |  9 ++++--
 .../hadoop/tools/rumen/state/StatePool.java   | 25 +++++++--------
 .../FileSystemBasedConfigurationProvider.java |  4 +--
 .../api/impl/FileSystemTimelineWriter.java    | 17 ++++------
 .../nodelabels/FileSystemNodeLabelsStore.java | 26 ++++++++++------
 .../NonAppendableFSNodeLabelStore.java        |  8 ++---
 .../TestFileSystemNodeLabelsStore.java        |  4 +--
 .../FileSystemApplicationHistoryStore.java    | 16 +++++-----
 ...TestFileSystemApplicationHistoryStore.java |  7 +++--
 .../sharedcache/SharedCacheUploader.java      |  6 ++--
 .../store/InMemorySCMStore.java               |  8 +++--
 37 files changed, 171 insertions(+), 184 deletions(-)
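Every hunk below applies one idea: FileSystem#exists(), #isFile() and #isDirectory() are all convenience wrappers around getFileStatus(), so using one of them to guard a call that stats the path again doubles the round trips to the NameNode (or, on object stores, doubles the HTTP requests) and opens a check-then-act race in between. The replacement idiom stats once and treats FileNotFoundException as the "absent" branch. A minimal before/after sketch of the idiom (hypothetical helper names; the imports shown here are assumed by the later sketches as well):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class ExistsThenStat {
      // Before: up to two getFileStatus() round trips for one answer.
      static boolean isDirOld(FileSystem fs, Path p) throws IOException {
        if (!fs.exists(p)) {         // RPC #1 (a getFileStatus in disguise)
          return false;
        }
        return fs.getFileStatus(p).isDirectory();   // RPC #2
      }

      // After: one round trip; absence arrives as FileNotFoundException.
      static boolean isDirNew(FileSystem fs, Path p) throws IOException {
        try {
          return fs.getFileStatus(p).isDirectory();
        } catch (FileNotFoundException e) {
          return false;
        }
      }
    }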
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index 1827c272dc..5beda0d2d2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -36,6 +36,7 @@
 
 import javax.crypto.spec.SecretKeySpec;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
@@ -167,9 +168,9 @@ private void locateKeystore() throws IOException {
       // rewrite the keystore in flush()
       permissions = perm;
     } catch (KeyStoreException e) {
-      throw new IOException("Can't create keystore", e);
+      throw new IOException("Can't create keystore: " + e, e);
     } catch (GeneralSecurityException e) {
-      throw new IOException("Can't load keystore " + path, e);
+      throw new IOException("Can't load keystore " + path + " : " + e , e);
     }
   }
 
@@ -190,9 +191,7 @@ private FsPermission tryLoadFromPath(Path path, Path backupPath)
     try {
       perm = loadFromPath(path, password);
       // Remove _OLD if exists
-      if (fs.exists(backupPath)) {
-        fs.delete(backupPath, true);
-      }
+      fs.delete(backupPath, true);
       LOG.debug("KeyStore loaded successfully !!");
     } catch (IOException ioe) {
       // If file is corrupted for some reason other than
@@ -260,9 +259,7 @@ private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
         LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
             pathToLoad));
       }
-      if (fs.exists(pathToDelete)) {
-        fs.delete(pathToDelete, true);
-      }
+      fs.delete(pathToDelete, true);
     } catch (IOException e) {
       // Check for password issue : don't want to trash file due
       // to wrong password
@@ -539,13 +536,15 @@ public void flush() throws IOException {
       return;
     }
     // Might exist if a backup has been restored etc.
-    if (fs.exists(newPath)) {
+    try {
       renameOrFail(newPath, new Path(newPath.toString()
           + "_ORPHANED_" + System.currentTimeMillis()));
+    } catch (FileNotFoundException ignored) {
     }
-    if (fs.exists(oldPath)) {
+    try {
       renameOrFail(oldPath, new Path(oldPath.toString()
           + "_ORPHANED_" + System.currentTimeMillis()));
+    } catch (FileNotFoundException ignored) {
     }
     // put all of the updates into the keystore
     for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
@@ -601,9 +600,7 @@ private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
     // Rename _NEW to CURRENT
     renameOrFail(newPath, path);
     // Delete _OLD
-    if (fs.exists(oldPath)) {
-      fs.delete(oldPath, true);
-    }
+    fs.delete(oldPath, true);
   }
 
   protected void writeToNew(Path newPath) throws IOException {
@@ -623,12 +620,12 @@ protected void writeToNew(Path newPath) throws IOException {
 
   protected boolean backupToOld(Path oldPath)
       throws IOException {
-    boolean fileExisted = false;
-    if (fs.exists(path)) {
+    try {
       renameOrFail(path, oldPath);
-      fileExisted = true;
+      return true;
+    } catch (FileNotFoundException e) {
+      return false;
     }
-    return fileExisted;
   }
 
   private void revertFromOld(Path oldPath, boolean fileExisted)
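backupToOld() above now lets the rename itself report absence instead of paying for a separate exists() probe. Whether that works depends on the filesystem: FileSystem#rename is only guaranteed to return false for a missing source on some implementations, while others raise FileNotFoundException, and the keystore provider relies on the exception-raising behaviour of its renameOrFail() path. A hedged sketch of the same shape (hypothetical helper, not code from this patch; imports as in the first sketch):

    // Rename src to dst if src exists; false means "nothing to rename".
    // Assumes the underlying FileSystem raises FileNotFoundException for
    // a missing source; on stores that merely return false, the miss
    // would surface as the plain IOException instead.
    static boolean renameIfPresent(FileSystem fs, Path src, Path dst)
        throws IOException {
      try {
        if (!fs.rename(src, dst)) {
          throw new IOException("Rename of " + src + " to " + dst + " failed");
        }
        return true;
      } catch (FileNotFoundException absent) {
        return false;
      }
    }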
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index ea6249e47f..bb70cbd26e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -22,6 +22,7 @@
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -326,14 +327,15 @@ public static boolean copy(FileSystem srcFS, Path[] srcs,
       return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf);
 
     // Check if dest is directory
-    if (!dstFS.exists(dst)) {
-      throw new IOException("`" + dst +"': specified destination directory " +
-                            "does not exist");
-    } else {
+    try {
       FileStatus sdst = dstFS.getFileStatus(dst);
       if (!sdst.isDirectory())
         throw new IOException("copying multiple files, but last argument `" +
                               dst + "' is not a directory");
+    } catch (FileNotFoundException e) {
+      throw new IOException(
+          "`" + dst + "': specified destination directory " +
+              "does not exist", e);
     }
 
     for (Path src : srcs) {
@@ -481,8 +483,13 @@ private static boolean copy(FileSystem srcFS, FileStatus srcStatus,
 
   private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
       boolean overwrite) throws IOException {
-    if (dstFS.exists(dst)) {
-      FileStatus sdst = dstFS.getFileStatus(dst);
+    FileStatus sdst;
+    try {
+      sdst = dstFS.getFileStatus(dst);
+    } catch (FileNotFoundException e) {
+      sdst = null;
+    }
+    if (null != sdst) {
       if (sdst.isDirectory()) {
         if (null == srcName) {
           throw new IOException("Target " + dst + " is a directory");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 5e6cb05962..7bf429eed4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -208,9 +208,7 @@ public FileDescriptor getFileDescriptor() throws IOException {
 
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    if (!exists(f)) {
-      throw new FileNotFoundException(f.toString());
-    }
+    getFileStatus(f);
     return new FSDataInputStream(new BufferedFSInputStream(
         new LocalFSFileInputStream(f), bufferSize));
   }
@@ -278,9 +276,6 @@ public void write(int b) throws IOException {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
-    if (!exists(f)) {
-      throw new FileNotFoundException("File " + f + " not found");
-    }
     FileStatus status = getFileStatus(f);
     if (status.isDirectory()) {
       throw new IOException("Cannot append to a diretory (=" + f + " )");
@@ -387,17 +382,18 @@ public final boolean handleEmptyDstDirectoryOnWindows(Path src, File srcFile,
     // platforms (notably Windows) do not provide this behavior, so the Java API
     // call renameTo(dstFile) fails. Delete destination and attempt rename
     // again.
-    if (this.exists(dst)) {
+    try {
       FileStatus sdst = this.getFileStatus(dst);
       if (sdst.isDirectory() && dstFile.list().length == 0) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Deleting empty destination and renaming " + src + " to " +
-            dst);
+              dst);
         }
         if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
           return true;
         }
       }
+    } catch (FileNotFoundException ignored) {
     }
     return false;
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 4f4c937b44..c65e16ae5f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -121,9 +121,8 @@ public boolean moveToTrash(Path path) throws IOException {
     if (!path.isAbsolute())                       // make path absolute
       path = new Path(fs.getWorkingDirectory(), path);
 
-    if (!fs.exists(path))                         // check that path exists
-      throw new FileNotFoundException(path.toString());
-
+    // check that path exists
+    fs.getFileStatus(path);
     String qpath = fs.makeQualified(path).toString();
 
     Path trashRoot = fs.getTrashRoot(path);
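In RawLocalFileSystem#open() and TrashPolicyDefault#moveToTrash() the bare getFileStatus(f) call is the existence check: it already throws FileNotFoundException for a missing path, so the old exists()-then-throw sequence was a second stat that only changed the message text. Callers keep working because FileNotFoundException is a subclass of IOException; a hypothetical caller:

    // Same catch clause works before and after the patch; only the
    // exception message differs. 'trashPolicy' is a hypothetical
    // TrashPolicy instance, not something constructed in this patch.
    try {
      trashPolicy.moveToTrash(new Path("/no/such/file"));
    } catch (FileNotFoundException e) {
      System.err.println("nothing to trash: " + e.getMessage());
    }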
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index d98de56587..eed910c9a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -328,9 +329,7 @@ private void processGeneralOptions(CommandLine line) throws IOException {
       // check if the local file exists
       FileSystem localFs = FileSystem.getLocal(conf);
       Path p = localFs.makeQualified(new Path(fileName));
-      if (!localFs.exists(p)) {
-        throw new FileNotFoundException("File "+fileName+" does not exist.");
-      }
+      localFs.getFileStatus(p);
       if(LOG.isDebugEnabled()) {
         LOG.debug("setting conf tokensFile: " + fileName);
       }
@@ -437,9 +436,7 @@ private String validateFiles(String files, boolean expandWildcard)
       if (pathURI.getScheme() == null) {
         //default to the local file system
         //check if the file exists or not first
-        if (!localFs.exists(path)) {
-          throw new FileNotFoundException("File " + tmp + " does not exist.");
-        }
+        localFs.getFileStatus(path);
         if (isWildcard) {
           expandWildcard(finalPaths, path, localFs);
         } else {
@@ -452,9 +449,8 @@ private String validateFiles(String files, boolean expandWildcard)
         // these files to the file system ResourceManager is running
         // on.
         FileSystem fs = path.getFileSystem(conf);
-        if (!fs.exists(path)) {
-          throw new FileNotFoundException("File " + tmp + " does not exist.");
-        }
+        // existence check
+        fs.getFileStatus(path);
         if (isWildcard) {
           expandWildcard(finalPaths, path, fs);
         } else {
@@ -476,7 +472,8 @@ private boolean matchesCurrentDirectory(String path) {
 
   private void expandWildcard(List<String> finalPaths, Path path, FileSystem fs)
       throws IOException {
-    if (!fs.isDirectory(path)) {
+    FileStatus status = fs.getFileStatus(path);
+    if (!status.isDirectory()) {
       throw new FileNotFoundException(path + " is not a directory.");
     }
     // get all the jars in the directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 18a29e8b09..4f97896a19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2530,8 +2530,9 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
       } else {
         Path userTrash = new Path(ezTrashRoot, System.getProperty(
             "user.name"));
-        if (exists(userTrash)) {
+        try {
           ret.add(getFileStatus(userTrash));
+        } catch (FileNotFoundException ignored) {
         }
       }
     }
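getTrashRoots() is the "optional entry" flavour of the rewrite: the FileStatus is wanted anyway, so fetching it and ignoring FileNotFoundException halves the RPCs and closes the window in which the trash directory could vanish between probe and fetch. The same shape as a standalone sketch (hypothetical helper; imports as in the first sketch plus java.util.List and java.util.ArrayList):

    // Collect statuses for the candidates that exist, skipping the rest.
    static List<FileStatus> statusesOf(FileSystem fs, List<Path> candidates)
        throws IOException {
      List<FileStatus> found = new ArrayList<>();
      for (Path p : candidates) {
        try {
          found.add(fs.getFileStatus(p));     // one RPC per candidate
        } catch (FileNotFoundException skipped) {
          // absent candidates are simply not collected
        }
      }
      return found;
    }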
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index b12fe01b37..550806441a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -512,10 +513,10 @@ private void provisionEZTrash(Path path) throws IOException {
 
     Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
 
-    if (dfs.exists(trashPath)) {
+    try {
+      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
       String errMessage = "Will not provision new trash directory for " +
           "encryption zone " + ez.getPath() + ". Path already exists.";
-      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
       if (!trashFileStatus.isDirectory()) {
         errMessage += "\r\n" +
             "Warning: " + trashPath.toString() + " is not a directory";
@@ -525,7 +526,9 @@ private void provisionEZTrash(Path path) throws IOException {
         "Warning: the permission of " +
             trashPath.toString() + " is not " + TRASH_PERMISSION;
       }
-      throw new IOException(errMessage);
+      throw new FileAlreadyExistsException(errMessage);
+    } catch (FileNotFoundException ignored) {
+      // no trash path
     }
 
     // Update the permission bits
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 04033564a8..0cc605c528 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -1622,9 +1622,7 @@ private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
     if (stagingDirFS.exists(fromPath)) {
       LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString());
       // TODO temporarily removing the existing dst
-      if (doneDirFS.exists(toPath)) {
-        doneDirFS.delete(toPath, true);
-      }
+      doneDirFS.delete(toPath, true);
       boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
           false, getConfig());
 
@@ -1637,10 +1635,6 @@ private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
     }
   }
 
-  boolean pathExists(FileSystem fileSys, Path path) throws IOException {
-    return fileSys.exists(path);
-  }
-
   private String getTempFileName(String srcFile) {
     return srcFile + "_tmp";
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index 15dbc13fb9..4c48ff48c5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -435,9 +435,11 @@ private void copyLog4jPropertyFile(Job job, Path submitJobDir,
     LOG.debug("default FileSystem: " + jtFs.getUri());
     FsPermission mapredSysPerms =
         new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
-    if (!jtFs.exists(submitJobDir)) {
+    try {
+      jtFs.getFileStatus(submitJobDir);
+    } catch (FileNotFoundException e) {
       throw new IOException("Cannot find job submission directory! "
-          + "It should just be created, so something wrong here.");
+          + "It should just be created, so something wrong here.", e);
     }
 
     Path fileDir = JobSubmissionFiles.getJobLog4jFile(submitJobDir);
@@ -488,9 +490,7 @@ private String validateFilePath(String file, Configuration conf)
     if (pathURI.getScheme() == null) {
       // default to the local file system
      // check if the file exists or not first
-      if (!localFs.exists(path)) {
-        throw new FileNotFoundException("File " + file + " does not exist.");
-      }
+      localFs.getFileStatus(path);
       finalPath =
           path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory())
               .toString();
@@ -500,9 +500,7 @@ private String validateFilePath(String file, Configuration conf)
       // these files to the file system ResourceManager is running
       // on.
       FileSystem fs = path.getFileSystem(conf);
-      if (!fs.exists(path)) {
-        throw new FileNotFoundException("File " + file + " does not exist.");
-      }
+      fs.getFileStatus(path);
       finalPath =
           path.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
index 9dd45c3af6..ae914c3e53 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -130,7 +131,7 @@ public static Path getStagingDir(Cluster cluster, Configuration conf,
     Path stagingArea = cluster.getStagingAreaDir();
     FileSystem fs = stagingArea.getFileSystem(conf);
     UserGroupInformation currentUser = realUser.getCurrentUser();
-    if (fs.exists(stagingArea)) {
+    try {
       FileStatus fsStatus = fs.getFileStatus(stagingArea);
       String fileOwner = fsStatus.getOwner();
       if (!(fileOwner.equals(currentUser.getShortUserName()) || fileOwner
@@ -156,7 +157,7 @@ public static Path getStagingDir(Cluster cluster, Configuration conf,
             "to correct value " + JOB_DIR_PERMISSION);
         fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
       }
-    } else {
+    } catch (FileNotFoundException e) {
       fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION));
     }
     return stagingArea;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index d6610c4be1..9e750be2fe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -674,10 +674,9 @@ public void recoverTask(TaskAttemptContext context)
       if (algorithmVersion == 1) {
         if (fs.exists(previousCommittedTaskPath)) {
           Path committedTaskPath = getCommittedTaskPath(context);
-          if (fs.exists(committedTaskPath)) {
-            if (!fs.delete(committedTaskPath, true)) {
-              throw new IOException("Could not delete "+committedTaskPath);
-            }
+          if (!fs.delete(committedTaskPath, true) &&
+              fs.exists(committedTaskPath)) {
+            throw new IOException("Could not delete " + committedTaskPath);
           }
           //Rename can fail if the parent directory does not yet exist.
           Path committedParent = committedTaskPath.getParent();
@@ -693,11 +692,12 @@ public void recoverTask(TaskAttemptContext context)
         // essentially a no-op, but for backwards compatibility
         // after upgrade to the new fileOutputCommitter,
         // check if there are any output left in committedTaskPath
-        if (fs.exists(previousCommittedTaskPath)) {
+        try {
+          FileStatus from = fs.getFileStatus(previousCommittedTaskPath);
           LOG.info("Recovering task for upgrading scenario, moving files from "
               + previousCommittedTaskPath + " to " + outputPath);
-          FileStatus from = fs.getFileStatus(previousCommittedTaskPath);
           mergePaths(fs, from, outputPath);
+        } catch (FileNotFoundException ignored) {
         }
         LOG.info("Done recovering task " + attemptId);
       }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java
index 1d15370ea4..238a2eacd4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java
@@ -97,7 +97,7 @@ public void cleanUpPartialOutputForTask(TaskAttemptContext context)
     for (int i = 0; i < taid.getId(); ++i) {
       TaskAttemptID oldId = new TaskAttemptID(tid, i);
       Path pTask = new Path(pCommit, oldId.toString());
-      if (fs.exists(pTask) && !fs.delete(pTask, true)) {
+      if (!fs.delete(pTask, true) && fs.exists(pTask)) {
        throw new IOException("Failed to delete " + pTask);
      }
    }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
index cce9f37838..df4e9195ec 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
@@ -322,10 +322,8 @@ public static <K,V> void writePartitionFile(Job job, Sampler<K,V> sampler)
     Arrays.sort(samples, comparator);
     Path dst = new Path(TotalOrderPartitioner.getPartitionFile(conf));
     FileSystem fs = dst.getFileSystem(conf);
-    if (fs.exists(dst)) {
-      fs.delete(dst, false);
-    }
-    SequenceFile.Writer writer = SequenceFile.createWriter(fs, 
+    fs.delete(dst, false);
+    SequenceFile.Writer writer = SequenceFile.createWriter(fs,
       conf, dst, job.getMapOutputKeyClass(), NullWritable.class);
     NullWritable nullValue = NullWritable.get();
     float stepSize = samples.length / (float) numPartitions;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
index 09ac286ef2..e989bf4c38 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
@@ -80,7 +80,7 @@ public void testPartialOutputCleanup()
     foc.cleanUpPartialOutputForTask(context);
 
     verify(fs).delete(eq(p0), eq(true));
     verify(fs).delete(eq(p1), eq(true));
-    verify(fs, never()).delete(eq(p3), eq(true));
+    verify(fs, times(1)).delete(eq(p3), eq(true));
     verify(fs, never()).delete(eq(p2), eq(true));
   }
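The committer changes combine two tricks. First, FileSystem#delete is specified to return false rather than throw when the path is already absent, so it needs no exists() guard at all (that is also why the unguarded deletes in JobHistoryEventHandler and HadoopArchives above and below are safe). Second, where the code must distinguish "absent" from "delete failed", reversing the order to !delete && exists makes the common success path cost a single call; only the ambiguous false return pays for a probe. The reordering is also why the Mockito expectations in TestPreemptableFileOutputCommitter change: p3 is now always passed to delete(), which harmlessly returns false when the attempt directory never existed. A sketch of the reordered idiom (hypothetical helper name; imports as in the first sketch):

    // delete() == true  -> path removed: done, one RPC.
    // delete() == false -> absent (fine) or a real failure: only now
    //                      spend a second RPC to disambiguate the two.
    static void deleteOrFail(FileSystem fs, Path p, boolean recursive)
        throws IOException {
      if (!fs.delete(p, recursive) && fs.exists(p)) {
        throw new IOException("Failed to delete " + p);
      }
    }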
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
index 9902f5ea8f..47d6583338 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
@@ -182,7 +182,7 @@ public void storeTokenMasterKey(DelegationKey key) throws IOException {
     Path keyPath = new Path(tokenKeysStatePath,
         TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
     if (fs.exists(keyPath)) {
-      throw new IOException(keyPath + " already exists");
+      throw new FileAlreadyExistsException(keyPath + " already exists");
     }
 
     ByteArrayOutputStream memStream = new ByteArrayOutputStream();
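Upgrading the bare IOException to org.apache.hadoop.fs.FileAlreadyExistsException (here and in HdfsAdmin#provisionEZTrash above) keeps the message but gives callers a type to branch on instead of parsing exception text. A hypothetical caller (stateStore, key and LOG are assumptions for illustration, not names from this patch):

    try {
      stateStore.storeTokenMasterKey(key);
    } catch (FileAlreadyExistsException dup) {
      // a duplicate key file is a recoverable condition, not a hard error
      LOG.warn("master key " + key.getKeyId() + " already stored; skipping");
    }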
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index 73c446d7e7..96580b1e75 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.examples.terasort;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
@@ -115,7 +116,7 @@ public void checkOutputSpecs(JobContext job
 
     final FileSystem fs = outDir.getFileSystem(jobConf);
 
-    if (fs.exists(outDir)) {
+    try {
       // existing output dir is considered empty iff its only content is the
       // partition file.
       //
@@ -131,6 +132,7 @@ public void checkOutputSpecs(JobContext job
         throw new FileAlreadyExistsException("Output directory " + outDir
             + " already exists");
       }
+    } catch (FileNotFoundException ignored) {
     }
   }
diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
index 2e44070136..4a7aad99f8 100644
--- a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
+++ b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
@@ -178,9 +178,7 @@ public int run(String[] args) throws Exception {
     } finally {
       if (fs != null) {
         // Cleanup working directory
-        if (fs.exists(workingDir)) {
-          fs.delete(workingDir, true);
-        }
+        fs.delete(workingDir, true);
         fs.close();
       }
     }
diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index ee148503f1..c2097dc673 100644
--- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -20,7 +20,6 @@
 
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.URLEncoder;
@@ -149,9 +148,7 @@ private static void checkPaths(Configuration conf, List<Path> paths)
       throws IOException {
     for (Path p : paths) {
       FileSystem fs = p.getFileSystem(conf);
-      if (!fs.exists(p)) {
-        throw new FileNotFoundException("Source " + p + " does not exist.");
-      }
+      fs.getFileStatus(p);
     }
   }
@@ -619,9 +616,7 @@ public void configure(JobConf conf) {
       try {
         destFs = tmpOutput.getFileSystem(conf);
         //this was a stale copy
-        if (destFs.exists(tmpOutput)) {
-          destFs.delete(tmpOutput, false);
-        }
+        destFs.delete(tmpOutput, false);
         partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096),
             destFs.getDefaultReplication(tmpOutput), blockSize);
       } catch(IOException ie) {
@@ -747,12 +742,8 @@ public void configure(JobConf conf) {
       replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
       try {
         fs = masterIndex.getFileSystem(conf);
-        if (fs.exists(masterIndex)) {
-          fs.delete(masterIndex, false);
-        }
-        if (fs.exists(index)) {
-          fs.delete(index, false);
-        }
+        fs.delete(masterIndex, false);
+        fs.delete(index, false);
         indexStream = fs.create(index);
         outStream = fs.create(masterIndex);
         String version = VERSION + " \n";
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
index d3115505b5..f512489a8a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.azure;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
@@ -139,12 +140,15 @@ private boolean recursiveCheckChildPathName(FileSystem fs, Path p)
     if (p == null) {
       return true;
     }
-    if (!fs.exists(p)) {
+    FileStatus status;
+    try {
+      status = fs.getFileStatus(p);
+    } catch (FileNotFoundException e) {
       System.out.println("Path " + p + " does not exist!");
       return true;
     }
 
-    if (fs.isFile(p)) {
+    if (status.isFile()) {
       if (containsColon(p)) {
         System.out.println("Warning: file " + p + " has a colon in its name.");
         return false;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index f1fae11bd2..bcae96a8d8 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -328,9 +328,7 @@ private void moveToTarget(DiffInfo[] diffs,
     Arrays.sort(diffs, DiffInfo.targetComparator);
     for (DiffInfo diff : diffs) {
       if (diff.getTarget() != null) {
-        if (!targetFs.exists(diff.getTarget().getParent())) {
-          targetFs.mkdirs(diff.getTarget().getParent());
-        }
+        targetFs.mkdirs(diff.getTarget().getParent());
         targetFs.rename(diff.getTmp(), diff.getTarget());
       }
     }
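mkdirs() carries the same contract as mkdir -p in the shell: creating a directory that already exists is a success, so the exists() pre-checks in DistCpSync#moveToTarget() (and in FileSystemBasedConfigurationProvider and FileSystemNodeLabelsStore further down) bought nothing. A sketch (hypothetical path; fs as in the surrounding hunks):

    // mkdirs() is idempotent: it returns true once the directory exists,
    // whether or not this call created it, so no existence probe is needed.
    Path parent = new Path("/user/example/output");
    if (!fs.mkdirs(parent)) {
      throw new IOException("Could not create " + parent);
    }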
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index 0002d4f56b..105e4f2fe1 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -126,7 +126,13 @@ protected void validatePaths(DistCpOptions options)
     Path targetPath = options.getTargetPath();
     FileSystem targetFS = targetPath.getFileSystem(getConf());
-    boolean targetIsFile = targetFS.isFile(targetPath);
+    boolean targetExists = false;
+    boolean targetIsFile = false;
+    try {
+      targetIsFile = targetFS.getFileStatus(targetPath).isFile();
+      targetExists = true;
+    } catch (FileNotFoundException ignored) {
+    }
     targetPath = targetFS.makeQualified(targetPath);
     final boolean targetIsReservedRaw =
         Path.getPathWithoutSchemeAndAuthority(targetPath).toString().
@@ -147,7 +153,7 @@ protected void validatePaths(DistCpOptions options)
       }
     }
 
-    if (options.shouldAtomicCommit() && targetFS.exists(targetPath)) {
+    if (options.shouldAtomicCommit() && targetExists) {
       throw new InvalidInputException("Target path for atomic-commit already exists: " +
           targetPath + ". Cannot atomic-commit to pre-existing target-path.");
     }
@@ -448,7 +454,7 @@ private Path computeSourceRootPath(FileStatus sourceStatus,
         && !sourceStatus.isDirectory();
 
     if (solitaryFile) {
-      if (targetFS.isFile(target) || !targetPathExists) {
+      if (!targetPathExists || targetFS.isFile(target)) {
         return sourceStatus.getPath();
       } else {
         return sourceStatus.getPath().getParent();
@@ -495,9 +501,7 @@ private Path makeQualified(Path path) throws IOException {
 
   private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException {
     FileSystem fs = pathToListFile.getFileSystem(getConf());
-    if (fs.exists(pathToListFile)) {
-      fs.delete(pathToListFile, false);
-    }
+    fs.delete(pathToListFile, false);
     return SequenceFile.createWriter(getConf(),
         SequenceFile.Writer.file(pathToListFile),
         SequenceFile.Writer.keyClass(Text.class),
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index dd653b297d..75cefb488a 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -279,8 +279,8 @@ private void deleteMissing(Configuration conf) throws IOException {
       if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;
 
       // Target doesn't exist at source. Delete.
-      boolean result = (!targetFS.exists(trgtFileStatus.getPath()) ||
-          targetFS.delete(trgtFileStatus.getPath(), true));
+      boolean result = targetFS.delete(trgtFileStatus.getPath(), true)
+          || !targetFS.exists(trgtFileStatus.getPath());
       if (result) {
         LOG.info("Deleted " + trgtFileStatus.getPath() + " - Missing at source");
         deletedEntries++;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
index c6f6052d3a..e1873f17e4 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
@@ -113,8 +113,9 @@ public void setup(Context context) throws IOException, InterruptedException {
         DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
     targetFS = targetFinalPath.getFileSystem(conf);
 
-    if (targetFS.exists(targetFinalPath) && targetFS.isFile(targetFinalPath)) {
-      overWrite = true; // When target is an existing file, overwrite it.
+    try {
+      overWrite = overWrite || targetFS.getFileStatus(targetFinalPath).isFile();
+    } catch (FileNotFoundException ignored) {
     }
 
     startEpoch = System.currentTimeMillis();
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 4ad530d02c..82b3b62117 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -140,7 +140,7 @@ private long doCopy(CopyListingFileStatus source, Path target,
       // note that for append case, it is possible that we append partial data
       // and then fail. In that case, for the next retry, we either reuse the
       // partial appended data if it is good or we overwrite the whole file
-      if (!toAppend && targetFS.exists(targetPath)) {
+      if (!toAppend) {
         targetFS.delete(targetPath, false);
       }
     }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 1784c5de51..c308e6f1f9 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -356,9 +356,7 @@ public static Path sortListing(FileSystem fs, Configuration conf, Path sourceLis
         CopyListingFileStatus.class, conf);
     Path output = new Path(sourceListing.toString() + "_sorted");
 
-    if (fs.exists(output)) {
-      fs.delete(output, false);
-    }
+    fs.delete(output, false);
 
     sorter.sort(sourceListing, output);
     return output;
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
index 2c89cb084d..cdd7caceaa 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java
@@ -20,7 +20,6 @@
 
 import java.io.BufferedReader;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.nio.charset.Charset;
@@ -68,11 +67,10 @@ protected static void checkSource(Configuration conf, List<Path> srcs)
     List<IOException> ioes = new ArrayList<IOException>();
     for(Path p : srcs) {
       try {
-        if (!p.getFileSystem(conf).exists(p)) {
-          ioes.add(new FileNotFoundException("Source "+p+" does not exist."));
-        }
+        p.getFileSystem(conf).getFileStatus(p);
+      } catch(IOException e) {
+        ioes.add(e);
       }
-      catch(IOException e) {ioes.add(e);}
     }
     if (!ioes.isEmpty()) {
       throw new InvalidInputException(ioes);
@@ -113,4 +111,4 @@ public static class DuplicationException extends IOException {
     public static final int ERROR_CODE = -2;
     DuplicationException(String message) {super(message);}
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
index b4dc5eb5a2..f91ba3013c 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
@@ -489,10 +489,13 @@ public static byte[] dataset(int len, int base, int modulo) {
    */
   public static void assertPathExists(FileSystem fileSystem, String message,
                                       Path path) throws IOException {
-    if (!fileSystem.exists(path)) {
+    try {
+      fileSystem.getFileStatus(path);
+    } catch (FileNotFoundException e) {
       //failure, report it
-      fail(message + ": not found " + path + " in " + path.getParent());
-      ls(fileSystem, path.getParent());
+      throw (IOException)new FileNotFoundException(message + ": not found "
+          + path + " in " + path.getParent() + ": " + e + " -- "
+          + ls(fileSystem, path.getParent())).initCause(e);
     }
   }
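SwiftTestUtils#assertPathExists shows how to add context to a FileNotFoundException without losing the original stack trace: FileNotFoundException only has a message constructor, so the cause is chained through Throwable#initCause, whose Throwable return value is cast back. FileSystemApplicationHistoryStore and InMemorySCMStore below use the same idiom. As a reusable sketch (hypothetical helper; imports as in the first sketch):

    // FileNotFoundException has no (String, Throwable) constructor;
    // initCause() attaches the cause and returns Throwable, hence the cast
    // (safe because FileNotFoundException extends IOException).
    static IOException wrap(String context, FileNotFoundException e) {
      return (IOException) new FileNotFoundException(context + ": " + e)
          .initCause(e);
    }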
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
@@ -21,6 +21,7 @@
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
@@ -184,27 +185,23 @@ private void reload() throws Exception {
     if (reload) {
       // Reload persisted entries
       Path stateFilename = new Path(persistDirPath, COMMIT_STATE_FILENAME);
-      FileSystem fs = stateFilename.getFileSystem(conf);
-      if (fs.exists(stateFilename)) {
-        reloadState(stateFilename, conf);
-      } else {
-        throw new RuntimeException("No latest state persist directory found!"
+      if (!reloadState(stateFilename, conf)) {
+        throw new RuntimeException("No latest state persist directory found!"
            + " Disable persistence and run.");
      }
    }
  }
 
-  private void reloadState(Path stateFile, Configuration conf)
-      throws Exception {
-    FileSystem fs = stateFile.getFileSystem(conf);
-    if (fs.exists(stateFile)) {
+  private boolean reloadState(Path stateFile, Configuration configuration)
+      throws Exception {
+    FileSystem fs = stateFile.getFileSystem(configuration);
+    try (FSDataInputStream in = fs.open(stateFile)) {
       System.out.println("Reading state from " + stateFile.toString());
-      FSDataInputStream in = fs.open(stateFile);
       read(in);
-      in.close();
-    } else {
+      return true;
+    } catch (FileNotFoundException e) {
       System.out.println("No state information found for " + stateFile);
+      return false;
     }
   }
@@ -334,4 +331,4 @@ public void setStates(HashMap<String, State> states) {
     //TODO Should we do a clone?
     this.pool = states;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
index bf50cadd3d..cef03b9b05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
@@ -74,9 +74,7 @@ public synchronized void initInternal(Configuration bootstrapConf)
         new Path(bootstrapConf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
             YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
     fs = configDir.getFileSystem(bootstrapConf);
-    if (!fs.exists(configDir)) {
-      fs.mkdirs(configDir);
-    }
+    fs.mkdirs(configDir);
   }
 
   @Override
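StatePool#reloadState() folds the probe into the open() itself: FileSystem#open() raises FileNotFoundException for a missing file, and try-with-resources replaces the manual in.close() that the old code could leak on a failed read(). The shape in isolation (hypothetical helper with a placeholder deserialization step; FSDataInputStream is org.apache.hadoop.fs.FSDataInputStream, other imports as in the first sketch):

    // Open-or-absent in one call; the stream closes on every exit path.
    static boolean readIfPresent(FileSystem fs, Path stateFile)
        throws IOException {
      try (FSDataInputStream in = fs.open(stateFile)) {  // FNFE if absent
        System.out.println("Reading state from " + stateFile);
        // ... deserialize from 'in' here ...
        return true;
      } catch (FileNotFoundException e) {
        return false;
      }
    }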
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index b1284e146c..3070cc64af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.Closeable;
-import java.io.FileNotFoundException;
 import java.io.Flushable;
 import java.io.IOException;
 import java.net.URI;
@@ -114,10 +113,8 @@ public FileSystemTimelineWriter(Configuration conf,
             .TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT));
 
     fs = FileSystem.newInstance(activePath.toUri(), fsConf);
-    if (!fs.exists(activePath)) {
-      throw new FileNotFoundException(activePath + " does not exist");
-    }
-
+    // raise FileNotFoundException if the path is not found
+    fs.getFileStatus(activePath);
     summaryEntityTypes = new HashSet<String>(
         conf.getStringCollection(YarnConfiguration
             .TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_ENTITY_TYPES));
@@ -985,9 +982,8 @@ private Path createAttemptDir(ApplicationAttemptId appAttemptId)
     Path appDir = createApplicationDir(appAttemptId.getApplicationId());
 
     Path attemptDir = new Path(appDir, appAttemptId.toString());
-    if (!fs.exists(attemptDir)) {
-      FileSystem.mkdirs(fs, attemptDir, new FsPermission(
-          APP_LOG_DIR_PERMISSIONS));
+    if (FileSystem.mkdirs(fs, attemptDir,
+        new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("New attempt directory created - " + attemptDir);
       }
@@ -998,9 +994,8 @@ private Path createAttemptDir(ApplicationAttemptId appAttemptId)
 
   private Path createApplicationDir(ApplicationId appId) throws IOException {
     Path appDir = new Path(activePath, appId.toString());
-    if (!fs.exists(appDir)) {
-      FileSystem.mkdirs(fs, appDir,
-          new FsPermission(APP_LOG_DIR_PERMISSIONS));
+    if (FileSystem.mkdirs(fs, appDir,
+        new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("New app directory created - " + appDir);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index 9a2b8bec3b..0ec4ea42f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.nodelabels;
 
 import java.io.EOFException;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
@@ -83,9 +84,7 @@ public void init(Configuration conf) throws Exception {
     setFileSystem(conf);
 
     // mkdir of root dir path
-    if (!fs.exists(fsWorkingPath)) {
-      fs.mkdirs(fsWorkingPath);
-    }
+    fs.mkdirs(fsWorkingPath);
   }
 
   @Override
@@ -155,12 +154,15 @@ protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath)
       throws IOException {
     // If mirror.new exists, read from mirror.new,
     FSDataInputStream is = null;
-    if (fs.exists(newMirrorPath)) {
+    try {
       is = fs.open(newMirrorPath);
-    } else if (fs.exists(oldMirrorPath)) {
-      is = fs.open(oldMirrorPath);
-    }
+    } catch (FileNotFoundException e) {
+      try {
+        is = fs.open(oldMirrorPath);
+      } catch (FileNotFoundException ignored) {
+      }
+    }
 
     if (null != is) {
       List<NodeLabel> labels = new AddToClusterNodeLabelsRequestPBImpl(
           AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is))
@@ -204,8 +206,13 @@ public void recover() throws YarnException,
 
     // Open and process editlog
     editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME);
-    if (fs.exists(editLogPath)) {
-      FSDataInputStream is = fs.open(editLogPath);
+    FSDataInputStream is;
+    try {
+      is = fs.open(editLogPath);
+    } catch (FileNotFoundException e) {
+      is = null;
+    }
+    if (null != is) {
 
       while (true) {
         try {
@@ -250,6 +257,7 @@ public void recover() throws YarnException,
           break;
         }
       }
+      is.close();
     }
 
     // Serialize current mirror to mirror.writing
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
index 6be571589d..989f027940 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
@@ -116,16 +116,12 @@ private void writeNewMirror() throws IOException {
 
       // Rename mirror.new.tmp to mirror.new (will remove .new if it's existed)
       Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
-      if (fs.exists(newPath)) {
-        fs.delete(newPath, false);
-      }
+      fs.delete(newPath, false);
       fs.rename(newTmpPath, newPath);
 
       // Remove existing mirror and rename mirror.new to mirror
       Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
-      if (fs.exists(mirrorPath)) {
-        fs.delete(mirrorPath, false);
-      }
+      fs.delete(mirrorPath, false);
       fs.rename(newPath, mirrorPath);
     } finally {
       readLock.unlock();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
index 82e4e11785..ed2f4aa6c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
@@ -348,10 +348,10 @@ void setFileSystem(Configuration conf) throws IOException {
     };
     mockStore.setNodeLabelsManager(mgr);
     mockStore.fs = mockFs;
-    verifyMkdirsCount(mockStore, true, 0);
-    verifyMkdirsCount(mockStore, false, 1);
     verifyMkdirsCount(mockStore, true, 1);
     verifyMkdirsCount(mockStore, false, 2);
+    verifyMkdirsCount(mockStore, true, 3);
+    verifyMkdirsCount(mockStore, false, 4);
   }
 
   private void verifyMkdirsCount(FileSystemNodeLabelsStore store,
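The shifted counts in TestFileSystemNodeLabelsStore are a direct consequence of the init() change above: mkdirs() is now called unconditionally, so every init() bumps the mock's invocation count regardless of whether the working directory "exists" on the mocked filesystem. Roughly the assertion at the heart of the helper (sketch, simplified from the test; verify/times/any are the test's existing Mockito static imports):

    // After N init() calls, the mocked FileSystem has seen exactly
    // N mkdirs() invocations, whatever exists() would have returned.
    verify(mockFs, times(expectedNumOfCalls)).mkdirs(any(Path.class));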
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
index bb52b5566e..be7bc6df0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
@@ -22,6 +22,7 @@
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -123,12 +124,7 @@ public void serviceStart() throws Exception {
     rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME);
     try {
       fs = getFileSystem(fsWorkingPath, conf);
-
-      if (!fs.isDirectory(rootDirPath)) {
-        fs.mkdirs(rootDirPath);
-        fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
-      }
-
+      fs.mkdirs(rootDirPath, ROOT_DIR_UMASK);
     } catch (IOException e) {
       LOG.error("Error when initializing FileSystemHistoryStorage", e);
       throw e;
@@ -659,9 +655,11 @@ private HistoryFileWriter getHistoryFileWriter(ApplicationId appId)
   private HistoryFileReader getHistoryFileReader(ApplicationId appId)
       throws IOException {
     Path applicationHistoryFile = new Path(rootDirPath, appId.toString());
-    if (!fs.exists(applicationHistoryFile)) {
-      throw new IOException("History file for application " + appId
-          + " is not found");
+    try {
+      fs.getFileStatus(applicationHistoryFile);
+    } catch (FileNotFoundException e) {
+      throw (FileNotFoundException) new FileNotFoundException("History file for"
+          + " application " + appId + " is not found: " + e).initCause(e);
     }
     // The history file is still under writing
     if (outstandingWriters.containsKey(appId)) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index c91d9f5a6d..bd6bea3998 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -27,6 +27,7 @@
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -279,8 +280,8 @@ public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
     }
 
     // Make sure that directory creation was not attempted
-    verify(fs, times(1)).isDirectory(any(Path.class));
-    verify(fs, times(0)).mkdirs(any(Path.class));
+    verify(fs, never()).isDirectory(any(Path.class));
+    verify(fs, times(1)).mkdirs(any(Path.class));
   }
 
   @Test
@@ -301,7 +302,7 @@ public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
     }
 
     // Make sure that directory creation was attempted
-    verify(fs, times(1)).isDirectory(any(Path.class));
+    verify(fs, never()).isDirectory(any(Path.class));
     verify(fs, times(1)).mkdirs(any(Path.class));
   }
 }
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
@@ -192,10 +192,12 @@ Path getActualPath() throws IOException {
 
   private void deleteTempFile(Path tempPath) {
     try {
-      if (tempPath != null && fs.exists(tempPath)) {
+      if (tempPath != null) {
         fs.delete(tempPath, false);
       }
-    } catch (IOException ignore) {}
+    } catch (IOException ioe) {
+      LOG.debug("Exception received while deleting temp files", ioe);
+    }
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java
index 54d736f537..7b769a72e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.sharedcachemanager.store;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -189,11 +190,14 @@ Map<String, String> getInitialCachedResources(FileSystem fs,
         conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
             YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
     Path root = new Path(location);
-    if (!fs.exists(root)) {
+    try {
+      fs.getFileStatus(root);
+    } catch (FileNotFoundException e) {
       String message = "The shared cache root directory " + location
           + " was not found";
       LOG.error(message);
-      throw new IOException(message);
+      throw (IOException)new FileNotFoundException(message)
+          .initCause(e);
     }
 
     int nestedLevel = SharedCacheUtil.getCacheDepth(conf);
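Two closing notes on the series. The patch does not outlaw exists(): calls whose boolean answer is genuinely consumed, such as stagingDirFS.exists(fromPath) in JobHistoryEventHandler or the post-delete probe in CopyCommitter, survive. And FileSystemApplicationHistoryStore#serviceStart() above shows the combined destination idiom: mkdirs(path, permission) creates the directory, applies the permission (subject to the filesystem's umask semantics), and succeeds when the directory already exists, all without an isDirectory() probe. A final sketch mirroring that change (hypothetical helper; FsPermission is org.apache.hadoop.fs.permission.FsPermission, other imports as in the first sketch):

    // Ensure a directory exists with the given permission, no probes needed.
    static void ensureDir(FileSystem fs, Path dir, FsPermission perm)
        throws IOException {
      if (!fs.mkdirs(dir, perm)) {       // idempotent, single call
        throw new IOException("Could not create " + dir);
      }
    }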