From 6aac6cb212d3a96884c937448fd02e147ba80ef1 Mon Sep 17 00:00:00 2001 From: Sebastian Baunsgaard Date: Tue, 25 Apr 2023 18:14:04 +0200 Subject: [PATCH] HADOOP-18660. Filesystem Spelling Mistake (#5475). Contributed by Sebastian Baunsgaard. Signed-off-by: Ayush Saxena --- .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 4 ++-- .../src/site/markdown/filesystem/pathcapabilities.md | 4 ++-- .../org/apache/hadoop/fs/http/server/HttpFSServer.java | 8 ++++---- .../hadoop/lib/servlet/FileSystemReleaseFilter.java | 4 ++-- .../src/site/markdown/manifest_committer.md | 4 ++-- .../apache/hadoop/mapred/uploader/FrameworkUploader.java | 4 ++-- .../java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java | 2 +- .../org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java | 2 +- .../site/markdown/tools/hadoop-aws/delegation_tokens.md | 4 ++-- .../site/markdown/tools/hadoop-aws/troubleshooting_s3a.md | 4 ++-- ...ileystem.java => ITestRoleDelegationInFilesystem.java} | 4 ++-- ...ystem.java => ITestSessionDelegationInFilesystem.java} | 6 +++--- 12 files changed, 25 insertions(+), 25 deletions(-) rename hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/{ITestRoleDelegationInFileystem.java => ITestRoleDelegationInFilesystem.java} (95%) rename hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/{ITestSessionDelegationInFileystem.java => ITestSessionDelegationInFilesystem.java} (99%) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 5d8f0e575f..52425211dc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -3602,9 +3602,9 @@ private static FileSystem createFileSystem(URI uri, Configuration conf) } catch (IOException | RuntimeException e) { // exception 
raised during initialization. // log summary at warn and full stack at debug - LOGGER.warn("Failed to initialize fileystem {}: {}", + LOGGER.warn("Failed to initialize filesystem {}: {}", uri, e.toString()); - LOGGER.debug("Failed to initialize fileystem", e); + LOGGER.debug("Failed to initialize filesystem", e); // then (robustly) close the FS, so as to invoke any // cleanup code. IOUtils.cleanupWithLogger(LOGGER, fs); diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md index e053bfbaed..e00efed69e 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md @@ -31,7 +31,7 @@ There are a number of goals here: having to invoke them. 1. Allow filesystems with their own optional per-instance features to declare whether or not they are active for the specific instance. -1. Allow for fileystem connectors which work with object stores to expose the +1. Allow for filesystem connectors which work with object stores to expose the fundamental difference in semantics of these stores (e.g: files not visible until closed, file rename being `O(data)`), directory rename being non-atomic, etc. @@ -122,7 +122,7 @@ will be permitted on that path by the caller. *Duration of availability* As the state of a remote store changes,so may path capabilities. This -may be due to changes in the local state of the fileystem (e.g. symbolic links +may be due to changes in the local state of the filesystem (e.g. symbolic links or mount points changing), or changes in its functionality (e.g. a feature becoming availaible/unavailable due to operational changes, system upgrades, etc.) 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index b50d24900a..7d1646af21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -150,7 +150,7 @@ private UserGroupInformation getHttpUGI(HttpServletRequest request) { * @return FileSystemExecutor response * * @throws IOException thrown if an IO error occurs. - * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown * exceptions are handled by {@link HttpFSExceptionProvider}. */ private T fsExecute(UserGroupInformation ugi, FileSystemAccess.FileSystemExecutor executor) @@ -161,8 +161,8 @@ private T fsExecute(UserGroupInformation ugi, FileSystemAccess.FileSystemExe } /** - * Returns a filesystem instance. The fileystem instance is wired for release at the completion of - * the current Servlet request via the {@link FileSystemReleaseFilter}. + * Returns a filesystem instance. The filesystem instance is wired for release at the completion + * of the current Servlet request via the {@link FileSystemReleaseFilter}. *

* If a do-as user is specified, the current user must be a valid proxyuser, otherwise an * AccessControlException will be thrown. @@ -173,7 +173,7 @@ private T fsExecute(UserGroupInformation ugi, FileSystemAccess.FileSystemExe * * @throws IOException thrown if an IO error occurred. Thrown exceptions are * handled by {@link HttpFSExceptionProvider}. - * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown * exceptions are handled by {@link HttpFSExceptionProvider}. */ private FileSystem createFileSystem(UserGroupInformation ugi) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java index ec559f9125..73a0dbe739 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java @@ -94,14 +94,14 @@ public void destroy() { * Static method that sets the FileSystem to release back to * the {@link FileSystemAccess} service on servlet request completion. * - * @param fs fileystem instance. + * @param fs a filesystem instance. */ public static void setFileSystem(FileSystem fs) { FILE_SYSTEM_TL.set(fs); } /** - * Abstract method to be implemetned by concrete implementations of the + * Abstract method to be implemented by concrete implementations of the * filter that return the {@link FileSystemAccess} service to which the filesystem * will be returned to. 
* diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/manifest_committer.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/manifest_committer.md index 12fe1f0b5f..c95486549d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/manifest_committer.md +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/manifest_committer.md @@ -371,11 +371,11 @@ any reports saved to a report directory. ## Collecting Job Summaries `mapreduce.manifest.committer.summary.report.directory` The committer can be configured to save the `_SUCCESS` summary files to a report directory, -irrespective of whether the job succeed or failed, by setting a fileystem path in +irrespective of whether the job succeeded or failed, by setting a filesystem path in the option `mapreduce.manifest.committer.summary.report.directory`. The path does not have to be on the same -store/filesystem as the destination of work. For example, a local fileystem could be used. +store/filesystem as the destination of work. For example, a local filesystem could be used. 
XML diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java index 52b6dde379..0285fb17aa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java @@ -204,7 +204,7 @@ void beginUpload() throws IOException, UploaderException { } else { LOG.warn("Cannot set replication to " + initialReplication + " for path: " + targetPath + - " on a non-distributed fileystem " + + " on a non-distributed filesystem " + fileSystem.getClass().getName()); } if (targetStream == null) { @@ -319,7 +319,7 @@ private void endUpload() } else { LOG.info("Cannot set replication to " + finalReplication + " for path: " + targetPath + - " on a non-distributed fileystem " + + " on a non-distributed filesystem " + fileSystem.getClass().getName()); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java index b19a70dec9..55351f0c81 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java @@ -88,7 +88,7 @@ public class S3AReadOpContext extends S3AOpContext { * Instantiate. * @param path path of read * @param invoker invoker for normal retries. 
- * @param stats Fileystem statistics (may be null) + * @param stats Filesystem statistics (may be null) * @param instrumentation statistics context * @param dstFileStatus target file status * @param vectoredIOContext context for vectored read operation. diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index 3c16d87fe1..608f9168c2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -811,7 +811,7 @@ protected static URI toUri(String s3Path) { try { uri = new URI(s3Path); } catch (URISyntaxException e) { - throw invalidArgs("Not a valid fileystem path: %s", s3Path); + throw invalidArgs("Not a valid filesystem path: %s", s3Path); } return uri; } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/delegation_tokens.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/delegation_tokens.md index ce204f118a..91f08bb730 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/delegation_tokens.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/delegation_tokens.md @@ -832,7 +832,7 @@ This tests marshalling and unmarshalling of tokens identifiers. Tests the lifecycle of session tokens. -#### Integration Test `ITestSessionDelegationInFileystem`. +#### Integration Test `ITestSessionDelegationInFilesystem` This collects DTs from one filesystem, and uses that to create a new FS instance and then perform filesystem operations. A miniKDC is instantiated. @@ -841,7 +841,7 @@ then perform filesystem operations. A miniKDC is instantiated. the second instance is picking up the DT information. * `UserGroupInformation.reset()` can be used to reset user secrets after every test case (e.g. 
teardown), so that issued DTs from one test case do not contaminate the next. -* It's subclass, `ITestRoleDelegationInFileystem` adds a check that the current credentials +* Its subclass, `ITestRoleDelegationInFilesystem` adds a check that the current credentials in the DT cannot be used to access data on other buckets —that is, the active session really is restricted to the target bucket. diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 6487846513..3cd3bb43c5 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -677,7 +677,7 @@ warning that the SDK resolution chain is in use: S3A filesystem client is using the SDK region resolution chain. 2021-06-23 19:56:56,073 [main] WARN fs.FileSystem (FileSystem.java:createFileSystem(3464)) - - Failed to initialize fileystem s3a://osm-pds/planet: + Failed to initialize filesystem s3a://osm-pds/planet: org.apache.hadoop.fs.s3a.AWSClientIOException: creating AWS S3 client on s3a://osm-pds: com.amazonaws.SdkClientException: Unable to find a region via the region provider chain. Must provide an explicit region in the builder or setup environment to supply a region.: @@ -1224,7 +1224,7 @@ KMS key ID is required for CSE-KMS to encrypt data, not providing one leads to failure. ``` -2021-07-07 11:33:04,550 WARN fs.FileSystem: Failed to initialize fileystem +2021-07-07 11:33:04,550 WARN fs.FileSystem: Failed to initialize filesystem s3a://ap-south-cse/: java.lang.IllegalArgumentException: CSE-KMS method requires KMS key ID. Use fs.s3a.encryption.key property to set it. -ls: CSE-KMS method requires KMS key ID. 
Use fs.s3a.encryption.key property to diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFileystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java similarity index 95% rename from hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFileystem.java rename to hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java index daf037f3bb..511b813475 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFileystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java @@ -32,8 +32,8 @@ * Subclass of the session test which checks roles; only works if * a role ARN has been declared. */ -public class ITestRoleDelegationInFileystem extends - ITestSessionDelegationInFileystem { +public class ITestRoleDelegationInFilesystem extends + ITestSessionDelegationInFilesystem { @Override public void setup() throws Exception { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java similarity index 99% rename from hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java rename to hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java index 9598ef084f..295125169a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java @@ -87,10 +87,10 @@ * UGI to be initialized with security enabled. */ @SuppressWarnings("StaticNonFinalField") -public class ITestSessionDelegationInFileystem extends AbstractDelegationIT { +public class ITestSessionDelegationInFilesystem extends AbstractDelegationIT { private static final Logger LOG = - LoggerFactory.getLogger(ITestSessionDelegationInFileystem.class); + LoggerFactory.getLogger(ITestSessionDelegationInFilesystem.class); private static MiniKerberizedHadoopCluster cluster; @@ -595,7 +595,7 @@ protected ObjectMetadata readLandsatMetadata(final S3AFileSystem delegatedFS) .withEndpoint(DEFAULT_ENDPOINT) .withMetrics(new EmptyS3AStatisticsContext() .newStatisticsFromAwsSdk()) - .withUserAgentSuffix("ITestSessionDelegationInFileystem"); + .withUserAgentSuffix("ITestSessionDelegationInFilesystem"); AmazonS3 s3 = factory.createS3Client(landsat, parameters); return Invoker.once("HEAD", host,