From 474fa80bfb11f20fe21272f793307258315e440f Mon Sep 17 00:00:00 2001
From: Hui Fei
Date: Wed, 23 Sep 2020 00:10:33 +0800
Subject: [PATCH] HADOOP-17277. Correct spelling errors for separator (#2322)

Contributed by Hui Fei.
---
 .../src/site/markdown/CommandsManual.md            |  2 +-
 .../libhdfs-tests/test_libhdfs_mini_stress.c       |  2 +-
 .../src/main/native/libhdfspp/include/hdfspp/uri.h |  2 +-
 .../uriparser2/uriparser2/uriparser/UriFile.c      |  2 +-
 .../federation/utils/ConsistentHashRing.java       |  6 +++---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md       |  2 +-
 .../hadoop-yarn/conf/container-executor.cfg        | 14 +++++++-------
 .../src/site/markdown/DockerContainers.md          |  2 +-
 .../src/site/markdown/Federation.md                |  2 +-
 9 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 0bda253fc8..4842d5b86d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -60,7 +60,7 @@ Many subcommands honor a common set of configuration options to alter their beha
 | `-files <comma separated list of files>` | Specify comma separated files to be copied to the map reduce cluster. Applies only to job. |
 | `-fs <file:///> or <hdfs://namenode:port>` | Specify default filesystem URL to use. Overrides 'fs.defaultFS' property from configurations. |
 | `-jt <local> or <resourcemanager:port>` | Specify a ResourceManager. Applies only to job. |
-| `-libjars <comma seperated list of jars>` | Specify comma separated jar files to include in the classpath. Applies only to job. |
+| `-libjars <comma separated list of jars>` | Specify comma separated jar files to include in the classpath. Applies only to job. |
 
 Hadoop Common Commands
 ======================
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
index 9054287405..846852bfd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
@@ -279,7 +279,7 @@ static int testHdfsMiniStressImpl(struct tlhThreadInfo *ti)
   EXPECT_NONNULL(ti->hdfs);
   // Error injection on, some failures are expected in the read path.
   // The expectation is that any memory stomps will cascade and cause
-  // the following test to fail. Ideally RPC errors would be seperated
+  // the following test to fail. Ideally RPC errors would be separated
   // from BlockReader errors (RPC is expected to recover from disconnects).
   doTestHdfsMiniStress(ti, 1);
   // No error injection
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h
index d8574d15b3..bc3d8b96d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h
@@ -103,7 +103,7 @@ public:
   std::string str(bool encoded_output=true) const;
 
-  // Get a string with each URI field printed on a seperate line
+  // Get a string with each URI field printed on a separate line
   std::string GetDebugString() const;
 private:
   // These are stored in encoded form
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c
index 5471e5af8c..22f38bee25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c
@@ -90,7 +90,7 @@ static URI_INLINE int URI_FUNC(FilenameToUriString)(const URI_CHAR * filename,
 		if ((input[0] == _UT('\0'))
 				|| (fromUnix && input[0] == _UT('/'))
 				|| (!fromUnix && input[0] == _UT('\\'))) {
-			/* Copy text after last seperator */
+			/* Copy text after last separator */
 			if (lastSep + 1 < input) {
 				if (!fromUnix && absolute && (firstSegment == URI_TRUE)) {
 					/* Quick hack to not convert "C:" to "C%3A" */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
index fc3e49ff9d..ab7bfb16cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
@@ -33,8 +33,8 @@
  * or remove nodes, it minimizes the item migration.
  */
 public class ConsistentHashRing {
-  private static final String SEPERATOR = "/";
-  private static final String VIRTUAL_NODE_FORMAT = "%s" + SEPERATOR + "%d";
+  private static final String SEPARATOR = "/";
+  private static final String VIRTUAL_NODE_FORMAT = "%s" + SEPARATOR + "%d";
 
   /** Hash ring. */
   private SortedMap<String, String> ring = new TreeMap<String, String>();
@@ -119,7 +119,7 @@ public String getLocation(String item) {
       hash = tailMap.isEmpty() ? ring.firstKey() : tailMap.firstKey();
     }
     String virtualNode = ring.get(hash);
-    int index = virtualNode.lastIndexOf(SEPERATOR);
+    int index = virtualNode.lastIndexOf(SEPARATOR);
     if (index >= 0) {
       return virtualNode.substring(0, index);
     } else {
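
For readers outside the RBF code, the renamed SEPARATOR constant belongs to a consistent-hashing lookup over virtual nodes: each physical location is hashed onto the ring several times under keys like "node/0", "node/1", and getLocation() strips the "/<index>" suffix from whichever virtual node the item lands on. The following is a minimal standalone sketch of that pattern, not the actual Hadoop class; the MD5-based hash and method shapes are illustrative assumptions.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.SortedMap;
import java.util.TreeMap;

// Illustrative sketch of consistent hashing with virtual nodes;
// not the real org.apache.hadoop.hdfs...ConsistentHashRing class.
public class HashRingSketch {
  private static final String SEPARATOR = "/";
  private static final String VIRTUAL_NODE_FORMAT = "%s" + SEPARATOR + "%d";

  // Maps hash(virtualNodeKey) -> virtualNodeKey, kept sorted by hash.
  private final SortedMap<String, String> ring = new TreeMap<String, String>();

  public void addLocation(String location, int numVirtualNodes) throws Exception {
    // Insert one ring entry per virtual node, e.g. "node1/0", "node1/1", ...
    for (int i = 0; i < numVirtualNodes; i++) {
      String virtualNode = String.format(VIRTUAL_NODE_FORMAT, location, i);
      ring.put(hash(virtualNode), virtualNode);
    }
  }

  public String getLocation(String item) throws Exception {
    if (ring.isEmpty()) {
      return null;
    }
    String hash = hash(item);
    // First virtual node at or after the item's hash; wrap to the
    // ring's first key when the item hashes past the last node.
    SortedMap<String, String> tailMap = ring.tailMap(hash);
    String key = tailMap.isEmpty() ? ring.firstKey() : tailMap.firstKey();
    String virtualNode = ring.get(key);
    // Strip the "/<index>" suffix to recover the physical location.
    int index = virtualNode.lastIndexOf(SEPARATOR);
    return index >= 0 ? virtualNode.substring(0, index) : virtualNode;
  }

  private static String hash(String key) throws Exception {
    byte[] digest = MessageDigest.getInstance("MD5")
        .digest(key.getBytes(StandardCharsets.UTF_8));
    StringBuilder sb = new StringBuilder();
    for (byte b : digest) {
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  }
}

Because both the format string and lastIndexOf() key off the same SEPARATOR constant, its spelling is purely internal, which is why the rename above changes no behavior.
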
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 9e1b160d6f..203082f067 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -3142,7 +3142,7 @@ See also: [`CREATESNAPSHOT`](#Create_Snapshot), [`DELETESNAPSHOT`](#Delete_Snaps
 | Description | A list of source paths. |
 | Type | String |
 | Default Value | \<empty\> |
-| Valid Values | A list of comma seperated absolute FileSystem paths without scheme and authority. |
+| Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. |
 | Syntax | Any string. |
 
 See also: [`CONCAT`](#Concat_Files)
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index 0678acc719..587855d188 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -8,15 +8,15 @@ feature.tc.enabled=false
 #[docker]
 # module.enabled=## enable/disable the module. set to "true" to enable, disabled by default
 # docker.binary=/usr/bin/docker
-# docker.allowed.capabilities=## comma seperated capabilities that can be granted, e.g CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
-# docker.allowed.devices=## comma seperated list of devices that can be mounted into a container
-# docker.allowed.networks=## comma seperated networks that can be used. e.g bridge,host,none
-# docker.allowed.ro-mounts=## comma seperated volumes that can be mounted as read-only
-# docker.allowed.rw-mounts=## comma seperate volumes that can be mounted as read-write, add the yarn local and log dirs to this list to run Hadoop jobs
+# docker.allowed.capabilities=## comma separated capabilities that can be granted, e.g CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+# docker.allowed.devices=## comma separated list of devices that can be mounted into a container
+# docker.allowed.networks=## comma separated networks that can be used. e.g bridge,host,none
+# docker.allowed.ro-mounts=## comma separated volumes that can be mounted as read-only
+# docker.allowed.rw-mounts=## comma separate volumes that can be mounted as read-write, add the yarn local and log dirs to this list to run Hadoop jobs
 # docker.privileged-containers.enabled=false
-# docker.allowed.volume-drivers=## comma seperated list of allowed volume-drivers
+# docker.allowed.volume-drivers=## comma separated list of allowed volume-drivers
 # docker.no-new-privileges.enabled=## enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default
-# docker.allowed.runtimes=## comma seperated runtimes that can be used.
+# docker.allowed.runtimes=## comma separated runtimes that can be used.
 
 # The configs below deal with settings for FPGA resource
 #[fpga]
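
To make the commented template above concrete, an enabled [docker] section might look like the sketch below. The mount paths and capability list are illustrative assumptions, not shipped defaults; note that the YARN local and log directories go into docker.allowed.rw-mounts, as the template advises.

# Illustrative example only; adjust paths and capabilities to your cluster.
[docker]
  module.enabled=true
  docker.binary=/usr/bin/docker
  docker.allowed.capabilities=CHOWN,DAC_OVERRIDE,SETGID,SETUID,NET_BIND_SERVICE
  docker.allowed.networks=bridge,host,none
  docker.allowed.ro-mounts=/etc/hadoop/conf
  docker.allowed.rw-mounts=/var/hadoop/yarn/local-dir,/var/hadoop/yarn/log-dir
  docker.privileged-containers.enabled=false
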
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index db9c56d99e..1878cb31e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -284,7 +284,7 @@ are allowed. It contains the following properties:
 | `docker.trusted.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers. By default, no registries are defined. |
 | `docker.inspect.max.retries` | Integer value to check docker container readiness. Each inspection is set with 3 seconds delay. Default value of 10 will wait 30 seconds for docker container to become ready before marked as container failed. |
 | `docker.no-new-privileges.enabled` | Enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default. |
-| `docker.allowed.runtimes` | Comma seperated runtimes that containers are allowed to use. By default no runtimes are allowed to be added.|
+| `docker.allowed.runtimes` | Comma separated runtimes that containers are allowed to use. By default no runtimes are allowed to be added.|
 | `docker.service-mode.enabled` | Set to "true" or "false" to enable or disable docker container service mode. Default value is "false". |
 
 Please note that if you wish to run Docker containers that require access to the YARN local directories, you must add them to the docker.allowed.rw-mounts list.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
index d2f3247cd9..c7836e7547 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
@@ -256,7 +256,7 @@ Optional:
 |`yarn.router.submit.retry` | `3` | The number of retries in the router before we give up. |
 |`yarn.federation.statestore.max-connections` | `10` | This is the maximum number of parallel connections each Router makes to the state-store. |
 |`yarn.federation.cache-ttl.secs` | `60` | The Router caches informations, and this is the time to leave before the cache is invalidated. |
-|`yarn.router.webapp.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST` | A comma-seperated list of interceptor classes to be run at the router when interfacing with the client via REST interface. The last step of this pipeline must be the Federation Interceptor REST. |
+|`yarn.router.webapp.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST` | A comma-separated list of interceptor classes to be run at the router when interfacing with the client via REST interface. The last step of this pipeline must be the Federation Interceptor REST. |
 
 ###ON NMs: