HADOOP-13932. Fix indefinite article in comments (Contributed by LiXin Ge via Daniel Templeton)
This commit is contained in:
parent
ac1e5d4f77
commit
e216e8e233
@ -41,7 +41,7 @@
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
|
||||
/**
|
||||
* Inspects a FSImage storage directory in the "old" (pre-HDFS-1073) format.
|
||||
* Inspects an FSImage storage directory in the "old" (pre-HDFS-1073) format.
|
||||
* This format has the following data files:
|
||||
* - fsimage
|
||||
* - fsimage.ckpt (when checkpoint is being uploaded)
|
||||
|
@ -65,7 +65,7 @@ Note that rolling upgrade is supported only from Hadoop-2.4.0 onwards.
|
||||
|
||||
### Upgrade without Downtime
|
||||
|
||||
In a HA cluster, there are two or more *NameNodes (NNs)*, many *DataNodes (DNs)*,
|
||||
In an HA cluster, there are two or more *NameNodes (NNs)*, many *DataNodes (DNs)*,
|
||||
a few *JournalNodes (JNs)* and a few *ZooKeeperNodes (ZKNs)*.
|
||||
*JNs* are relatively stable and do not require upgrade when upgrading HDFS in most of the cases.
|
||||
In the rolling upgrade procedure described here,
|
||||
@ -76,7 +76,7 @@ Upgrading *JNs* and *ZKNs* may incur cluster downtime.
|
||||
|
||||
Suppose there are two namenodes *NN1* and *NN2*,
|
||||
where *NN1* and *NN2* are respectively in active and standby states.
|
||||
The following are the steps for upgrading a HA cluster:
|
||||
The following are the steps for upgrading an HA cluster:
|
||||
|
||||
1. Prepare Rolling Upgrade
|
||||
1. Run "[`hdfs dfsadmin -rollingUpgrade prepare`](#dfsadmin_-rollingUpgrade)"
|
||||
@ -133,7 +133,7 @@ However, datanodes can still be upgraded in a rolling manner.
|
||||
|
||||
In a non-HA cluster, there are a *NameNode (NN)*, a *SecondaryNameNode (SNN)*
|
||||
and many *DataNodes (DNs)*.
|
||||
The procedure for upgrading a non-HA cluster is similar to upgrading a HA cluster
|
||||
The procedure for upgrading a non-HA cluster is similar to upgrading an HA cluster
|
||||
except that Step 2 "Upgrade Active and Standby *NNs*" is changed to below:
|
||||
|
||||
* Upgrade *NN* and *SNN*
|
||||
@ -175,7 +175,7 @@ A newer release is downgradable to the pre-upgrade release
|
||||
only if both the namenode layout version and the datanode layout version
|
||||
are not changed between these two releases.
|
||||
|
||||
In a HA cluster,
|
||||
In an HA cluster,
|
||||
when a rolling upgrade from an old software release to a new software release is in progress,
|
||||
it is possible to downgrade, in a rolling fashion, the upgraded machines back to the old software release.
|
||||
Same as before, suppose *NN1* and *NN2* are respectively in active and standby states.
|
||||
|
@ -76,7 +76,7 @@ libhdfs is thread safe.
|
||||
|
||||
* Concurrency and Hadoop FS "handles"
|
||||
|
||||
The Hadoop FS implementation includes a FS handle cache which
|
||||
The Hadoop FS implementation includes an FS handle cache which
|
||||
caches based on the URI of the namenode along with the user
|
||||
connecting. So, all calls to `hdfsConnect` will return the same
|
||||
handle but calls to `hdfsConnectAsUser` with different users will
|
||||
|
@ -9801,7 +9801,7 @@
|
||||
<param name="defaultPort" type="int"/>
|
||||
<doc>
|
||||
<![CDATA[Get the socket address for <code>name</code> property as a
|
||||
<code>InetSocketAddress</code>. On a HA cluster,
|
||||
<code>InetSocketAddress</code>. On an HA cluster,
|
||||
this fetches the address corresponding to the RM identified by
|
||||
{@link #RM_HA_ID}.
|
||||
@param name property name.
|
||||
|
@ -9416,7 +9416,7 @@
|
||||
<param name="defaultPort" type="int"/>
|
||||
<doc>
|
||||
<![CDATA[Get the socket address for <code>name</code> property as a
|
||||
<code>InetSocketAddress</code>. On a HA cluster,
|
||||
<code>InetSocketAddress</code>. On an HA cluster,
|
||||
this fetches the address corresponding to the RM identified by
|
||||
{@link #RM_HA_ID}.
|
||||
@param name property name.
|
||||
|
@ -208,7 +208,7 @@ public static String getRMHAId(Configuration conf) {
|
||||
|
||||
@VisibleForTesting
|
||||
static String getNeedToSetValueMessage(String confKey) {
|
||||
return confKey + " needs to be set in a HA configuration.";
|
||||
return confKey + " needs to be set in an HA configuration.";
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
@ -223,7 +223,7 @@ static String getRMHAIdNeedToBeIncludedMessage(String ids,
|
||||
String rmId) {
|
||||
return YarnConfiguration.RM_HA_IDS + "("
|
||||
+ ids + ") need to contain " + YarnConfiguration.RM_HA_ID + "("
|
||||
+ rmId + ") in a HA configuration.";
|
||||
+ rmId + ") in an HA configuration.";
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
|
@ -2767,7 +2767,7 @@ public static List<String> getServiceAddressConfKeys(Configuration conf) {
|
||||
|
||||
/**
|
||||
* Get the socket address for <code>name</code> property as a
|
||||
* <code>InetSocketAddress</code>. On a HA cluster,
|
||||
* <code>InetSocketAddress</code>. On an HA cluster,
|
||||
* this fetches the address corresponding to the RM identified by
|
||||
* {@link #RM_HA_ID}.
|
||||
* @param name property name.
|
||||
|
@ -441,7 +441,7 @@
|
||||
<property>
|
||||
<description>Host:Port of the ZooKeeper server to be used by the RM. This
|
||||
must be supplied when using the ZooKeeper based implementation of the
|
||||
RM state store and/or embedded automatic failover in a HA setting.
|
||||
RM state store and/or embedded automatic failover in an HA setting.
|
||||
</description>
|
||||
<name>yarn.resourcemanager.zk-address</name>
|
||||
<!--value>127.0.0.1:2181</value-->
|
||||
@ -490,7 +490,7 @@
|
||||
|
||||
<property>
|
||||
<description>
|
||||
ACLs to be used for the root znode when using ZKRMStateStore in a HA
|
||||
ACLs to be used for the root znode when using ZKRMStateStore in an HA
|
||||
scenario for fencing.
|
||||
|
||||
ZKRMStateStore supports implicit fencing to allow a single
|
||||
@ -606,7 +606,7 @@
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>Name of the cluster. In a HA setting,
|
||||
<description>Name of the cluster. In an HA setting,
|
||||
this is used to ensure the RM participates in leader
|
||||
election for this cluster and ensures it does not affect
|
||||
other clusters</description>
|
||||
@ -2188,7 +2188,7 @@
|
||||
<property>
|
||||
<name>yarn.timeline-service.client.fd-retain-secs</name>
|
||||
<description>
|
||||
How long the ATS v1.5 writer will keep a FSStream open.
|
||||
How long the ATS v1.5 writer will keep an FSStream open.
|
||||
If this fsstream does not write anything for this configured time,
|
||||
it will be closed.
|
||||
</description>
|
||||
|
@ -740,7 +740,7 @@ T runWithRetries() throws Exception {
|
||||
try {
|
||||
return run();
|
||||
} catch (IOException e) {
|
||||
LOG.info("Exception while executing a FS operation.", e);
|
||||
LOG.info("Exception while executing an FS operation.", e);
|
||||
if (++retry > fsNumRetries) {
|
||||
LOG.info("Maxed out FS retries. Giving up!");
|
||||
throw e;
|
||||
|
@ -200,7 +200,7 @@ static FSQueueMetrics forQueue(String queueName, Queue parent,
|
||||
* @param parent parent queue
|
||||
* @param enableUserMetrics if user metrics is needed
|
||||
* @param conf configuration
|
||||
* @return a FSQueueMetrics object
|
||||
* @return an FSQueueMetrics object
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public synchronized
|
||||
|
@ -385,7 +385,7 @@ public File getTestWorkDir() {
|
||||
}
|
||||
|
||||
/**
|
||||
* In a HA cluster, go through all the RMs and find the Active RM. In a
|
||||
* In an HA cluster, go through all the RMs and find the Active RM. In a
|
||||
* non-HA cluster, return the index of the only RM.
|
||||
*
|
||||
* @return index of the active RM or -1 if none of them turn active
|
||||
|
Loading…
Reference in New Issue
Block a user