HDFS-1381. HDFS javadocs hard-code references to dfs.namenode.name.dir and dfs.datanode.data.dir parameters (Jim Plush via atm)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1139715 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2011-06-26 05:39:50 +00:00
parent 8014dfa1db
commit 604383b22d
4 changed files with 45 additions and 20 deletions

@@ -780,6 +780,9 @@ Trunk (unreleased changes)
HDFS-1321. If service port and main port are the same, there is no clear
log message explaining the issue. (Jim Plush via atm)
HDFS-1381. HDFS javadocs hard-code references to dfs.namenode.name.dir and
dfs.datanode.data.dir parameters (Jim Plush via atm)
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
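
For context, the change replaces literal property names in javadoc with {@link} references to the corresponding DFSConfigKeys constants, so the docs stay correct if a key is ever renamed. A minimal sketch of the relationship; the property names in the comments come from the commit title, not from anything this diff spells out:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ConfigKeySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Same effect as conf.set("dfs.namenode.name.dir", ...), but the constant
    // is navigable from javadoc via {@link} and survives key renames.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/tmp/hdfs/name");
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/tmp/hdfs/data");

    System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
  }
}
```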

@@ -309,7 +309,8 @@ public MiniDFSCluster() {
* Servers will be started on free ports.
* <p>
* The caller must manage the creation of NameNode and DataNode directories
* and have already set dfs.namenode.name.dir and dfs.datanode.data.dir in the given conf.
* and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
*
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
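
When the caller manages the directories itself, the javadoc above implies roughly the following setup before handing the conf to the cluster. A hedged sketch: the directory locations are illustrative, and the constructor call is left out because the choice of overload (one whose manageDfsDirs flags are false) depends on the test:

```java
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class CallerManagedDirsSketch {
  public static void main(String[] args) {
    // Illustrative locations; any writable directories would do.
    File nameDir = new File("/tmp/minidfs/name");
    File dataDir = new File("/tmp/minidfs/data");
    nameDir.mkdirs();
    dataDir.mkdirs();

    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());

    // conf is now ready to pass to a MiniDFSCluster overload that does not
    // manage the DFS directories itself, so these values stay as set here.
  }
}
```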
@@ -370,7 +371,8 @@ public MiniDFSCluster(Configuration conf,
/**
* NOTE: if possible, the other constructors that don't have nameNode port
* parameter should be used as they will ensure that the servers use free ports.
* parameter should be used as they will ensure that the servers use free
* ports.
* <p>
* Modify the config and start up the servers.
*
@@ -379,9 +381,12 @@ public MiniDFSCluster(Configuration conf,
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param format if true, format the NameNode and DataNodes before starting
* up
* @param manageDfsDirs if true, the data directories for servers will be
* created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
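
When manageDfsDirs is true, the javadoc says the cluster both creates the server directories and records them under the two keys. A rough sketch of that effect, not the cluster's actual code; the name1/name2/data1 layout under a base directory is an assumption for illustration:

```java
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ManageDfsDirsSketch {
  // Approximates what manageDfsDirs == true implies: the cluster, not the
  // caller, creates the storage directories and writes them into the conf.
  static void manageDirs(Configuration conf, File baseDir) {
    File name1 = new File(baseDir, "name1");   // illustrative names
    File name2 = new File(baseDir, "name2");
    File data1 = new File(baseDir, "data1");
    name1.mkdirs();
    name2.mkdirs();
    data1.mkdirs();

    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        name1.getAbsolutePath() + "," + name2.getAbsolutePath());
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, data1.getAbsolutePath());
  }
}
```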
@@ -411,7 +416,9 @@ public MiniDFSCluster(int nameNodePort,
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param manageDfsDirs if true, the data directories for servers will be
* created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
@@ -443,9 +450,12 @@ public MiniDFSCluster(int nameNodePort,
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param manageNameDfsDirs if true, the data directories for servers will be
* created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param manageDataDfsDirs if true, the data directories for datanodes will
* be created and dfs.datanode.data.dir set to same in the conf
* be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
* set to same in the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
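
This overload splits directory management in two: manageNameDfsDirs governs DFS_NAMENODE_NAME_DIR_KEY while manageDataDfsDirs governs DFS_DATANODE_DATA_DIR_KEY. A hedged sketch of a mixed setup in which the caller supplies the data directory but leaves the name directories to the cluster; the path is illustrative:

```java
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class SplitDirManagementSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Caller-managed data directory (so manageDataDfsDirs would be false).
    File dataDir = new File("/tmp/minidfs/data");
    dataDir.mkdirs();
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());

    // The name directories are left unset: with manageNameDfsDirs == true the
    // cluster creates them and fills in DFS_NAMENODE_NAME_DIR_KEY itself.
  }
}
```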
@@ -710,7 +720,8 @@ public void waitClusterUp() {
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
* in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
@@ -741,7 +752,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
@@ -774,7 +786,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
@@ -900,7 +913,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
@@ -930,7 +944,8 @@ public void startDataNodes(Configuration conf, int numDataNodes,
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and dfs.datanode.data.dir will be set in the conf
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will
* be set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
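
The startDataNodes variants documented in the hunks above grow an already-running cluster. A usage sketch; the five-argument overload and its ordering follow the @param lists above, but treat the exact signature as an assumption rather than something this diff shows:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GrowClusterSketch {
  // Adds two DataNodes to a running cluster. With manageDfsDirs == true the
  // cluster creates their data directories and sets
  // DFS_DATANODE_DATA_DIR_KEY in conf on the caller's behalf.
  static void addDataNodes(MiniDFSCluster cluster, Configuration conf)
      throws IOException {
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();
  }
}
```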

@@ -110,7 +110,8 @@ void log(String label, int numDirs, int testCaseNum, boolean[] state) {
/**
* Sets up the storage directories for namenode as defined by
* dfs.namenode.name.dir. For each element in dfs.namenode.name.dir, the subdirectories
* {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY}. For each element
* in {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY}, the subdirectories
* represented by the first four elements of the <code>state</code> array
* will be created and populated.
*
@@ -139,7 +140,8 @@ String[] createNameNodeStorageState(boolean[] state) throws Exception {
/**
* Sets up the storage directories for a datanode under
* dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}. For each element in
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}, the subdirectories
* represented by the first four elements of the <code>state</code> array
* will be created and populated.
* See {@link UpgradeUtilities#createDataNodeStorageDirs()}
@@ -167,7 +169,8 @@ String[] createDataNodeStorageState(boolean[] state) throws Exception {
/**
* Sets up the storage directories for a block pool under
* dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}. For each element
* in {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}, the subdirectories
* represented by the first four elements of the <code>state</code> array
* will be created and populated.
* See {@link UpgradeUtilities#createBlockPoolStorageDirs()}
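
All three helpers above work per element of the relevant key: every configured directory receives the same set of subdirectories. A simplified sketch of that loop; the subdirectory argument is illustrative, not the test's actual layout:

```java
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class PerDirSetupSketch {
  // For each entry in dfs.datanode.data.dir, create the same subdirectory,
  // mirroring how the storage-state helpers populate every configured dir.
  static void createUnderEachDataDir(Configuration conf, String subdir) {
    String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    if (dataDirs == null) {
      return;
    }
    for (String dir : dataDirs) {
      new File(dir, subdir).mkdirs();
    }
  }
}
```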

@@ -181,8 +181,9 @@ private static void writeFile(FileSystem fs, Path path, byte[] buffer,
}
/**
* Initialize dfs.namenode.name.dir and dfs.datanode.data.dir with the specified number of
* directory entries. Also initialize dfs.blockreport.intervalMsec.
* Initialize {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} with the specified
* number of directory entries. Also initialize dfs.blockreport.intervalMsec.
*/
public static Configuration initializeStorageStateConf(int numDirs,
Configuration conf) {
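
A usage sketch for the helper whose javadoc was just updated; judging by the cross-references earlier in the diff it lives in UpgradeUtilities, and the HdfsConfiguration base and the value 2 for numDirs are illustrative assumptions:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class StorageStateConfSketch {
  public static void main(String[] args) {
    // Assumes this sketch sits alongside UpgradeUtilities in the
    // org.apache.hadoop.hdfs test tree, since it is a test-scope utility.
    Configuration conf =
        UpgradeUtilities.initializeStorageStateConf(2, new HdfsConfiguration());

    // Both keys should now hold comma-separated lists of two entries each.
    System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
    System.out.println(conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
  }
}
```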
@@ -306,7 +307,8 @@ public static long checksumContents(NodeType nodeType, File dir) throws IOExcept
}
/**
* Simulate the <code>dfs.namenode.name.dir</code> of a populated DFS filesystem.
* Simulate the {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} of a populated
* DFS filesystem.
* This method populates for each parent directory, <code>parent/dirName</code>
* with the content of namenode storage directory that comes from a singleton
* namenode master (that contains edits, fsimage, version and time files).
@@ -333,7 +335,8 @@ public static File[] createNameNodeStorageDirs(String[] parents,
}
/**
* Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
* Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
* populated DFS filesystem.
* This method populates for each parent directory, <code>parent/dirName</code>
* with the content of datanode storage directory that comes from a singleton
* datanode master (that contains version and block files). If the destination
@@ -360,7 +363,8 @@ public static File[] createDataNodeStorageDirs(String[] parents,
}
/**
* Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
* Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
* populated DFS filesystem.
* This method populates for each parent directory, <code>parent/dirName</code>
* with the content of block pool storage directory that comes from a singleton
* datanode master (that contains version and block files). If the destination