<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
|
|
<!-- Generated by the JDiff Javadoc doclet -->
|
|
<!-- (http://www.jdiff.org) -->
|
|
<!-- on Sun May 31 20:46:08 PDT 2009 -->
|
|
|
|
<api
|
|
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
|
|
xsi:noNamespaceSchemaLocation='api.xsd'
|
|
name="hadoop-hdfs 0.20.0"
|
|
jdversion="1.0.9">
|
|
|
|
<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/jdiff-1.0.9.jar:/home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/xerces-1.4.4.jar -classpath /home/gkesavan/release-0.20.0/build/classes:/home/gkesavan/release-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar:/home/gkesavan/release-0.20.0/lib/hsqldb-1.8.0.10.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-2.1.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar:/home/gkesavan/release-0.20.0/lib/kfs-0.2.2.jar:/home/gkesavan/release-0.20.0/conf:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.0.4.jar:/home/gkesavan/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/home/gkesavan/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.0.1.jar:/home/gkesavan/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.3.jar:/home/gkesavan/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/gkesavan/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.6.1.jar:/home/gkesavan/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/home/gkesavan/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/gkesavan/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/gkesavan/.ivy2/cache/junit/junit/jars/junit-3.8.1.jar:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging-api/jars/commons-logging-api-1.0.4.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.4.3.jar:/home/gkesavan/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.4.3.jar:/home/gkesavan/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/gkesavan/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-launcher.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-resolver.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-starteam.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-netrexx.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-testutil.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jai.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-swing.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jmf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bcel.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jdepend.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jsch.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bsf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-antlr.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-weblogic.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-junit.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-log4j.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xercesImpl.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-oro.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-trax.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-nodeps.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-logging.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-regexp.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-s
tylebook.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-javamail.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-net.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xml-apis.jar:/home/gkesavan/tools/jdk1.6.0_07-32bit/lib/tools.jar -sourcepath /home/gkesavan/release-0.20.0/src/hdfs -apidir /home/gkesavan/release-0.20.0/lib/jdiff -apiname hadoop 0.20.1-dev -->
|
|
<package name="org.apache.hadoop.hdfs">
|
|
<!-- start class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
|
|
<class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="ChecksumDistributedFileSystem"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="deprecated, no comment">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@deprecated]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getRawCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return the total raw capacity of the filesystem, disregarding
replication.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRawUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return the total raw used space in the filesystem, disregarding
replication.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return statistics for each datanode.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setSafeMode" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Enter, leave or get safe mode.
|
|
|
|
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshNodes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="finalizeUpgrade"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Finalize previously upgraded file system state.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="metaSave"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="pathname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="reportChecksumFailure" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
|
|
<param name="inPos" type="long"/>
|
|
<param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
|
|
<param name="sumsPos" type="long"/>
|
|
<doc>
|
|
<![CDATA[We need to find the blocks that didn't match. Likely only one
|
|
is corrupt but we will report both to the namenode. In the future,
|
|
we can consider figuring out exactly which block is corrupt.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the stat information about the file.]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem.
Note that as of now (May 07), DistributedFileSystem natively checksums
all of its data. Using this class is not necessary in most cases.
Currently provided mainly for backward compatibility and testing.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
|
|
<!-- start class org.apache.hadoop.hdfs.DFSClient -->
|
|
<class name="DFSClient" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
|
|
<implements name="java.io.Closeable"/>
|
|
<constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new DFSClient connected to the default namenode.]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new DFSClient connected to the given namenode server.]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</constructor>
|
|
<method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="close"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Close the file system, abandoning all of the leases and files being
created, and close connections to the namenode.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDefaultBlockSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get the default block size for this cluster
|
|
@return the default block size in bytes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="reportBadBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Report corrupt blocks that were discovered by the client.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDefaultReplication" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getHints" return="java.lang.String[][]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="Use getBlockLocations instead
|
|
|
|
Get hints about the location of the indicated block(s).
|
|
|
|
getHints() returns a list of hostnames that store data for
|
|
a specific file region. It returns a set of hostnames for
|
|
every block within the indicated region.
|
|
|
|
This function is very useful when writing code that considers
|
|
data-placement when performing operations. For example, the
|
|
MapReduce system tries to schedule tasks on the same machines
|
|
as the data-block the task processes.">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="start" type="long"/>
|
|
<param name="length" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@deprecated Use getBlockLocations instead
|
|
|
|
Get hints about the location of the indicated block(s).
|
|
|
|
getHints() returns a list of hostnames that store data for
|
|
a specific file region. It returns a set of hostnames for
|
|
every block within the indicated region.
|
|
|
|
This function is very useful when writing code that considers
|
|
data-placement when performing operations. For example, the
|
|
MapReduce system tries to schedule tasks on the same machines
|
|
as the data-block the task processes.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="start" type="long"/>
|
|
<param name="length" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get block location info about a file.
|
|
|
|
getBlockLocations() returns a list of hostnames that store
|
|
data for a specific file region. It returns a set of hostnames
|
|
for every block within the indicated region.
|
|
|
|
This function is very useful when writing code that considers
|
|
data-placement when performing operations. For example, the
|
|
MapReduce system tries to schedule tasks on the same machines
|
|
as the data-block the task processes.]]>
|
|
</doc>
|
|
</method>
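<!-- Illustrative usage sketch, not generated by JDiff. The doc above describes
     getBlockLocations() returning the hostnames that store each block of a
     file region. A minimal Java sketch, assuming a namenode reachable through
     the default Configuration and assuming BlockLocation#getHosts(), which is
     not listed in this file:

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.BlockLocation;
     import org.apache.hadoop.hdfs.DFSClient;

     public class BlockLocationDemo {
       public static void main(String[] args) throws IOException {
         Configuration conf = new Configuration();   // picks up core-site.xml / hdfs-site.xml
         DFSClient client = new DFSClient(conf);     // connects to the default namenode
         try {
           // Ask which hosts store the first 1 MB of a hypothetical file.
           BlockLocation[] locs =
               client.getBlockLocations("/user/demo/data.txt", 0L, 1024L * 1024L);
           for (BlockLocation loc : locs) {
             for (String host : loc.getHosts()) {
               System.out.println("replica on " + host);
             }
           }
         } finally {
           client.close();                           // releases the namenode connection
         }
       }
     }
-->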
|
|
<method name="open" return="org.apache.hadoop.hdfs.DFSClient.DFSInputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="create" return="java.io.OutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new dfs file and return an output stream for writing into it.
|
|
|
|
@param src stream name
|
|
@param overwrite do not check for file existence if true
|
|
@return output stream
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="java.io.OutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new dfs file and return an output stream for writing into it
|
|
with write-progress reporting.
|
|
|
|
@param src stream name
|
|
@param overwrite do not check for file existence if true
|
|
@return output stream
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="java.io.OutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new dfs file with the specified block replication
|
|
and return an output stream for writing into the file.
|
|
|
|
@param src stream name
|
|
@param overwrite do not check for file existence if true
|
|
@param replication block replication
|
|
@return output stream
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="java.io.OutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new dfs file with the specified block replication
|
|
with write-progress reporting and return an output stream for writing
|
|
into the file.
|
|
|
|
@param src stream name
|
|
@param overwrite do not check for file existence if true
|
|
@param replication block replication
|
|
@return output stream
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="java.io.OutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<param name="buffersize" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Call
|
|
{@link #create(String,FsPermission,boolean,short,long,Progressable,int)}
|
|
with default permission.
|
|
@see FsPermission#getDefault()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="java.io.OutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<param name="buffersize" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new dfs file with the specified block replication
|
|
with write-progress reporting and return an output stream for writing
|
|
into the file.
|
|
|
|
@param src stream name
|
|
@param permission The permission of the file being created.
|
|
If permission == null, use {@link FsPermission#getDefault()}.
|
|
@param overwrite do not check for file existence if true
|
|
@param replication block replication
|
|
@return output stream
|
|
@throws IOException
|
|
@see ClientProtocol#create(String, FsPermission, String, boolean, short, long)]]>
|
|
</doc>
|
|
</method>
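<!-- Illustrative usage sketch, not generated by JDiff. The create() entry
     above takes a path, permission, overwrite flag, replication, block size,
     progress callback and buffer size. A minimal Java sketch, assuming the
     FsPermission(short) constructor and that a null Progressable is
     acceptable (neither is listed in this file):

     import java.io.IOException;
     import java.io.OutputStream;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.permission.FsPermission;
     import org.apache.hadoop.hdfs.DFSClient;

     public class CreateFileDemo {
       public static void main(String[] args) throws IOException {
         DFSClient client = new DFSClient(new Configuration());
         try {
           OutputStream out = client.create(
               "/user/demo/output.txt",            // hypothetical destination path
               new FsPermission((short) 0644),     // rw for owner, r for group and other
               true,                               // overwrite if it already exists
               (short) 3,                          // block replication factor
               64L * 1024L * 1024L,                // 64 MB block size
               null,                               // no progress reporting
               4096);                              // client buffer size in bytes
           out.write("hello hdfs".getBytes("UTF-8"));
           out.close();                            // completes the file on the namenode
         } finally {
           client.close();
         }
       }
     }
-->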
|
|
<method name="setReplication" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="replication" type="short"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set replication for an existing file.
|
|
|
|
@see ClientProtocol#setReplication(String, short)
|
|
@param replication
|
|
@throws IOException
|
|
@return true if successful or false if file does not exist]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rename" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="dst" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Rename file or directory.
|
|
See {@link ClientProtocol#rename(String, String)}.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Delete file or directory.
|
|
See {@link ClientProtocol#delete(String)}.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="recursive" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Delete file or directory.
Deletes the contents of the directory if it is non-empty and recursive
is set to true.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="exists" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Implemented using getFileInfo(src)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isDirectory" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="Use getFileStatus() instead">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@deprecated Use getFileStatus() instead]]>
|
|
</doc>
|
|
</method>
|
|
<method name="listPaths" return="org.apache.hadoop.fs.FileStatus[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
|
|
<param name="socketFactory" type="javax.net.SocketFactory"/>
|
|
<param name="socketTimeout" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get the checksum of a file.
|
|
@param src The file path
|
|
@return The checksum]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setPermission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set permissions to a file or directory.
|
|
@param src path name.
|
|
@param permission
|
|
@throws <code>FileNotFoundException</code> if the file does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setOwner"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="username" type="java.lang.String"/>
|
|
<param name="groupname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set file or directory owner.
|
|
@param src path name.
|
|
@param username user id.
|
|
@param groupname user group.
|
|
@throws <code>FileNotFoundException</code> if the file does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="totalRawCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="totalRawUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getMissingBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns count of blocks with no good replicas left. Normally should be
|
|
zero.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUnderReplicatedBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns count of blocks with one or more replicas missing.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCorruptBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns count of blocks with at least one replica marked corrupt.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="setSafeMode" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Enter, leave or get safe mode.
|
|
See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)}
|
|
for more details.
|
|
|
|
@see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
|
|
</doc>
|
|
</method>
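<!-- Illustrative usage sketch, not generated by JDiff. setSafeMode() above
     enters, leaves or queries safe mode depending on the SafeModeAction
     passed in. A minimal Java sketch, assuming the enum constant
     FSConstants.SafeModeAction.SAFEMODE_GET (the constant names are not
     listed in this file):

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.DFSClient;
     import org.apache.hadoop.hdfs.protocol.FSConstants;

     public class SafeModeDemo {
       public static void main(String[] args) throws IOException {
         DFSClient client = new DFSClient(new Configuration());
         try {
           // SAFEMODE_GET only queries the state; ENTER/LEAVE would change it.
           boolean inSafeMode =
               client.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
           System.out.println("namenode in safe mode: " + inSafeMode);
         } finally {
           client.close();
         }
       }
     }
-->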
|
|
<method name="refreshNodes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Refresh the hosts and exclude files. (Rereads them.)
|
|
See {@link ClientProtocol#refreshNodes()}
|
|
for more details.
|
|
|
|
@see ClientProtocol#refreshNodes()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="metaSave"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="pathname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Dumps DFS data structures into the specified file.
|
|
See {@link ClientProtocol#metaSave(String)}
|
|
for more details.
|
|
|
|
@see ClientProtocol#metaSave(String)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="finalizeUpgrade"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a directory (or hierarchy of directories) with the given
|
|
name and permission.
|
|
|
|
@param src The path of the directory being created
|
|
@param permission The permission of the directory being created.
|
|
If permission == null, use {@link FsPermission#getDefault()}.
|
|
@return True if the operation succeeds.
|
|
@see ClientProtocol#mkdirs(String, FsPermission)]]>
|
|
</doc>
|
|
</method>
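<!-- Illustrative usage sketch, not generated by JDiff. mkdirs() above creates
     a directory hierarchy with the given permission, falling back to
     FsPermission.getDefault() when permission is null. A minimal Java sketch,
     assuming the FsPermission(short) constructor (not listed in this file):

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.permission.FsPermission;
     import org.apache.hadoop.hdfs.DFSClient;

     public class MkdirsDemo {
       public static void main(String[] args) throws IOException {
         DFSClient client = new DFSClient(new Configuration());
         try {
           // Creates /user/demo/logs and any missing parents with mode 0755.
           boolean ok = client.mkdirs("/user/demo/logs", new FsPermission((short) 0755));
           System.out.println("mkdirs succeeded: " + ok);
         } finally {
           client.close();
         }
       }
     }
-->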
|
|
<method name="setTimes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="mtime" type="long"/>
|
|
<param name="atime" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set the modification and access time of a file
|
|
@throws FileNotFoundException if the path is not a file]]>
|
|
</doc>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"
|
|
transient="false" volatile="false"
|
|
static="false" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[DFSClient can connect to a Hadoop Filesystem and
|
|
perform basic file tasks. It uses the ClientProtocol
|
|
to communicate with a NameNode daemon, and connects
|
|
directly to DataNodes to read/write block data.
|
|
|
|
Hadoop DFS users should obtain an instance of
|
|
DistributedFileSystem, which uses DFSClient to handle
|
|
filesystem tasks.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.DFSClient -->
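<!-- Illustrative usage sketch, not generated by JDiff. The DFSClient class
     doc above recommends that users obtain a DistributedFileSystem rather
     than drive DFSClient directly. A minimal Java sketch, assuming
     FileSystem.get(Configuration) and an hdfs:// default filesystem in the
     configuration (neither is listed in this file):

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FSDataInputStream;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.DistributedFileSystem;

     public class ReadFileDemo {
       public static void main(String[] args) throws IOException {
         Configuration conf = new Configuration();
         FileSystem fs = FileSystem.get(conf);  // DistributedFileSystem when fs.default.name is hdfs://
         try {
           if (fs instanceof DistributedFileSystem) {
             FSDataInputStream in = fs.open(new Path("/user/demo/data.txt"), 4096);
             byte[] buf = new byte[4096];
             int n = in.read(buf);              // read the first chunk of the file
             System.out.println("read " + n + " bytes");
             in.close();
           }
         } finally {
           fs.close();
         }
       }
     }
-->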
|
|
<!-- start class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
|
|
<class name="DFSClient.BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="read" return="int"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="buf" type="byte[]"/>
|
|
<param name="off" type="int"/>
|
|
<param name="len" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="skip" return="long"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="n" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="read" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="seekToNewSource" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="targetPos" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="seek"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="pos" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getChunkPosition" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="pos" type="long"/>
|
|
</method>
|
|
<method name="readChunk" return="int"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="pos" type="long"/>
|
|
<param name="buf" type="byte[]"/>
|
|
<param name="offset" type="int"/>
|
|
<param name="len" type="int"/>
|
|
<param name="checksumBuf" type="byte[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="sock" type="java.net.Socket"/>
|
|
<param name="file" type="java.lang.String"/>
|
|
<param name="blockId" type="long"/>
|
|
<param name="genStamp" type="long"/>
|
|
<param name="startOffset" type="long"/>
|
|
<param name="len" type="long"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="sock" type="java.net.Socket"/>
|
|
<param name="file" type="java.lang.String"/>
|
|
<param name="blockId" type="long"/>
|
|
<param name="genStamp" type="long"/>
|
|
<param name="startOffset" type="long"/>
|
|
<param name="len" type="long"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<param name="verifyChecksum" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Java Doc required]]>
|
|
</doc>
|
|
</method>
|
|
<method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="sock" type="java.net.Socket"/>
|
|
<param name="file" type="java.lang.String"/>
|
|
<param name="blockId" type="long"/>
|
|
<param name="genStamp" type="long"/>
|
|
<param name="startOffset" type="long"/>
|
|
<param name="len" type="long"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<param name="verifyChecksum" type="boolean"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="close"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readAll" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="buf" type="byte[]"/>
|
|
<param name="offset" type="int"/>
|
|
<param name="len" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Similar to readFully(), but only reads as much as possible,
and allows use of the protected readFully().]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This is a wrapper around a connection to a datanode
and understands checksums, offsets, etc.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
|
|
<!-- start class org.apache.hadoop.hdfs.DFSUtil -->
|
|
<class name="DFSUtil" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DFSUtil"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="isValidName" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<doc>
|
|
<![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
|
|
and names which contain a ":" or "/"]]>
|
|
</doc>
|
|
</method>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.DFSUtil -->
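<!-- Illustrative usage sketch, not generated by JDiff. DFSUtil.isValidName()
     above rejects relative paths and names containing a ":" or "/". A minimal
     Java sketch; the expected results below reflect that description:

     import org.apache.hadoop.hdfs.DFSUtil;

     public class ValidNameDemo {
       public static void main(String[] args) {
         System.out.println(DFSUtil.isValidName("/user/demo/data.txt")); // true
         System.out.println(DFSUtil.isValidName("relative/path"));       // false: not absolute
         System.out.println(DFSUtil.isValidName("/bad:name"));           // false: contains ':'
       }
     }
-->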
|
|
<!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
|
|
<class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DistributedFileSystem"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="deprecated, no comment">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@deprecated]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getName" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="deprecated, no comment">
|
|
<doc>
|
|
<![CDATA[@deprecated]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUri" return="java.net.URI"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="initialize"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="uri" type="java.net.URI"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="checkPath"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="org.apache.hadoop.fs.Path"/>
|
|
<doc>
|
|
<![CDATA[Permit paths which explicitly specify the default port.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="makeQualified" return="org.apache.hadoop.fs.Path"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="org.apache.hadoop.fs.Path"/>
|
|
<doc>
|
|
<![CDATA[Normalize paths that explicitly specify the default port.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDefaultBlockSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDefaultReplication" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setWorkingDirectory"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="dir" type="org.apache.hadoop.fs.Path"/>
|
|
</method>
|
|
<method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="file" type="org.apache.hadoop.fs.FileStatus"/>
|
|
<param name="start" type="long"/>
|
|
<param name="len" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="setVerifyChecksum"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="verifyChecksum" type="boolean"/>
|
|
</method>
|
|
<method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[This optional operation is not yet supported.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="setReplication" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="replication" type="short"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="rename" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="dst" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Rename files/dirs]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get rid of Path f, whether a true file or dir.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="recursive" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Requires a boolean check to delete a non-empty
directory recursively.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setQuota"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="namespaceQuota" type="long"/>
|
|
<param name="diskspaceQuota" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set a directory's quotas
|
|
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
|
|
</doc>
|
|
</method>
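<!-- Illustrative usage sketch, not generated by JDiff. setQuota() above sets
     a directory's namespace and diskspace quotas. A minimal Java sketch,
     assuming FileSystem.get(Configuration) returns a DistributedFileSystem
     for an hdfs:// default filesystem (not listed in this file):

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.DistributedFileSystem;

     public class QuotaDemo {
       public static void main(String[] args) throws IOException {
         DistributedFileSystem dfs =
             (DistributedFileSystem) FileSystem.get(new Configuration());
         try {
           // Allow at most 10000 names and 10 GB of raw disk space under /user/demo.
           dfs.setQuota(new Path("/user/demo"), 10000L, 10L * 1024L * 1024L * 1024L);
         } finally {
           dfs.close();
         }
       }
     }
-->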
|
|
<method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="p" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="close"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return the disk usage of the filesystem, including total capacity,
|
|
used space, and remaining space]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRawCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return the total raw capacity of the filesystem, disregarding
replication.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRawUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return the total raw used space in the filesystem, disregarding
replication.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getMissingBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns count of blocks with no good replicas left. Normally should be
|
|
zero.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUnderReplicatedBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns count of blocks with one or more replicas missing.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCorruptBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns count of blocks with at least one replica marked corrupt.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return statistics for each datanode.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setSafeMode" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Enter, leave or get safe mode.
|
|
|
|
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
|
|
FSConstants.SafeModeAction)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="saveNamespace"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Save namespace image.
|
|
|
|
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshNodes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Refreshes the list of hosts and excluded hosts from the configured
|
|
files.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="finalizeUpgrade"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Finalize previously upgraded file system state.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="metaSave"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="pathname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="reportChecksumFailure" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
|
|
<param name="inPos" type="long"/>
|
|
<param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
|
|
<param name="sumsPos" type="long"/>
|
|
<doc>
|
|
<![CDATA[We need to find the blocks that didn't match. Likely only one
|
|
is corrupt but we will report both to the namenode. In the future,
|
|
we can consider figuring out exactly which block is corrupt.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the stat information about the file.
|
|
@throws FileNotFoundException if the file does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setPermission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="p" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setOwner"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="p" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="username" type="java.lang.String"/>
|
|
<param name="groupname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTimes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="p" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="mtime" type="long"/>
|
|
<param name="atime" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Implementation of the abstract FileSystem for the DFS system.
|
|
This object is the way end-user code interacts with a Hadoop
|
|
DistributedFileSystem.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
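<!-- Illustrative sketch (not part of the generated API description): obtaining a
DistributedFileSystem through the generic FileSystem factory and invoking the
administrative calls documented above. The configuration files and the HDFS URI
resolved by FileSystem.get() are assumptions of the example.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  import org.apache.hadoop.hdfs.protocol.FSConstants;

  public class DfsAdminSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();   // picks up core-site.xml / hdfs-site.xml
      FileSystem fs = FileSystem.get(conf);       // assumed to resolve to an hdfs:// URI
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // Query safe mode without changing it.
        boolean inSafeMode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
        System.out.println("Safe mode: " + inSafeMode);
        // One DatanodeInfo per datanode, as documented for getDataNodeStats().
        for (DatanodeInfo dn : dfs.getDataNodeStats()) {
          System.out.println(dn.getName() + " capacity=" + dn.getCapacity());
        }
      }
      fs.close();
    }
  }
-->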
|
|
<!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
|
|
<class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDfsUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getRemaining" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
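<!-- Illustrative sketch (not part of the generated API description): reading the three
DiskStatus accessors documented above. How the DiskStatus instance is obtained (for
example from DistributedFileSystem) is an assumption and is left outside the sketch.

  import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;

  public class DiskStatusSketch {
    // Prints a one line summary from a DiskStatus instance passed in by the caller.
    static void report(DiskStatus ds) {
      long capacity = ds.getCapacity();
      long used = ds.getDfsUsed();
      long remaining = ds.getRemaining();
      System.out.printf("capacity=%d used=%d remaining=%d free%%=%.1f%n",
          capacity, used, remaining, 100.0 * remaining / Math.max(capacity, 1));
    }
  }
-->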
|
|
<!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
|
|
<class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="HDFSPolicyProvider"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
|
|
<!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
|
|
<class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="HftpFileSystem"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="initialize"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.net.URI"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="pickOneAddress" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="hostname" type="java.lang.String"/>
|
|
<exception name="UnknownHostException" type="java.net.UnknownHostException"/>
|
|
<doc>
|
|
<![CDATA[Randomly pick one of the available IP addresses of a given hostname.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUri" return="java.net.URI"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="openConnection" return="java.net.HttpURLConnection"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="query" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
|
|
@param path The path component of the URL
|
|
@param query The query component of the URL]]>
|
|
</doc>
|
|
</method>
|
|
<method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="buffersize" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setWorkingDirectory"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
</method>
|
|
<method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[This optional operation is not yet supported.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="bufferSize" type="int"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="rename" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="dst" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="recursive" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="org.apache.hadoop.fs.Path"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="nnAddr" type="java.net.InetSocketAddress"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ran" type="java.util.Random"
|
|
transient="false" volatile="false"
|
|
static="false" final="true" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="df" type="java.text.SimpleDateFormat"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
|
|
The following implementation provides a limited, read-only interface
|
|
to a filesystem over HTTP.
|
|
@see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
|
|
@see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
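<!-- Illustrative sketch (not part of the generated API description): read-only access
through an hftp:// URI via the generic FileSystem factory. The namenode host, HTTP info
port and paths below are placeholders.

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class HftpReadSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // hftp is a limited, read-only interface; host and port are placeholders.
      FileSystem hftp = FileSystem.get(URI.create("hftp://namenode.example.com:50070/"), conf);
      for (FileStatus st : hftp.listStatus(new Path("/user"))) {
        System.out.println(st.getPath() + " " + st.getLen());
      }
      BufferedReader r = new BufferedReader(
          new InputStreamReader(hftp.open(new Path("/user/sample.txt"))));
      System.out.println(r.readLine());
      r.close();
      hftp.close();
    }
  }
-->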
|
|
<!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
|
|
<class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="HsftpFileSystem"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="initialize"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.net.URI"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="openConnection" return="java.net.HttpURLConnection"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="query" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getUri" return="java.net.URI"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[An implementation of a protocol for accessing filesystems over HTTPS.
|
|
The following implementation provides a limited, read-only interface
|
|
to a filesystem over HTTPS.
|
|
@see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
|
|
@see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
|
|
<!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
|
|
<class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<implements name="javax.net.ssl.HostnameVerifier"/>
|
|
<constructor name="HsftpFileSystem.DummyHostnameVerifier"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="verify" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="hostname" type="java.lang.String"/>
|
|
<param name="session" type="javax.net.ssl.SSLSession"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Dummy hostname verifier that is used to bypass hostname checking.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
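<!-- Illustrative sketch (not part of the generated API description): a minimal verifier
with the behaviour the class above documents, built only on the standard javax.net.ssl
interfaces. The class name here is illustrative.

  import javax.net.ssl.HostnameVerifier;
  import javax.net.ssl.SSLSession;

  // Accept every hostname, effectively bypassing hostname checking.
  public class AcceptAllHostnameVerifier implements HostnameVerifier {
    public boolean verify(String hostname, SSLSession session) {
      return true;  // never reject, mirroring the purpose of the dummy verifier
    }
  }
-->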
|
|
<doc>
|
|
<![CDATA[<p>A distributed implementation of {@link
|
|
org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
|
|
Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
|
|
|
|
<p>The most important difference is that unlike GFS, Hadoop DFS files
|
|
have strictly one writer at any one time. Bytes are always appended
|
|
to the end of the writer's stream. There is no notion of "record appends"
|
|
or "mutations" that are then checked or reordered. Writers simply emit
|
|
a byte stream. That byte stream is guaranteed to be stored in the
|
|
order written.</p>]]>
|
|
</doc>
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.protocol">
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
|
|
<class name="AlreadyBeingCreatedException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="AlreadyBeingCreatedException" type="java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[The exception thrown when a request is made to create a file that is
already being created but has not yet been closed.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.Block -->
|
|
<class name="Block" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<implements name="java.lang.Comparable"/>
|
|
<constructor name="Block"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="Block" type="long, long, long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="Block" type="long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="Block" type="java.io.File, long, long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Find the block id from the given filename.]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="isBlockFilename" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="f" type="java.io.File"/>
|
|
</method>
|
|
<method name="set"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blkid" type="long"/>
|
|
<param name="len" type="long"/>
|
|
<param name="genStamp" type="long"/>
|
|
</method>
|
|
<method name="getBlockId" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setBlockId"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="bid" type="long"/>
|
|
</method>
|
|
<method name="getBlockName" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getNumBytes" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setNumBytes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="len" type="long"/>
|
|
</method>
|
|
<method name="getGenerationStamp" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setGenerationStamp"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="stamp" type="long"/>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="compareTo" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="equals" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="o" type="java.lang.Object"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="hashCode" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="GRANDFATHER_GENERATION_STAMP" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[A Block is a Hadoop FS primitive, identified by a
|
|
long.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.Block -->
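<!-- Illustrative sketch (not part of the generated API description): a Writable
round trip for Block using the constructors and methods documented above. The block id,
length and generation stamp values are arbitrary example values.

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import org.apache.hadoop.hdfs.protocol.Block;

  public class BlockWritableSketch {
    public static void main(String[] args) throws Exception {
      // Arbitrary example values: block id, length in bytes, generation stamp.
      Block b = new Block(123456789L, 67108864L, 1001L);

      // Serialize with the Writable contract documented above.
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      b.write(new DataOutputStream(bytes));

      // Deserialize into a fresh instance and compare.
      Block copy = new Block();
      copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
      System.out.println(b.equals(copy) + " " + copy.getBlockName());
    }
  }
-->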
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
|
|
<class name="BlockListAsLongs" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="BlockListAsLongs" type="long[]"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Constructor
@param iBlockList - the long[] from which this BlockListAsLongs is created]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="convertToArrayLongs" return="long[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blockArray" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
|
|
<doc>
|
|
<![CDATA[Convert a Block[] to a long[].
@param blockArray - the input Block[] array
@return the output long[] array]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getNumberOfBlocks" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The number of blocks
|
|
@return - the number of blocks]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockId" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="index" type="int"/>
|
|
<doc>
|
|
<![CDATA[The block-id of the index-th block
@param index - the block whose block-id is desired
@return the block-id]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockLen" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="index" type="int"/>
|
|
<doc>
|
|
<![CDATA[The block-len of the index-th block
@param index - the block whose block-len is desired
@return - the block-len]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockGenStamp" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="index" type="int"/>
|
|
<doc>
|
|
<![CDATA[The generation stamp of the index-th block
@param index - the block whose generation stamp is desired
@return - the generation stamp]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class provides an interface for accessing a list of blocks that
has been implemented as a long[].
This class is useful for block reports. Rather than sending block reports
as a Block[], we can send them as a long[].]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
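<!-- Illustrative sketch (not part of the generated API description): converting a
Block[] into the compact long[] form used for block reports and reading it back through
the BlockListAsLongs accessors documented above. The block values are arbitrary.

  import org.apache.hadoop.hdfs.protocol.Block;
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

  public class BlockListSketch {
    public static void main(String[] args) {
      Block[] report = {
          new Block(1L, 64L, 100L),   // arbitrary id / length / generation stamp
          new Block(2L, 128L, 101L)
      };
      // Compact long[] form of the report.
      long[] asLongs = BlockListAsLongs.convertToArrayLongs(report);
      BlockListAsLongs list = new BlockListAsLongs(asLongs);
      for (int i = 0; i < list.getNumberOfBlocks(); i++) {
        System.out.println(list.getBlockId(i) + " len=" + list.getBlockLen(i)
            + " gs=" + list.getBlockGenStamp(i));
      }
    }
  }
-->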
|
|
<!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
|
|
<interface name="ClientDatanodeProtocol" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
|
|
<method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="keepLength" type="boolean"/>
|
|
<param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Start generation-stamp recovery for the specified block.
@param block the specified block
@param keepLength keep the block length
@param targets the list of possible locations of the specified block
@return the new block if recovery is successful and the generation stamp
was updated as part of the recovery; returns null if the block does
not have any data and the block was deleted.
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="versionID" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[3: add keepLength parameter.]]>
|
|
</doc>
|
|
</field>
|
|
<doc>
|
|
<![CDATA[A client-datanode protocol for block recovery.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
|
|
<!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
|
|
<interface name="ClientProtocol" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
|
|
<method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="offset" type="long"/>
|
|
<param name="length" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get locations of the blocks of the specified file within the specified range.
|
|
DataNode locations for each block are sorted by
|
|
the proximity to the client.
|
|
<p>
|
|
Return {@link LocatedBlocks} which contains
|
|
file length, blocks and their locations.
|
|
DataNode locations for each block are sorted by
|
|
the distance to the client's address.
|
|
<p>
|
|
The client will then have to contact
|
|
one of the indicated DataNodes to obtain the actual data.
|
|
|
|
@param src file name
|
|
@param offset range start offset
|
|
@param length range length
|
|
@return file length and array of blocks with their locations
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a new file entry in the namespace.
|
|
<p>
|
|
This will create an empty file specified by the source path.
|
|
The path should reflect a full path originated at the root.
|
|
The name-node does not have a notion of "current" directory for a client.
|
|
<p>
|
|
Once created, the file is visible and available for read to other clients.
|
|
However, other clients cannot {@link #delete(String)}, re-create or
{@link #rename(String, String)} it until the file is completed
or the writer's lease expires.
|
|
<p>
|
|
Blocks have a maximum size. Clients that intend to
|
|
create multi-block files must also use {@link #addBlock(String, String)}.
|
|
|
|
@param src path of the file being created.
|
|
@param masked masked permission.
|
|
@param clientName name of the current client.
|
|
@param overwrite indicates whether the file should be
|
|
overwritten if it already exists.
|
|
@param replication block replication factor.
|
|
@param blockSize maximum block size.
|
|
|
|
@throws AccessControlException if permission to create file is
|
|
denied by the system. As usual, on the client side the exception will
be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
|
|
@throws QuotaExceededException if the file creation violates
|
|
any quota restriction
|
|
@throws IOException if other errors occur.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Append to the end of the file.
|
|
@param src path of the file being created.
|
|
@param clientName name of the current client.
|
|
@return information about the last partial block if any.
|
|
@throws AccessControlException if permission to append to the file is
denied by the system. As usual, on the client side the exception will
be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
|
|
Allows appending to an existing file if the server is
|
|
configured with the parameter dfs.support.append set to true, otherwise
|
|
throws an IOException.
|
|
@throws IOException if other errors occur.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setReplication" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="replication" type="short"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set replication for an existing file.
|
|
<p>
|
|
The NameNode sets replication to the new value and returns.
|
|
The actual block replication is not expected to be performed during
|
|
this method call. The blocks will be populated or removed in the
|
|
background as the result of the routine block maintenance procedures.
|
|
|
|
@param src file name
|
|
@param replication new replication
|
|
@throws IOException
|
|
@return true if successful;
|
|
false if file does not exist or is a directory]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setPermission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set permissions for an existing file/directory.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setOwner"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="username" type="java.lang.String"/>
|
|
<param name="groupname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set owner of a path (i.e. a file or a directory).
|
|
The parameters username and groupname cannot both be null.
|
|
@param src
|
|
@param username If it is null, the original username remains unchanged.
|
|
@param groupname If it is null, the original groupname remains unchanged.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="abandonBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="holder" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client can give up on a block by calling abandonBlock().
The client can then
either obtain a new block, or complete or abandon the file.
Any partial writes to the block will be discarded.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[A client that wants to write an additional block to the
|
|
indicated filename (which must currently be open for writing)
|
|
should call addBlock().
|
|
|
|
addBlock() allocates a new block and the set of datanodes that the block data
should be replicated to.
|
|
|
|
@return LocatedBlock allocated block information.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="complete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client is done writing data to the given filename, and would
|
|
like to complete it.
|
|
|
|
The function returns whether the file has been closed successfully.
|
|
If the function returns false, the caller should try again.
|
|
|
|
A call to complete() will not return true until all the file's
|
|
blocks have been replicated the minimum number of times. Thus,
|
|
DataNode failures may cause a client to call complete() several
|
|
times before succeeding.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="reportBadBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client wants to report corrupted blocks (blocks with specified
|
|
locations on datanodes).
|
|
@param blocks Array of located blocks to report]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rename" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="dst" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Rename an item in the file system namespace.
|
|
|
|
@param src existing file or directory name.
|
|
@param dst new name.
|
|
@return true if successful, or false if the old name does not exist
|
|
or if the new name already belongs to the namespace.
|
|
@throws IOException if the new name is invalid.
|
|
@throws QuotaExceededException if the rename would violate
|
|
any quota restriction]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Delete the given file or directory from the file system.
|
|
<p>
|
|
Any blocks belonging to the deleted files will be garbage-collected.
|
|
|
|
@param src existing name.
|
|
@return true only if the existing file or directory was actually removed
|
|
from the file system.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="recursive" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Delete the given file or directory from the file system.
|
|
<p>
|
|
Same as delete, but provides a way to avoid accidentally
deleting non-empty directories programmatically.
@param src existing name
@param recursive if true, deletes a non-empty directory recursively,
else throws an exception.
|
|
@return true only if the existing file or directory was actually removed
|
|
from the file system.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a directory (or hierarchy of directories) with the given
|
|
name and permission.
|
|
|
|
@param src The path of the directory being created
|
|
@param masked The masked permission of the directory being created
|
|
@return true if the operation succeeds.
@throws {@link AccessControlException} if permission to create the directory is
denied by the system. As usual, on the client side the exception will
be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
|
|
@throws QuotaExceededException if the operation would violate
|
|
any quota restriction.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get a listing of the indicated directory]]>
|
|
</doc>
|
|
</method>
|
|
<method name="renewLease"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Client programs can cause stateful changes in the NameNode
|
|
that affect other clients. A client may obtain a file and
|
|
neither abandon nor complete it. A client might hold a series
|
|
of locks that prevent other clients from proceeding.
|
|
Clearly, it would be bad if a client held a bunch of locks
|
|
that it never gave up. This can happen easily if the client
|
|
dies unexpectedly.
|
|
<p>
|
|
So, the NameNode will revoke the locks and live file-creates
|
|
for clients that it thinks have died. A client tells the
|
|
NameNode that it is still alive by periodically calling
|
|
renewLease(). If a certain amount of time passes since
|
|
the last call to renewLease(), the NameNode assumes the
|
|
client has died.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStats" return="long[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get a set of statistics about the filesystem.
|
|
The array currently contains the following values:
|
|
<ul>
|
|
<li> [0] contains the total storage capacity of the system, in bytes.</li>
|
|
<li> [1] contains the total used space of the system, in bytes.</li>
|
|
<li> [2] contains the available storage of the system, in bytes.</li>
|
|
<li> [3] contains number of under replicated blocks in the system.</li>
|
|
<li> [4] contains number of blocks with a corrupt replica. </li>
|
|
<li> [5] contains number of blocks without any good replicas left. </li>
|
|
</ul>
|
|
Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
|
|
actual numbers to index into the array.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get a report on the system's current datanodes.
|
|
One DatanodeInfo object is returned for each DataNode.
|
|
Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
|
|
otherwise all datanodes if type is ALL.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getPreferredBlockSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="filename" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get the block size for the given file.
|
|
@param filename The name of the file
|
|
@return The number of bytes in each block
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setSafeMode" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Enter, leave or get safe mode.
|
|
<p>
|
|
Safe mode is a name node state when it
|
|
<ol><li>does not accept changes to name space (read-only), and</li>
|
|
<li>does not replicate or delete blocks.</li></ol>
|
|
|
|
<p>
|
|
Safe mode is entered automatically at name node startup.
|
|
Safe mode can also be entered manually using
|
|
{@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
|
|
<p>
|
|
At startup the name node accepts data node reports collecting
|
|
information about block locations.
|
|
In order to leave safe mode it needs to collect a configurable
percentage of blocks, called the threshold, which satisfy the minimal
replication condition.
|
|
The minimal replication condition is that each block must have at least
|
|
<tt>dfs.replication.min</tt> replicas.
|
|
When the threshold is reached the name node extends safe mode
for a configurable amount of time
to let the remaining data nodes check in before it
starts replicating missing blocks.
|
|
Then the name node leaves safe mode.
|
|
<p>
|
|
If safe mode is turned on manually using
|
|
{@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
|
|
then the name node stays in safe mode until it is manually turned off
|
|
using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
|
|
Current state of the name node can be verified using
|
|
{@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
|
|
<h4>Configuration parameters:</h4>
|
|
<tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
|
|
<tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
|
|
<tt>dfs.replication.min</tt> is the minimal replication parameter.
|
|
|
|
<h4>Special cases:</h4>
|
|
The name node does not enter safe mode at startup if the threshold is
|
|
set to 0 or if the name space is empty.<br>
|
|
If the threshold is set to 1 then all blocks need to have at least
|
|
minimal replication.<br>
|
|
If the threshold value is greater than 1 then the name node will not be
|
|
able to turn off safe mode automatically.<br>
|
|
Safe mode can always be turned off manually.
|
|
|
|
@param action <ul> <li>0 leave safe mode;</li>
|
|
<li>1 enter safe mode;</li>
|
|
<li>2 get safe mode state.</li></ul>
|
|
@return <ul><li>0 if the safe mode is OFF or</li>
|
|
<li>1 if the safe mode is ON.</li></ul>
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="saveNamespace"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Save namespace image.
|
|
<p>
|
|
Saves the current namespace into the storage directories and resets the edits log.
|
|
Requires superuser privilege and safe mode.
|
|
|
|
@throws AccessControlException if the superuser privilege is violated.
|
|
@throws IOException if image creation failed.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshNodes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Tells the namenode to reread the hosts and exclude files.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="finalizeUpgrade"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Finalize previous upgrade.
|
|
Remove file system state saved during the upgrade.
|
|
The upgrade will become irreversible.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
|
|
|
|
@param action {@link FSConstants.UpgradeAction} to perform
|
|
@return upgrade status information or null if no upgrades are in progress
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="metaSave"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="filename" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Dumps namenode data structures into the specified file. If the file
already exists, the output is appended.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get the file info for a specific file or directory.
|
|
@param src The string representation of the path to the file
|
|
@throws IOException if permission to access file is denied by the system
|
|
@return object containing information regarding the file
|
|
or null if file not found]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get {@link ContentSummary} rooted at the specified directory.
|
|
@param path The string representation of the path]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setQuota"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="namespaceQuota" type="long"/>
|
|
<param name="diskspaceQuota" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set the quota for a directory.
|
|
@param path The string representation of the path to the directory
|
|
@param namespaceQuota Limit on the number of names in the tree rooted
|
|
at the directory
|
|
@param diskspaceQuota Limit on the disk space occupied by all the files under
this directory.
|
|
<br><br>
|
|
|
|
The quota can have three types of values: (1) 0 or more will set
|
|
the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
|
|
the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
|
|
implies the quota will be reset. Any other value is a runtime error.
|
|
|
|
@throws FileNotFoundException if the path is a file or
|
|
does not exist
|
|
@throws QuotaExceededException if the directory size
|
|
is greater than the given quota]]>
|
|
</doc>
|
|
</method>
|
|
<method name="fsync"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="client" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Write all metadata for this file into persistent storage.
|
|
The file must be currently open for writing.
|
|
@param src The string representation of the path
|
|
@param client The string representation of the client]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTimes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="mtime" type="long"/>
|
|
<param name="atime" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Sets the modification and access time of the file to the specified time.
|
|
@param src The string representation of the path
|
|
@param mtime The number of milliseconds since Jan 1, 1970.
|
|
Setting mtime to -1 means that modification time should not be set
|
|
by this call.
|
|
@param atime The number of milliseconds since Jan 1, 1970.
|
|
Setting atime to -1 means that access time should not be set
|
|
by this call.]]>
|
|
</doc>
|
|
</method>
|
|
<field name="versionID" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Compared to the previous version the following changes have been introduced:
|
|
(Only the latest change is reflected.
|
|
The log of historical changes can be retrieved from the svn).
|
|
41: saveNamespace introduced.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="GET_STATS_CAPACITY_IDX" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="GET_STATS_USED_IDX" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="GET_STATS_REMAINING_IDX" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[ClientProtocol is used by user code via
|
|
{@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
|
|
with the NameNode. User code can manipulate the directory namespace,
|
|
as well as open/close file streams, etc.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
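<!-- Illustrative sketch (not part of the generated API description): indexing the
long[] returned by getStats() with the GET_STATS_* constants documented above. How the
ClientProtocol proxy is obtained (normally inside DFSClient) is outside the scope of the
sketch, so it is passed in as a parameter.

  import org.apache.hadoop.hdfs.protocol.ClientProtocol;

  public class FsStatsSketch {
    static void printStats(ClientProtocol namenode) throws java.io.IOException {
      long[] stats = namenode.getStats();
      System.out.println("capacity  = " + stats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
      System.out.println("used      = " + stats[ClientProtocol.GET_STATS_USED_IDX]);
      System.out.println("remaining = " + stats[ClientProtocol.GET_STATS_REMAINING_IDX]);
      System.out.println("under-replicated = "
          + stats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
    }
  }
-->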
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
|
|
<class name="DatanodeID" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.WritableComparable"/>
|
|
<constructor name="DatanodeID"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Equivalent to DatanodeID("").]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeID" type="java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DatanodeID copy constructor
|
|
|
|
@param from]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Create DatanodeID
|
|
@param nodeName (hostname:portNumber)
|
|
@param storageID data storage ID
|
|
@param infoPort info server port
|
|
@param ipcPort ipc server port]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getName" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return hostname:portNumber.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStorageID" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return data storage ID.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getInfoPort" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getIpcPort" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setStorageID"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="storageID" type="java.lang.String"/>
|
|
<doc>
|
|
<![CDATA[Sets the data storage ID.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getHost" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return the hostname only, without the :portNumber.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getPort" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="equals" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="to" type="java.lang.Object"/>
|
|
</method>
|
|
<method name="hashCode" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="updateRegInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<doc>
|
|
<![CDATA[Update fields when a new registration request comes in.
|
|
Note that this does not update storageID.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="compareTo" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<doc>
|
|
<![CDATA[Comparable.
|
|
Comparison is based on the String name (host:portNumber) only.
|
|
@param that
|
|
@return as specified by Comparable.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="name" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="storageID" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="infoPort" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ipcPort" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[DatanodeID is composed of the data node
|
|
name (hostname:portNumber) and the data storage ID,
|
|
which it currently represents.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
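<!-- A small illustrative sketch of the DatanodeID API listed above; the host names,
     ports and storage IDs are made-up values. As documented, ordering and equality
     are based on the name (host:portNumber) string only.

     import org.apache.hadoop.hdfs.protocol.DatanodeID;

     public class DatanodeIDSketch {
       public static void main(String[] args) {
         DatanodeID a = new DatanodeID("node1.example.com:50010", "DS-1234", 50075, 50020);
         DatanodeID b = new DatanodeID("node2.example.com:50010", "DS-5678", 50075, 50020);
         System.out.println(a.getHost());               // node1.example.com
         System.out.println(a.getPort());               // 50010
         System.out.println(a.compareTo(b) < 0);        // true: comparison follows the host:port name
       }
     }
-->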
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
|
|
<class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.net.Node"/>
|
|
<constructor name="DatanodeInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The raw capacity.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDfsUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The DFS space used by the data node.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getNonDfsUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The non-DFS space used by the data node.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDfsUsedPercent" return="float"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The space used by the data node, as a percentage of the present capacity.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRemaining" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The raw free space.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRemainingPercent" return="float"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The remaining space as percentage of configured capacity.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getLastUpdate" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The time when this information was accurate.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getXceiverCount" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[number of active connections]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setCapacity"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="capacity" type="long"/>
|
|
<doc>
|
|
<![CDATA[Sets raw capacity.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setRemaining"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="remaining" type="long"/>
|
|
<doc>
|
|
<![CDATA[Sets raw free space.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setLastUpdate"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="lastUpdate" type="long"/>
|
|
<doc>
|
|
<![CDATA[Sets time when this information was accurate.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setXceiverCount"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="xceiverCount" type="int"/>
|
|
<doc>
|
|
<![CDATA[Sets number of active connections]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getNetworkLocation" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[rack name]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setNetworkLocation"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="location" type="java.lang.String"/>
|
|
<doc>
|
|
<![CDATA[Sets the rack name]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getHostName" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setHostName"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="host" type="java.lang.String"/>
|
|
</method>
|
|
<method name="getDatanodeReport" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[A formatted string for reporting the status of the DataNode.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="dumpDatanode" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[A formatted string for printing the status of the DataNode.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="startDecommission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Start decommissioning a node.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="stopDecommission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Stop decommissioning a node.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isDecommissionInProgress" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns true if the node is in the process of being decommissioned]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isDecommissioned" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns true if the node has been decommissioned.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setDecommissioned"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Sets the admin state to indicate that decommission is complete.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setAdminState"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
|
|
<doc>
|
|
<![CDATA[Sets the admin state of this node.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getParent" return="org.apache.hadoop.net.Node"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return this node's parent]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setParent"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="parent" type="org.apache.hadoop.net.Node"/>
|
|
</method>
|
|
<method name="getLevel" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return this node's level in the tree.
|
|
E.g. the root of a tree returns 0 and its children return 1]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setLevel"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="level" type="int"/>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="capacity" type="long"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="dfsUsed" type="long"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="remaining" type="long"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="lastUpdate" type="long"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="xceiverCount" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="location" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="hostName" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[HostName as supplied by the datanode during registration as its
|
|
name. Namenode uses datanode IP address as the name.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[DatanodeInfo represents the status of a DataNode.
|
|
This object is used for communication in the
|
|
Datanode Protocol and the Client Protocol.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
|
|
<class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
|
|
<!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
|
|
<interface name="DataTransferProtocol" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<field name="DATA_TRANSFER_VERSION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Version for data transfers between clients and datanodes.
This should change when the serialization of DatanodeInfo changes,
not just when the protocol changes.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="OP_WRITE_BLOCK" type="byte"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_READ_BLOCK" type="byte"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_READ_METADATA" type="byte"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_REPLACE_BLOCK" type="byte"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_COPY_BLOCK" type="byte"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_BLOCK_CHECKSUM" type="byte"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_STATUS_SUCCESS" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_STATUS_ERROR" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_STATUS_ERROR_CHECKSUM" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_STATUS_ERROR_INVALID" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_STATUS_ERROR_EXISTS" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="OP_STATUS_CHECKSUM_OK" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[The client transfers data to and from datanodes using a streaming protocol.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
|
|
<!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
|
|
<interface name="FSConstants" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<field name="MIN_BLOCKS_FOR_WRITE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="BLOCK_INVALIDATE_CHUNK" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="QUOTA_DONT_SET" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="QUOTA_RESET" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="HEARTBEAT_INTERVAL" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="BLOCKREPORT_INTERVAL" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="BLOCKREPORT_INITIAL_DELAY" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LEASE_SOFTLIMIT_PERIOD" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LEASE_HARDLIMIT_PERIOD" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LEASE_RECOVER_PERIOD" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="MAX_PATH_LENGTH" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="MAX_PATH_DEPTH" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="BUFFER_SIZE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="SMALL_BUFFER_SIZE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DEFAULT_BLOCK_SIZE" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="SIZE_OF_INTEGER" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LAYOUT_VERSION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Some handy constants]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
|
|
<class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
|
|
<class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
|
|
<class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Distributed upgrade actions:
|
|
|
|
1. Get upgrade status.
|
|
2. Get detailed upgrade status.
|
|
3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
|
|
<class name="LocatedBlock" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="LocatedBlock"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getStartOffset" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getBlockSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="isCorrupt" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
|
|
objects. It tells where to find a Block.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
|
|
<class name="LocatedBlocks" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="LocatedBlocks" type="long, java.util.List, boolean"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getLocatedBlocks" return="java.util.List"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get located blocks.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="index" type="int"/>
|
|
<doc>
|
|
<![CDATA[Get located block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="locatedBlockCount" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get number of located blocks.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFileLength" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="isUnderConstruction" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return true if the file was under construction when
|
|
this LocatedBlocks was constructed, false otherwise.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="findBlock" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="offset" type="long"/>
|
|
<doc>
|
|
<![CDATA[Find block containing specified offset.
|
|
|
|
@return the index of the block containing the offset, or a negative value if no such block exists.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="insertRange"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blockIdx" type="int"/>
|
|
<param name="newBlocks" type="java.util.List"/>
|
|
</method>
|
|
<method name="getInsertIndex" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="binSearchResult" type="int"/>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Collection of blocks with their locations and the file length.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
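<!-- An illustrative helper using only the LocatedBlock/LocatedBlocks accessors listed
     above. How the LocatedBlocks instance is obtained (normally via the client-side
     block-location lookup) is outside this sketch.

     import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
     import org.apache.hadoop.hdfs.protocol.LocatedBlock;
     import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

     public class BlockLocationPrinter {
       public static void print(LocatedBlocks blocks) {
         for (int i = 0; i < blocks.locatedBlockCount(); i++) {
           LocatedBlock lb = blocks.get(i);
           System.out.println("block " + lb.getBlock() + " at offset " + lb.getStartOffset());
           for (DatanodeInfo dn : lb.getLocations()) {
             System.out.println("  replica on " + dn.getName());   // host:portNumber of the datanode
           }
         }
       }
     }
-->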
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
|
|
<class name="QuotaExceededException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="QuotaExceededException" type="java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="QuotaExceededException" type="long, long, long, long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="setPathName"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
</method>
|
|
<method name="getMessage" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This exception is thrown when a modification to HDFS results in the violation
of a directory quota. A directory quota may be a namespace quota (a limit
on the number of files and directories) or a diskspace quota (a limit on the space
taken by all the files under the directory tree). <br> <br>

The message of the exception specifies the directory where the quota
was violated and the actual quotas.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
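<!-- An illustrative sketch of handling this exception on the client side; the path is an
     assumption, and depending on the RPC layer the error may also surface wrapped in a
     RemoteException rather than as a bare QuotaExceededException.

     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FSDataOutputStream;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.protocol.QuotaExceededException;

     public class QuotaSketch {
       public static void main(String[] args) throws Exception {
         FileSystem fs = FileSystem.get(new Configuration());
         try {
           FSDataOutputStream out = fs.create(new Path("/quota/limited/dir/file")); // assumed path
           out.write(new byte[1024]);
           out.close();
         } catch (QuotaExceededException qee) {
           // the message names the directory whose namespace or diskspace quota was violated
           System.err.println("Quota violated: " + qee.getMessage());
         }
       }
     }
-->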
|
|
<!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException -->
|
|
<class name="UnregisteredDatanodeException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="UnregisteredDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="UnregisteredDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[This exception is thrown when a datanode that has not previously
|
|
registered is trying to access the name node.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException -->
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.balancer">
|
|
<!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
|
|
<class name="Balancer" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.util.Tool"/>
|
|
<method name="main"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<doc>
|
|
<![CDATA[Run a balancer
|
|
@param args the command-line arguments]]>
|
|
</doc>
|
|
</method>
|
|
<method name="run" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[Run the balancer.
@param args arguments to the Balancer
@exception Exception if any exception occurs during datanode balancing]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getConf" return="org.apache.hadoop.conf.Configuration"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return this balancer's configuration.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setConf"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<doc>
|
|
<![CDATA[Set this balancer's configuration.]]>
|
|
</doc>
|
|
</method>
|
|
<field name="MAX_NUM_CONCURRENT_MOVES" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The maximum number of concurrent block moves at a datanode
for balancing purposes.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="SUCCESS" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ALREADY_RUNNING" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="NO_MOVE_BLOCK" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="NO_MOVE_PROGRESS" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="IO_EXCEPTION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ILLEGAL_ARGS" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
|
|
when some datanodes become full or when new empty nodes join the cluster.
|
|
The tool is deployed as an application program that can be run by the
|
|
cluster administrator on a live HDFS cluster while applications are
|
|
adding and deleting files.
|
|
|
|
<p>SYNOPSIS
|
|
<pre>
|
|
To start:
|
|
bin/start-balancer.sh [-threshold <threshold>]
|
|
Example: bin/start-balancer.sh
|
|
start the balancer with a default threshold of 10%
|
|
bin/start-balancer.sh -threshold 5
|
|
start the balancer with a threshold of 5%
|
|
To stop:
|
|
bin/stop-balancer.sh
|
|
</pre>
|
|
|
|
<p>DESCRIPTION
|
|
<p>The threshold parameter is a fraction in the range of (0%, 100%) with a
|
|
default value of 10%. The threshold sets a target for whether the cluster
|
|
is balanced. A cluster is balanced if for each datanode, the utilization
|
|
of the node (ratio of used space at the node to total capacity of the node)
|
|
differs from the utilization of the cluster (ratio of used space in the cluster
|
|
to total capacity of the cluster) by no more than the threshold value.
|
|
The smaller the threshold, the more balanced a cluster will become.
|
|
It takes more time to run the balancer for small threshold values.
|
|
Also for a very small threshold the cluster may not be able to reach the
|
|
balanced state when applications write and delete files concurrently.
|
|
|
|
<p>The tool moves blocks from highly utilized datanodes to poorly
|
|
utilized datanodes iteratively. In each iteration a datanode moves or
|
|
receives no more than the lesser of 10G bytes or the threshold fraction
|
|
of its capacity. Each iteration runs no more than 20 minutes.
|
|
At the end of each iteration, the balancer obtains updated datanodes
|
|
information from the namenode.
|
|
|
|
<p>A configuration property that limits the balancer's use of bandwidth is
|
|
defined in the default configuration file:
|
|
<pre>
|
|
<property>
|
|
<name>dfs.balance.bandwidthPerSec</name>
|
|
<value>1048576</value>
|
|
<description> Specifies the maximum bandwidth that each datanode
|
|
can utilize for the balancing purpose, in terms of the number of bytes
|
|
per second. </description>
|
|
</property>
|
|
</pre>
|
|
|
|
<p>This property determines the maximum speed at which a block will be
|
|
moved from one datanode to another. The default value is 1MB/s. The higher
|
|
the bandwidth, the faster a cluster can reach the balanced state,
|
|
but with greater competition with application processes. If an
|
|
administrator changes the value of this property in the configuration
|
|
file, the change is observed when HDFS is next restarted.
|
|
|
|
<p>MONITORING BALANCER PROGRESS
|
|
<p>After the balancer is started, an output file name where the balancer
|
|
progress will be recorded is printed on the screen. The administrator
|
|
can monitor the running of the balancer by reading the output file.
|
|
The output shows the balancer's status iteration by iteration. In each
|
|
iteration it prints the starting time, the iteration number, the total
|
|
number of bytes that have been moved in the previous iterations,
|
|
the total number of bytes that are left to move in order for the cluster
|
|
to be balanced, and the number of bytes that are being moved in this
|
|
iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
|
|
To Move" is decreasing.
|
|
|
|
<p>Running multiple instances of the balancer in an HDFS cluster is
|
|
prohibited by the tool.
|
|
|
|
<p>The balancer automatically exits when any of the following five
|
|
conditions is satisfied:
|
|
<ol>
|
|
<li>The cluster is balanced;
|
|
<li>No block can be moved;
|
|
<li>No block has been moved for five consecutive iterations;
|
|
<li>An IOException occurs while communicating with the namenode;
|
|
<li>Another balancer is running.
|
|
</ol>
|
|
|
|
<p>Upon exit, a balancer returns an exit code and prints one of the
|
|
following messages to the output file, corresponding to the above exit
|
|
reasons:
|
|
<ol>
|
|
<li>The cluster is balanced. Exiting
|
|
<li>No block can be moved. Exiting...
|
|
<li>No block has been moved for 3 iterations. Exiting...
|
|
<li>Received an IO exception: failure reason. Exiting...
|
|
<li>Another balancer is running. Exiting...
|
|
</ol>
|
|
|
|
<p>The administrator can interrupt the execution of the balancer at any
|
|
time by running the command "stop-balancer.sh" on the machine where the
|
|
balancer is running.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
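<!-- An illustrative alternative to the start-balancer.sh script shown above: the public
     static main can be invoked directly. The threshold value is an assumption, and main
     behaves like the command-line tool (it may terminate the JVM with an exit code).

     public class RunBalancerSketch {
       public static void main(String[] args) {
         // equivalent to: bin/start-balancer.sh -threshold 5
         org.apache.hadoop.hdfs.server.balancer.Balancer.main(new String[] {"-threshold", "5"});
       }
     }
-->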
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.common">
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
|
|
<class name="GenerationStamp" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.WritableComparable"/>
|
|
<constructor name="GenerationStamp"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getStamp" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns the current generation stamp]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setStamp"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="stamp" type="long"/>
|
|
<doc>
|
|
<![CDATA[Sets the current generation stamp]]>
|
|
</doc>
|
|
</method>
|
|
<method name="nextStamp" return="long"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[First increments the counter and then returns the stamp]]>
|
|
</doc>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="compare" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="x" type="long"/>
|
|
<param name="y" type="long"/>
|
|
</method>
|
|
<method name="compareTo" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="equals" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="o" type="java.lang.Object"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="equalsWithWildcard" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="x" type="long"/>
|
|
<param name="y" type="long"/>
|
|
</method>
|
|
<method name="hashCode" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="WILDCARD_STAMP" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="FIRST_VALID_STAMP" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
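<!-- A tiny illustrative sketch of the GenerationStamp accessors listed above; the
     expected relationship between the values follows the documented behaviour of
     nextStamp() (increment first, then return).

     import org.apache.hadoop.hdfs.server.common.GenerationStamp;

     public class StampSketch {
       public static void main(String[] args) {
         GenerationStamp gs = new GenerationStamp();    // initialized to FIRST_VALID_STAMP
         long first = gs.getStamp();
         long next = gs.nextStamp();                    // increments the counter, then returns it
         System.out.println(next == first + 1);         // true per the documented contract
       }
     }
-->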
|
|
<!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
|
|
<interface name="HdfsConstants" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<field name="READ_TIMEOUT" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="WRITE_TIMEOUT" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="WRITE_TIMEOUT_EXTENSION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Some handy internal HDFS constants]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
|
|
<class name="HdfsConstants.NodeType" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Type of the node]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
|
|
<class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<method name="getName" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
|
|
<class name="InconsistentFSStateException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[The exception is thrown when the file system state is inconsistent
|
|
and is not recoverable.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
|
|
<class name="IncorrectVersionException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="IncorrectVersionException" type="int, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="IncorrectVersionException" type="int, java.lang.String, int"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[The exception is thrown when the external version does not match
the current version of the application.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
|
|
<class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Create empty storage info of the specified type]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="dirIterator" return="java.util.Iterator"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return default iterator
|
|
This iterator returns all entries of storageDirs]]>
|
|
</doc>
|
|
</method>
|
|
<method name="dirIterator" return="java.util.Iterator"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
|
|
<doc>
|
|
<![CDATA[Return iterator based on Storage Directory Type
|
|
This iterator selects entries of storageDirs of type dirType and returns
|
|
them via the Iterator]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getNumStorageDirs" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="idx" type="int"/>
|
|
</method>
|
|
<method name="addStorageDir"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
</method>
|
|
<method name="isConversionNeeded" return="boolean"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="checkVersionUpgradable"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="oldVersion" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Checks if the upgrade from the given old version is supported. If
|
|
no upgrade is supported, it throws IncorrectVersionException.
|
|
|
|
@param oldVersion]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="props" type="java.util.Properties"/>
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get common storage fields.
|
|
Should be overridden if additional fields need to be read.
|
|
|
|
@param props
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="props" type="java.util.Properties"/>
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set common storage fields.
|
|
Should be overridden if additional fields need to be set.
|
|
|
|
@param props
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rename"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="from" type="java.io.File"/>
|
|
<param name="to" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="deleteDir"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="dir" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="writeAll"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Write all data storage files.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="unlockAll"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Unlock all storage directories.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isLockSupported" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="idx" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Check whether underlying file system supports file locking.
|
|
|
|
@return <code>true</code> if exclusive locks are supported or
|
|
<code>false</code> otherwise.
|
|
@throws IOException
|
|
@see StorageDirectory#lock()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBuildVersion" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getRegistrationID" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
|
|
</method>
|
|
<method name="corruptPreUpgradeStorage"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="rootDir" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="writeCorruptedData"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="file" type="java.io.RandomAccessFile"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="STORAGE_FILE_VERSION" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="STORAGE_DIR_CURRENT" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="storageDirs" type="java.util.List"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Storage information file.
|
|
<p>
|
|
Local storage information is stored in a separate file VERSION.
|
|
It contains the type of the node,
|
|
the storage layout version, the namespace id, and
|
|
the fs state creation time.
|
|
<p>
|
|
Local storage can reside in multiple directories.
|
|
Each directory should contain the same VERSION file as the others.
|
|
During startup Hadoop servers (name-node and data-nodes) read their local
|
|
storage information from them.
|
|
<p>
|
|
The servers hold a lock for each storage directory while they run so that
|
|
other nodes cannot start up sharing the same storage.
|
|
The locks are released when the servers stop (normally or abnormally).]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
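<!-- Example: a minimal sketch, assuming a concrete Storage subclass (e.g. DataStorage)
     is available, of walking the storage directories through the raw Iterator returned
     by dirIterator(). The class name StorageDirWalk is hypothetical.

     import java.util.Iterator;
     import org.apache.hadoop.hdfs.server.common.Storage;
     import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

     public class StorageDirWalk {
       static void listVersionFiles(Storage storage) {
         // dirIterator() returns a pre-generics Iterator over StorageDirectory entries.
         for (Iterator it = storage.dirIterator(); it.hasNext(); ) {
           StorageDirectory sd = (StorageDirectory) it.next();
           System.out.println("root = " + sd.getRoot() + ", VERSION = " + sd.getVersionFile());
         }
         // Index-based access is also available.
         for (int i = 0; i < storage.getNumStorageDirs(); i++) {
           System.out.println("current = " + storage.getStorageDir(i).getCurrentDir());
         }
       }
     }
-->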
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
|
|
<class name="Storage.StorageDirectory" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="Storage.StorageDirectory" type="java.io.File"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getRoot" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get root directory of this storage]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get storage directory type]]>
|
|
</doc>
|
|
</method>
|
|
<method name="read"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Read version file.
|
|
|
|
@throws IOException if file cannot be read or contains inconsistent data]]>
|
|
</doc>
|
|
</method>
|
|
<method name="read"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="from" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Write version file.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="to" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="clearDirectory"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Clear and re-create storage directory.
|
|
<p>
|
|
Removes contents of the current directory and creates an empty directory.
|
|
|
|
This does not fully format the storage directory.
|
|
It cannot write the version file since it should be written last after
|
|
all other storage type dependent files are written.
|
|
Derived storage is responsible for setting specific storage values and
|
|
writing the version file to disk.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCurrentDir" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getVersionFile" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getPreviousVersionFile" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getPreviousDir" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getPreviousTmp" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getRemovedTmp" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getFinalizedTmp" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getLastCheckpointTmp" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getPreviousCheckpoint" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Check consistency of the storage directory
|
|
|
|
@param startOpt a startup option.
|
|
|
|
@return state {@link StorageState} of the storage directory
|
|
@throws {@link InconsistentFSStateException} if directory state is not
|
|
consistent and cannot be recovered]]>
|
|
</doc>
|
|
</method>
|
|
<method name="doRecover"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Complete or recover storage state from previously failed transition.
|
|
|
|
@param curState specifies what/how the state should be recovered
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="lock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Lock storage to provide exclusive access.
|
|
|
|
<p> Locking is not supported by all file systems.
|
|
E.g., NFS does not consistently support exclusive locks.
|
|
|
|
<p> If locking is supported we guarantee exclusive access to the
|
|
storage directory. Otherwise, no guarantee is given.
|
|
|
|
@throws IOException if locking fails]]>
|
|
</doc>
|
|
</method>
|
|
<method name="unlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Unlock storage.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[One of the storage directories.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
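<!-- Example: a simplified sketch of the startup flow described above: analyzeStorage()
     reports a StorageState, doRecover() completes any interrupted transition, and
     lock()/read() take the directory lock and load the VERSION file. The recovery
     policy shown here is an assumption, not the exact server behaviour.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
     import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
     import org.apache.hadoop.hdfs.server.common.Storage.StorageState;

     public class StorageDirStartup {
       static void open(StorageDirectory sd) throws IOException {
         StorageState state = sd.analyzeStorage(StartupOption.REGULAR);
         switch (state) {
         case NON_EXISTENT:
           throw new IOException(sd.getRoot() + " does not exist");
         case NOT_FORMATTED:
           sd.lock();
           sd.clearDirectory();   // the caller is expected to format and write VERSION next
           return;
         case NORMAL:
           break;
         default:
           sd.doRecover(state);   // complete or undo the interrupted transition
         }
         sd.lock();               // exclusive access where the file system supports it
         sd.read();               // loads layoutVersion, namespaceID and cTime from VERSION
       }
     }
-->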
|
|
<!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
|
|
<interface name="Storage.StorageDirType" abstract="true"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="isOfType" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[An interface to denote a storage directory type.
Implementations can define a type for a storage directory by implementing
this interface.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
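<!-- Example: a minimal sketch of a StorageDirType implementation. It is usually an enum
     so that dirIterator(dirType) can filter directories by role; the enum name and the
     DATA/META constants below are hypothetical.

     import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;

     public enum ExampleDirType implements StorageDirType {
       DATA, META;

       public StorageDirType getStorageDirType() {
         return this;
       }

       public boolean isOfType(StorageDirType type) {
         return this == type;
       }
     }
-->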
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
|
|
<class name="Storage.StorageState" extends="java.lang.Enum"
|
|
abstract="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
|
|
<class name="StorageInfo" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="StorageInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="StorageInfo" type="int, int, long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getLayoutVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getNamespaceID" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getCTime" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setStorageInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
|
|
</method>
|
|
<field name="layoutVersion" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="namespaceID" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="cTime" type="long"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Common class for storage information.
|
|
|
|
TODO namespaceID should be long and computed as hash(address + port)]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
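<!-- Example: a small sketch showing StorageInfo as a plain holder for the three fields
     recorded in every VERSION file. The numeric values are made up.

     import org.apache.hadoop.hdfs.server.common.StorageInfo;

     public class StorageInfoDemo {
       public static void main(String[] args) {
         // Made-up values: layout version, namespace id, fs creation time.
         StorageInfo original = new StorageInfo(-18, 463031076, 0L);
         StorageInfo copy = new StorageInfo();   // no-arg constructor
         copy.setStorageInfo(original);          // copies the three fields

         System.out.println("layoutVersion=" + copy.getLayoutVersion()
             + " namespaceID=" + copy.getNamespaceID()
             + " cTime=" + copy.getCTime());
       }
     }
-->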
|
|
<!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
|
|
<interface name="Upgradeable" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="java.lang.Comparable"/>
|
|
<method name="getVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get the layout version of the upgrade object.
|
|
@return layout version]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get the type of the software component this object is upgrading.
|
|
@return type]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDescription" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Description of the upgrade object for displaying.
|
|
@return description]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUpgradeStatus" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Upgrade status determines a percentage of the work done out of the total
|
|
amount required by the upgrade.
|
|
|
|
100% means that the upgrade is completed.
|
|
Any value < 100 means it is not complete.
|
|
|
|
An implementation should report at least two distinct values, e.g. 0 and 100.
|
|
@return integer value in the range [0, 100].]]>
|
|
</doc>
|
|
</method>
|
|
<method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Prepare for the upgrade.
|
|
E.g. initialize upgrade data structures and set status to 0.
|
|
|
|
Returns an upgrade command that is used for broadcasting to other cluster
|
|
components.
|
|
E.g. name-node informs data-nodes that they must perform a distributed upgrade.
|
|
|
|
@return an UpgradeCommand for broadcasting.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Complete upgrade.
|
|
E.g. cleanup upgrade data structures or write metadata to disk.
|
|
|
|
Returns an upgrade command that is used for broadcasting to other cluster
|
|
components.
|
|
E.g. data-nodes inform the name-node that they completed the upgrade
|
|
while other data-nodes are still upgrading.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="details" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get status report for the upgrade.
|
|
|
|
@param details true if upgradeStatus details need to be included,
|
|
false otherwise
|
|
@return {@link UpgradeStatusReport}
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Common interface for distributed upgrade objects.
|
|
|
|
Each upgrade object corresponds to a layout version,
|
|
which is the latest version that should be upgraded using this object.
|
|
That is, all components whose layout version is greater than or equal to the
|
|
one returned by {@link #getVersion()} must be upgraded with this object.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
|
|
<class name="UpgradeManager" extends="java.lang.Object"
|
|
abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="UpgradeManager"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getUpgradeState" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getUpgradeVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setUpgradeState"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="uState" type="boolean"/>
|
|
<param name="uVersion" type="int"/>
|
|
</method>
|
|
<method name="getDistributedUpgrades" return="java.util.SortedSet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getUpgradeStatus" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="initializeUpgrade" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="isUpgradeCompleted" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="startUpgrade" return="boolean"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="completeUpgrade"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="currentUpgrades" type="java.util.SortedSet"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="upgradeState" type="boolean"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="upgradeVersion" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Generic upgrade manager.
|
|
|
|
{@link #broadcastCommand} is the command that should be]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
|
|
<class name="UpgradeObject" extends="java.lang.Object"
|
|
abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
|
|
<constructor name="UpgradeObject"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getUpgradeStatus" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDescription" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="details" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="compareTo" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
|
|
</method>
|
|
<method name="equals" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="o" type="java.lang.Object"/>
|
|
</method>
|
|
<method name="hashCode" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<field name="status" type="short"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Abstract upgrade object.
|
|
|
|
Contains default implementation of common methods of {@link Upgradeable}
|
|
interface.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
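<!-- Example: a minimal sketch of a distributed upgrade object built on UpgradeObject,
     which supplies the default Upgradeable behaviour; only the four remaining abstract
     methods are implemented. The class name, the layout version -19 and the returned
     null commands are assumptions for illustration.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
     import org.apache.hadoop.hdfs.server.common.UpgradeObject;
     import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;

     public class ExampleUpgrade extends UpgradeObject {
       public int getVersion() {
         return -19;                  // hypothetical layout version handled by this object
       }

       public NodeType getType() {
         return NodeType.DATA_NODE;   // component this upgrade applies to (assumed constant)
       }

       public UpgradeCommand startUpgrade() throws IOException {
         status = 0;                  // protected progress field inherited from UpgradeObject
         return null;                 // optionally a command to broadcast to the cluster
       }

       public UpgradeCommand completeUpgrade() throws IOException {
         status = 100;
         return null;
       }
     }
-->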
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
|
|
<class name="UpgradeObjectCollection" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="UpgradeObjectCollection"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getDistributedUpgrades" return="java.util.SortedSet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="versionFrom" type="int"/>
|
|
<param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Collection of upgrade objects.
|
|
|
|
Upgrade objects should be registered here before they can be used.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
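<!-- Example: a sketch of asking the registry which distributed upgrades apply when a
     node starts with an older layout version. The helper name and the choice of
     NodeType.DATA_NODE are assumptions.

     import java.io.IOException;
     import java.util.SortedSet;
     import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
     import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection;

     public class PendingUpgrades {
       static SortedSet upgradesFor(int oldLayoutVersion) throws IOException {
         // May be null or empty when no registered upgrade applies (assumption).
         return UpgradeObjectCollection.getDistributedUpgrades(
             oldLayoutVersion, NodeType.DATA_NODE);
       }
     }
-->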
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
|
|
<class name="UpgradeStatusReport" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="UpgradeStatusReport"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="UpgradeStatusReport" type="int, short, boolean"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get the layout version of the currently running upgrade.
|
|
@return layout version]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUpgradeStatus" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get the upgradeStatus as a percentage of the total upgrade done.
|
|
|
|
@see Upgradeable#getUpgradeStatus()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isFinalized" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Is current upgrade finalized.
|
|
@return true if finalized or false otherwise.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStatusText" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="details" type="boolean"/>
|
|
<doc>
|
|
<![CDATA[Get upgradeStatus data as a text for reporting.
|
|
Should be overridden to report upgrade-specific upgradeStatus data.
|
|
|
|
@param details true if upgradeStatus details need to be included,
|
|
false otherwise
|
|
@return text]]>
|
|
</doc>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Print basic upgradeStatus details.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="version" type="int"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="upgradeStatus" type="short"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="finalized" type="boolean"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Base upgrade status report class.
Subclass it if specific status fields need to be reported.
|
|
|
|
Describes status of current upgrade.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
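<!-- Example: a sketch of the Writable round-trip for UpgradeStatusReport using plain
     DataOutput/DataInput streams. The report values are made up.

     import java.io.ByteArrayInputStream;
     import java.io.ByteArrayOutputStream;
     import java.io.DataInputStream;
     import java.io.DataOutputStream;
     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;

     public class UpgradeStatusReportDemo {
       public static void main(String[] args) throws IOException {
         // Made-up values: layout version, percent complete, finalized flag.
         UpgradeStatusReport report = new UpgradeStatusReport(-19, (short) 42, false);

         ByteArrayOutputStream bytes = new ByteArrayOutputStream();
         report.write(new DataOutputStream(bytes));           // serialize

         UpgradeStatusReport copy = new UpgradeStatusReport();
         copy.readFields(new DataInputStream(
             new ByteArrayInputStream(bytes.toByteArray()))); // deserialize

         System.out.println(copy.getStatusText(true));        // detailed text report
       }
     }
-->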
|
|
<!-- start class org.apache.hadoop.hdfs.server.common.Util -->
|
|
<class name="Util" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="Util"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="now" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Current system time.
|
|
@return current time in msec.]]>
|
|
</doc>
|
|
</method>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.common.Util -->
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.datanode">
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
|
|
<class name="DataNode" extends="org.apache.hadoop.conf.Configured"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
|
|
<implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
|
|
<implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
|
|
<implements name="java.lang.Runnable"/>
|
|
<method name="createSocketAddr" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="target" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="newSocket" return="java.net.Socket"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Creates either an NIO or a regular socket, depending on socketWriteTimeout.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the DataNode object]]>
|
|
</doc>
|
|
</method>
|
|
<method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getNameNodeAddr" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getSelfAddr" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getNamenode" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the namenode's identifier]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setNewStorageID"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
</method>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Shut down this instance of the datanode.
|
|
Returns only after shutdown is complete.
|
|
This method can only be called by the offerService thread.
|
|
Otherwise, deadlock might occur.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="checkDiskError"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="e" type="java.io.IOException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="checkDiskError"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="offerService"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[Main loop for the DataNode. Runs until shutdown,
|
|
forever calling remote NameNode functions.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="notifyNamenodeReceivedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="delHint" type="java.lang.String"/>
|
|
</method>
|
|
<method name="run"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[No matter what kind of exception we get, keep retrying offerService().
|
|
That's the loop that connects to the NameNode and provides basic DataNode
|
|
functionality.
|
|
|
|
Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
|
|
</doc>
|
|
</method>
|
|
<method name="runDatanodeDaemon"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Start a single datanode daemon and wait for it to finish.
|
|
If this thread is specifically interrupted, it will stop waiting.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Instantiate a single datanode object. This must be run by invoking
|
|
{@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Instantiate and start a single datanode daemon and wait for it to finish.
|
|
If this thread is specifically interrupted, it will stop waiting.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="makeInstance" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="dataDirs" type="java.lang.String[]"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Make an instance of DataNode after ensuring that at least one of the
|
|
given data directories (and their parent directories, if necessary)
|
|
can be created.
|
|
@param dataDirs List of directories, where the new DataNode instance should
|
|
keep its files.
|
|
@param conf Configuration instance to use.
|
|
@return DataNode instance for given list of data dirs and conf, or null if
|
|
no directory from this directory list can be created.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="scheduleBlockReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="delay" type="long"/>
|
|
<doc>
|
|
<![CDATA[This method arranges for the data node to send the block report at the next heartbeat.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[This method is used for testing.
|
|
Examples are adding and deleting blocks directly.
|
|
The most common usage will be when the data node's storage is simulated.
|
|
|
|
@return the fsdataset that stores the blocks]]>
|
|
</doc>
|
|
</method>
|
|
<method name="main"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
</method>
|
|
<method name="getBlockMetaDataInfo" return="org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
|
|
<param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"/>
|
|
</method>
|
|
<method name="updateBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="finalize" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getProtocolVersion" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="protocol" type="java.lang.String"/>
|
|
<param name="clientVersion" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="keepLength" type="boolean"/>
|
|
<param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="EMPTY_DEL_HINT" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="ipcServer" type="org.apache.hadoop.ipc.Server"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="PKT_HEADER_LEN" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Header size for a packet]]>
|
|
</doc>
|
|
</field>
|
|
<doc>
|
|
<![CDATA[DataNode is a class (and program) that stores a set of
|
|
blocks for a DFS deployment. A single deployment can
|
|
have one or many DataNodes. Each DataNode communicates
|
|
regularly with a single NameNode. It also communicates
|
|
with client code and other DataNodes from time to time.
|
|
|
|
DataNodes store a series of named blocks. The DataNode
|
|
allows client code to read these blocks, or to write new
|
|
block data. The DataNode may also, in response to instructions
|
|
from its NameNode, delete blocks or copy blocks to/from other
|
|
DataNodes.
|
|
|
|
The DataNode maintains just one critical table:
|
|
block-> stream of bytes (of BLOCK_SIZE or less)
|
|
|
|
This info is stored on a local disk. The DataNode
|
|
reports the table's contents to the NameNode upon startup
|
|
and every so often afterwards.
|
|
|
|
DataNodes spend their lives in an endless loop of asking
|
|
the NameNode for something to do. A NameNode cannot connect
|
|
to a DataNode directly; a NameNode simply returns values from
|
|
functions invoked by a DataNode.
|
|
|
|
DataNodes maintain an open server socket so that client code
|
|
or other DataNodes can read/write data. The host/port for
|
|
this server is reported to the NameNode, which then sends that
|
|
information to clients or other DataNodes that might be interested.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
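<!-- Example: a minimal sketch of the embedding pattern described in the javadoc above:
     instantiateDataNode() creates the DataNode and, per the javadoc, must be followed by
     runDatanodeDaemon(). The empty argument array and default Configuration are assumptions.

     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.server.datanode.DataNode;

     public class EmbeddedDataNode {
       public static void main(String[] args) throws Exception {
         Configuration conf = new Configuration();  // picks up *-site.xml from the classpath

         // instantiateDataNode() may return null if no data directory could be created.
         DataNode dn = DataNode.instantiateDataNode(new String[0], conf);
         if (dn != null) {
           DataNode.runDatanodeDaemon(dn);          // per the javadoc, starts the daemon and waits
         }
       }
     }
-->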
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
|
|
<class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getStorageID" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="setFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="props" type="java.util.Properties"/>
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="props" type="java.util.Properties"/>
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="isConversionNeeded" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="corruptPreUpgradeStorage"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="rootDir" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Data storage information file.
|
|
<p>
|
|
@see Storage]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
|
|
<class name="FSDataset" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
|
|
<implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
|
|
<constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[An FSDataset has a directory where it loads its data files.]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getMetaFile" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="findBlockFile" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blockId" type="long"/>
|
|
<doc>
|
|
<![CDATA[Return the block file for the given ID]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blkid" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="metaFileExists" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getMetaDataLength" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getDfsUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return the total space used by dfs datanode]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return total capacity, used and unused]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRemaining" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Return how many bytes can still be stored in the FSDataset]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getLength" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Find the block's on-disk length]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockFile" return="java.io.File"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get File name for a given block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockInputStream" return="java.io.InputStream"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getBlockInputStream" return="java.io.InputStream"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="seekOffset" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="blkOffset" type="long"/>
|
|
<param name="ckoff" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns handles to the block file and its metadata file]]>
|
|
</doc>
|
|
</method>
|
|
<method name="detachBlock" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="numLinks" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Make a copy of the block if this block is linked to an existing
|
|
snapshot. This ensures that modifying this block does not modify
|
|
data in any existing snapshots.
|
|
@param block Block
|
|
@param numLinks Detach if the number of links exceeds this value
|
|
@throws IOException
|
|
@return - true if the specified block was detached]]>
|
|
</doc>
|
|
</method>
|
|
<method name="updateBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="writeToBlock" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="isRecovery" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Start writing to a block file
|
|
If isRecovery is true and the block pre-exists, then we kill all
|
|
other threads that might be writing to this block, and then reopen the file.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getChannelPosition" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Retrieves the offset in the block to which the
|
|
next write will write data.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setChannelPosition"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
|
|
<param name="dataOffset" type="long"/>
|
|
<param name="ckOffset" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Sets the offset in the block to which the
|
|
next write will write data.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="finalizeBlock"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Complete the block write!]]>
|
|
</doc>
|
|
</method>
|
|
<method name="unfinalizeBlock"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Remove the temporary block file (if any)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.Block[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return a table of block data]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isValidBlock" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<doc>
|
|
<![CDATA[Check whether the given block is a valid one.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="validateBlockMetadata"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="invalidate"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[We're informed that a block is no longer valid. We
|
|
could lazily garbage-collect the block, but why bother?
|
|
Just get rid of it.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFile" return="java.io.File"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<doc>
|
|
<![CDATA[Turn the block identifier into a filename.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="checkDataDir"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
|
|
<doc>
|
|
<![CDATA[Check if a data directory is healthy
|
|
@throws DiskErrorException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getStorageInfo" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<field name="METADATA_EXTENSION" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="METADATA_VERSION" type="short"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[FSDataset manages a set of data blocks. Each block
|
|
has a unique name and an extent on disk.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
|
|
<!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
|
|
<interface name="FSDatasetInterface" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
|
|
<method name="getMetaDataLength" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the length of the metadata file of the specified block
|
|
@param b - the block for which the metadata length is desired
|
|
@return the length of the metadata file for the specified block.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns metaData of block b as an input stream (and its length)
|
|
@param b - the block
|
|
@return the metadata input stream;
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="metaFileExists" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Does the meta file exist for this block?
|
|
@param b - the block
|
|
@return true if the metafile for the specified block exists
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getLength" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the specified block's on-disk length (excluding metadata)
|
|
@param b
|
|
@return the specified block's on-disk length (excluding metadata)
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blkid" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@return the generation stamp stored with the block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockInputStream" return="java.io.InputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns an input stream to read the contents of the specified block
|
|
@param b
|
|
@return an input stream to read the contents of the specified block
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockInputStream" return="java.io.InputStream"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="seekOffset" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns an input stream at specified offset of the specified block
|
|
@param b
|
|
@param seekOffset
|
|
@return an input stream to read the contents of the specified block,
|
|
starting at the offset
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="blkoff" type="long"/>
|
|
<param name="ckoff" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns an input stream at specified offset of the specified block
|
|
The block is still in the tmp directory and is not finalized
|
|
@param b
|
|
@param blkoff
|
|
@param ckoff
|
|
@return an input stream to read the contents of the specified block,
|
|
starting at the offset
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="writeToBlock" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="isRecovery" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Creates the block and returns output streams to write data and CRC
|
|
@param b
|
|
@param isRecovery True if this is part of error recovery, otherwise false
|
|
@return a BlockWriteStreams object to allow writing the block data
|
|
and CRC
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="updateBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Update the block to the new generation stamp and length.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="finalizeBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Finalizes the block previously opened for writing using writeToBlock.
|
|
The block size is what is in the parameter b and it must match the amount
|
|
of data written
|
|
@param b
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="unfinalizeBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
|
|
The temporary file associated with this block is deleted.
|
|
@param b
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.Block[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns the block report - the full list of blocks stored
|
|
@return - the block report - the full list of blocks stored]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isValidBlock" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<doc>
|
|
<![CDATA[Is the block valid?
|
|
@param b
|
|
@return - true if the specified block is valid]]>
|
|
</doc>
|
|
</method>
|
|
<method name="invalidate"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Invalidates the specified blocks
|
|
@param invalidBlks - the blocks to be invalidated
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="checkDataDir"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
|
|
<doc>
|
|
<![CDATA[Check if all the data directories are healthy
|
|
@throws DiskErrorException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Stringifies the name of the storage]]>
|
|
</doc>
|
|
</method>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Shutdown the FSDataset]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getChannelPosition" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the current offset in the data stream.
|
|
@param b
|
|
@param stream The stream to the data file and checksum file
|
|
@return the position of the file pointer in the data stream
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setChannelPosition"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
|
|
<param name="dataOffset" type="long"/>
|
|
<param name="ckOffset" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Sets the file pointer of the data stream and checksum stream to
|
|
the specified values.
|
|
@param b
|
|
@param stream The stream for the data file and checksum file
|
|
@param dataOffset The position to which the file pointer for the data stream
|
|
should be set
|
|
@param ckOffset The position to which the file pointer for the checksum stream
|
|
should be set
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="validateBlockMetadata"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Validate that the contents of the Block match
|
|
the file on disk. Returns true if everything is fine.
|
|
@param b The block to be verified.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This is an interface for the underlying storage that stores blocks for
|
|
a data node.
|
|
Examples are the FSDataset (which stores blocks on dirs) and
|
|
SimulatedFSDataset (which simulates data).]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
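<!-- Usage sketch: a minimal Java example of the block write path defined by
     FSDatasetInterface above. The class name FSDatasetWriteSketch, the method
     writeOneBlock, and the idea of receiving a ready-made dataset and Block are
     illustrative assumptions; only writeToBlock, finalizeBlock, getLength and
     isValidBlock come from the interface itself, and the actual writing of the
     data and CRC streams is elided.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.protocol.Block;
     import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;

     public class FSDatasetWriteSketch {
       /** Writes and finalizes one block using only methods listed above. */
       static void writeOneBlock(FSDatasetInterface dataset, Block b) throws IOException {
         FSDatasetInterface.BlockWriteStreams streams = dataset.writeToBlock(b, false);
         // ... write the block bytes and the matching CRC through 'streams' ...
         dataset.finalizeBlock(b);                  // size recorded in b must match bytes written
         boolean valid = dataset.isValidBlock(b);   // block now appears in getBlockReport()
         long onDiskLength = dataset.getLength(b);  // on-disk length, excluding metadata
       }
     }
-->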
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
|
|
<class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="java.io.Closeable"/>
|
|
<method name="close"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class contains the input streams for the data and checksum
|
|
of a block]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
|
|
<class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[This class contains the output streams for the data and checksum
|
|
of a block]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
|
|
<class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="getLength" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class provides the input stream and length of the metadata
|
|
of a block]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
|
|
<class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
|
|
abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="java.lang.Runnable"/>
|
|
<constructor name="UpgradeObjectDatanode"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="doUpgrade"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Specifies how the upgrade is performed.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="run"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Complete upgrade and return a status complete command for broadcasting.
|
|
|
|
Data-nodes finish upgrade at different times.
|
|
The data-node needs to re-confirm with the name-node that the upgrade
|
|
is complete while other nodes are still upgrading.]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Base class for data-node upgrade objects.
|
|
Data-node upgrades are run in separate threads.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
|
|
<class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This is the JMX MBean for reporting the DataNode Activity.
|
|
The MBean is registered using the name
|
|
"hadoop:service=DataNode,name=DataNodeActivity-<storageid>"
|
|
|
|
Many of the activity metrics are sampled and averaged on an interval
|
|
which can be specified in the metrics config file.
|
|
<p>
|
|
For the metrics that are sampled and averaged, one must specify
|
|
a metrics context that does periodic update calls. Most metrics contexts do.
|
|
The default Null metrics context however does NOT. So if you aren't
|
|
using any other metrics context then you can turn on the viewing and averaging
|
|
of sampled metrics by specifying the following two lines
|
|
in the hadoop-metrics.properties file:
|
|
<pre>
|
|
dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
|
dfs.period=10
|
|
</pre>
|
|
<p>
|
|
Note that the metrics are collected regardless of the context used.
|
|
The context with the update thread is used to average the data periodically
|
|
|
|
|
|
|
|
Impl details: We use a dynamic mbean that gets the list of the metrics
|
|
from the metrics registry passed as an argument to the constructor]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
|
|
<class name="DataNodeMetrics" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.metrics.Updater"/>
|
|
<constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="doUpdates"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
|
|
<doc>
|
|
<![CDATA[Since this object is a registered updater, this method will be called
|
|
periodically, e.g. every 5 seconds.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="resetAllMinMax"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="readMetadataOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[This class is for maintaining the various DataNode statistics
|
|
and publishing them through the metrics interfaces.
|
|
This also registers the JMX MBean for RPC.
|
|
<p>
|
|
This class has a number of metrics variables that are publicly accessible;
|
|
these variables (objects) have methods to update their values;
|
|
for example:
|
|
<p> {@link #blocksRead}.inc()]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
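<!-- Usage sketch: how the public metric fields of DataNodeMetrics are updated,
     following the {@link #blocksRead}.inc() example in the class doc above.
     The method recordRead and its parameters are illustrative; inc(long) on
     MetricsTimeVaryingLong and MetricsTimeVaryingRate is an assumption about
     those metrics classes, not something stated in this file.

     import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;

     public class DataNodeMetricsSketch {
       /** Records one served block read of the given size and duration. */
       static void recordRead(DataNodeMetrics metrics, long bytes, long elapsedMillis) {
         metrics.blocksRead.inc();               // counter update shown in the class doc
         metrics.bytesRead.inc(bytes);           // assumed inc(long) on MetricsTimeVaryingLong
         metrics.readBlockOp.inc(elapsedMillis); // assumed inc(long) on MetricsTimeVaryingRate
       }
     }
-->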
|
|
<!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
|
|
<interface name="FSDatasetMBean" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="getDfsUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the total space (in bytes) used by dfs datanode
|
|
@return the total space used by dfs datanode
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacity" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns total capacity (in bytes) of storage (used and unused)
|
|
@return total capacity of storage (used and unused)
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRemaining" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the amount of free storage space (in bytes)
|
|
@return The amount of free storage space
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStorageInfo" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns the storage id of the underlying storage]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This interface defines the methods to get the status of the FSDataset of
|
|
a data node.
|
|
It is also used for publishing via JMX (hence we follow the JMX naming
|
|
convention.)
|
|
Note we have not used the MetricsDynamicMBeanBase to implement this
|
|
because the interface for the FSDatasetMBean is stable and should
|
|
be published as an interface.
|
|
|
|
<p>
|
|
Data Node runtime statistic info is reported in another MBean
|
|
@see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
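<!-- Usage sketch: reading the storage figures that FSDatasetMBean publishes,
     all in bytes. The class name FSDatasetStatsSketch and the summary string
     format are illustrative; the four getters are taken from the interface above.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;

     public class FSDatasetStatsSketch {
       /** Builds a one-line summary of the dataset's storage usage. */
       static String summarize(FSDatasetMBean dataset) throws IOException {
         long capacity = dataset.getCapacity();    // total storage, used and unused
         long used = dataset.getDfsUsed();         // space used by dfs on this datanode
         long remaining = dataset.getRemaining();  // free storage space
         return dataset.getStorageInfo() + ": " + used + " of " + capacity
             + " bytes used, " + remaining + " bytes remaining";
       }
     }
-->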
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.namenode">
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
|
|
<class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.WritableComparable"/>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="compareTo" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
|
|
</method>
|
|
<method name="equals" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="o" type="java.lang.Object"/>
|
|
</method>
|
|
<method name="hashCode" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[A unique signature intended to identify checkpoint transactions.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
|
|
<class name="CorruptReplicasMap" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="CorruptReplicasMap"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="addToCorruptReplicasMap"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
|
|
<doc>
|
|
<![CDATA[Mark the block belonging to datanode as corrupt.
|
|
|
|
@param blk Block to be added to CorruptReplicasMap
|
|
@param dn DatanodeDescriptor which holds the corrupt replica]]>
|
|
</doc>
|
|
</method>
|
|
<method name="numCorruptReplicas" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
</method>
|
|
<method name="size" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Stores information about all corrupt blocks in the File System.
|
|
A Block is considered corrupt only if all of its replicas are
|
|
corrupt. While reporting replicas of a Block, we hide any corrupt
|
|
copies. These copies are removed once Block is found to have
|
|
expected number of good replicas.
|
|
Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
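<!-- Usage sketch: recording a corrupt replica and reading back the counts that
     CorruptReplicasMap maintains. The class name CorruptReplicasSketch and its
     parameters are illustrative; the three calls are the methods listed above.

     import org.apache.hadoop.hdfs.protocol.Block;
     import org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap;
     import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;

     public class CorruptReplicasSketch {
       /** Marks blk's replica on dn as corrupt and queries the map. */
       static void markCorrupt(CorruptReplicasMap map, Block blk, DatanodeDescriptor dn) {
         map.addToCorruptReplicasMap(blk, dn);                   // Block -> set of corrupt holders
         int corruptReplicasOfBlk = map.numCorruptReplicas(blk);
         int blocksWithCorruptReplicas = map.size();
       }
     }
-->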
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
|
|
<class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DatanodeDescriptor"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Default constructor]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DatanodeDescriptor constructor
|
|
@param nodeID id of the data node]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DatanodeDescriptor constructor
|
|
|
|
@param nodeID id of the data node
|
|
@param networkLocation location of the data node in network]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DatanodeDescriptor constructor
|
|
|
|
@param nodeID id of the data node
|
|
@param networkLocation location of the data node in network
|
|
@param hostName the host name, which may differ from the host specified in the DatanodeID]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DatanodeDescriptor constructor
|
|
|
|
@param nodeID id of the data node
|
|
@param capacity capacity of the data node
|
|
@param dfsUsed space used by the data node
|
|
@param remaining remaining capacity of the data node
|
|
@param xceiverCount # of data transfers at the data node]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DatanodeDescriptor constructor
|
|
|
|
@param nodeID id of the data node
|
|
@param networkLocation location of the data node in network
|
|
@param capacity capacity of the data node, including space used by non-dfs
|
|
@param dfsUsed the used space by dfs datanode
|
|
@param remaining remaining capacity of the data node
|
|
@param xceiverCount # of data transfers at the data node]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="numBlocks" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getBlocksScheduled" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return Approximate number of blocks currently scheduled to be written
|
|
to this datanode.]]>
|
|
</doc>
|
|
</method>
|
|
<field name="isAlive" type="boolean"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
|
|
such as available storage capacity, last update time, etc.,
|
|
and maintains a set of blocks stored on the datanode.
|
|
|
|
This data structure is internal
|
|
to the namenode. It is *not* sent over-the-wire to the Client
|
|
or the Datanodes. Neither is it stored persistently in the
|
|
fsImage.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
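<!-- Usage sketch: building a DatanodeDescriptor from a registered node's
     DatanodeID and reading two of the stats it tracks. The rack path
     "/default-rack" and the method name describe are illustrative placeholders;
     the constructor and the two getters are taken from the class above.

     import org.apache.hadoop.hdfs.protocol.DatanodeID;
     import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;

     public class DatanodeDescriptorSketch {
       /** Wraps a DatanodeID in a descriptor and reads its block counters. */
       static void describe(DatanodeID nodeID) {
         DatanodeDescriptor dn = new DatanodeDescriptor(nodeID, "/default-rack");
         int storedBlocks = dn.numBlocks();        // blocks currently tracked for this node
         int scheduled = dn.getBlocksScheduled();  // approximate blocks scheduled to be written
       }
     }
-->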
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
|
|
<class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
|
|
transient="false" volatile="false"
|
|
static="false" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
|
|
transient="false" volatile="false"
|
|
static="false" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Block and targets pair]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
|
|
<class name="FileChecksumServlets" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="FileChecksumServlets"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[Servlets for file checksum]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
|
|
<class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="FileChecksumServlets.GetServlet"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="ServletException" type="javax.servlet.ServletException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Get FileChecksum]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
|
|
<class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="FileChecksumServlets.RedirectServlet"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="ServletException" type="javax.servlet.ServletException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
|
|
<class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="FileDataServlet"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="createUri" return="java.net.URI"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="i" type="org.apache.hadoop.fs.FileStatus"/>
|
|
<param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
|
|
<param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<exception name="URISyntaxException" type="java.net.URISyntaxException"/>
|
|
<doc>
|
|
<![CDATA[Create a redirection URI]]>
|
|
</doc>
|
|
</method>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Service a GET request as described below.
|
|
Request:
|
|
{@code
|
|
GET http://<nn>:<port>/data[/<path>] HTTP/1.1
|
|
}]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
|
|
@see org.apache.hadoop.hdfs.HftpFileSystem]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
|
|
<class name="FsckServlet" extends="javax.servlet.http.HttpServlet"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="FsckServlet"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="ServletException" type="javax.servlet.ServletException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class is used in the Namesystem's Jetty server to run fsck on the namenode.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
|
|
<class name="FSEditLog" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="open"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create empty edit log files.
|
|
Initialize the output stream for logging.
|
|
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="createEditLogFile"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="close"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Shutdown the file store.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="logSync"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="logOpenFile"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Add open lease record to edit log.
|
|
Records the block locations of the last block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="logCloseFile"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
|
|
<doc>
|
|
<![CDATA[Add close lease record to edit log.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="logMkDir"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
|
|
<doc>
|
|
<![CDATA[Add create directory record to edit log]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
|
|
<class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="FSImage" type="java.io.File"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Represents an Image (image and edit file).]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="props" type="java.util.Properties"/>
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="setFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="props" type="java.util.Properties"/>
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Write last checkpoint time and version file into the storage directory.
|
|
|
|
The version file should always be written last.
|
|
A missing or corrupted version file indicates that
|
|
the checkpoint is not valid.
|
|
|
|
@param sd storage directory
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="isConversionNeeded" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="saveFSImage"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Save the contents of the FS image
|
|
and create empty edits.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="format"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFsEditName" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="corruptPreUpgradeStorage"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="rootDir" type="java.io.File"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="checkpointTime" type="long"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="removedStorageDirs" type="java.util.List"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[list of failed (and thus removed) storages]]>
|
|
</doc>
|
|
</field>
|
|
<doc>
|
|
<![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
|
|
<class name="FSNamesystem" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
|
|
<implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
|
|
<method name="getNamespaceDirs" return="java.util.Collection"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
</method>
|
|
<method name="getNamespaceEditsDirs" return="java.util.Collection"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
</method>
|
|
<method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the default path permission when upgrading from releases with no
|
|
permissions (<=0.15) to releases with permissions (>=0.16)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the FSNamesystem object]]>
|
|
</doc>
|
|
</method>
|
|
<method name="close"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Close down this file system manager.
|
|
Causes heartbeat and lease daemons to stop; waits briefly for
|
|
them to finish, but a short timeout returns control back to caller.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setPermission"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set permissions for an existing file.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setOwner"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="username" type="java.lang.String"/>
|
|
<param name="group" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set owner for an existing file.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="offset" type="long"/>
|
|
<param name="length" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get block locations within the specified range.
|
|
@see ClientProtocol#getBlockLocations(String, long, long)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="offset" type="long"/>
|
|
<param name="length" type="long"/>
|
|
<param name="doAccessTime" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get block locations within the specified range.
|
|
@see ClientProtocol#getBlockLocations(String, long, long)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTimes"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="mtime" type="long"/>
|
|
<param name="atime" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Stores the modification and access time for this inode.
|
|
The access time is precise up to an hour. The transaction, if needed, is
|
|
written to the edits log but is not flushed.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setReplication" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="replication" type="short"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Set replication for an existing file.
|
|
|
|
The NameNode sets new replication and schedules either replication of
|
|
under-replicated data blocks or removal of the excessive block copies
|
|
if the blocks are over-replicated.
|
|
|
|
@see ClientProtocol#setReplication(String, short)
|
|
@param src file name
|
|
@param replication new replication
|
|
@return true if successful;
|
|
false if file does not exist or is a directory]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client would like to obtain an additional block for the indicated
|
|
filename (which is being written to). Return an array that consists
|
|
of the block, plus a set of machines. The first on this list should
|
|
be where the client writes data. Subsequent items in the list must
|
|
be provided in the connection to the first datanode.
|
|
|
|
Make sure the previous blocks have been reported by datanodes and
|
|
are replicated. Will return an empty two-element array if we want the
|
|
client to "try again later".]]>
|
|
</doc>
|
|
</method>
|
|
<method name="abandonBlock" return="boolean"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="holder" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client would like to let go of the given block]]>
|
|
</doc>
|
|
</method>
|
|
<method name="completeFile" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="holder" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="markBlockAsCorrupt"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Mark the block belonging to the datanode as corrupt.
|
|
@param blk Block to be marked as corrupt
|
|
@param dn Datanode which holds the corrupt replica]]>
|
|
</doc>
|
|
</method>
|
|
<method name="invalidateBlock"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Invalidates the given block on the given datanode.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="renameTo" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="dst" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Change the indicated filename.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="recursive" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Remove the indicated filename from the namespace. If the filename
|
|
is a non-empty directory and recursive is set to false, then an exception is thrown.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create all the necessary directories]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get a listing of all files at 'src'. The Object[] array
|
|
exists so we can return file attributes (soon to be implemented)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="registerDatanode"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Register Datanode.
|
|
<p>
|
|
The purpose of registration is to identify whether the new datanode
|
|
serves a new data storage, and will report new data block copies,
|
|
which the namenode was not aware of; or the datanode is a replacement
|
|
node for the data storage that was previously served by a different
|
|
or the same (in terms of host:port) datanode.
|
|
The data storages are distinguished by their storageIDs. When a new
|
|
data storage is reported the namenode issues a new unique storageID.
|
|
<p>
|
|
Finally, the namenode returns its namespaceID as the registrationID
|
|
for the datanodes.
|
|
namespaceID is a persistent attribute of the name space.
|
|
The registrationID is checked every time the datanode is communicating
|
|
with the namenode.
|
|
Datanodes with inappropriate registrationID are rejected.
|
|
If the namenode stops and then restarts, it can restore its
|
|
namespaceID and will continue serving the datanodes that have previously
|
|
registered with the namenode without restarting the whole cluster.
|
|
|
|
@see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getRegistrationID" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get registrationID for datanodes based on the namespaceID.
|
|
|
|
@see #registerDatanode(DatanodeRegistration)
|
|
@see FSImage#newNamespaceID()
|
|
@return registration ID]]>
|
|
</doc>
|
|
</method>
|
|
<method name="computeDatanodeWork" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Compute block replication and block invalidation work
|
|
that can be scheduled on data-nodes.
|
|
The datanode will be informed of this work at the next heartbeat.
|
|
|
|
@return number of blocks scheduled for replication or removal.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setNodeReplicationLimit"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="limit" type="int"/>
|
|
</method>
|
|
<method name="removeDatanode"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[remove a datanode descriptor
|
|
@param nodeID datanode ID]]>
|
|
</doc>
|
|
</method>
|
|
<method name="processReport"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The given node is reporting all its blocks. Use this info to
|
|
update the (machine-->blocklist) and (block-->machinelist) tables.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="blockReceived"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="delHint" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The given node is reporting that it received a certain block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getMissingBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getCapacityTotal" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total raw bytes including non-dfs used space.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total used space by data nodes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityUsedPercent" return="float"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total used space by data nodes as percentage of total capacity]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityUsedNonDFS" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total used space by data nodes for non DFS purposes such
|
|
as storing temporary files on the local file system]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityRemaining" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total non-used raw bytes.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityRemainingPercent" return="float"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total remaining space by data nodes as percentage of total capacity]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getTotalLoad" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total number of connections.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
|
|
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
|
|
</method>
|
|
<method name="DFSNodesStatus"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="live" type="java.util.ArrayList"/>
|
|
<param name="dead" type="java.util.ArrayList"/>
|
|
</method>
|
|
<method name="stopDecommission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Stop decommissioning the specified datanodes.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<method name="getDFSNameNodeAddress" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="use {@link NameNode#getNameNodeAddress()} instead.">
|
|
<doc>
|
|
<![CDATA[@deprecated use {@link NameNode#getNameNodeAddress()} instead.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStartTime" return="java.util.Date"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="refreshNodes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Rereads the config to get hosts and exclude list file names.
|
|
Rereads the files to update the hosts and exclude lists. It
|
|
checks if any of the hosts have changed states:
|
|
1. Added to hosts --> no further work needed here.
|
|
2. Removed from hosts --> mark AdminState as decommissioned.
|
|
3. Added to exclude --> start decommission.
|
|
4. Removed from exclude --> stop decommission.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get data node by storage ID.
|
|
|
|
@param nodeID
|
|
@return DatanodeDescriptor or null if the node is not found.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="randomDataNode" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getRandomDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getBlocksTotal" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Get the total number of blocks in the system.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFilesTotal" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getPendingReplicationBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getUnderReplicatedBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getCorruptReplicaBlocksCount" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns number of blocks with corrupt replicas]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getScheduledReplicationBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getFSState" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[get FSNamesystemMetrics]]>
|
|
</doc>
|
|
</method>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[shutdown FSNamesystem]]>
|
|
</doc>
|
|
</method>
|
|
<method name="numLiveDataNodes" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Number of live data nodes
|
|
@return Number of live data nodes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="numDeadDataNodes" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Number of dead data nodes
|
|
@return Number of dead data nodes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setGenerationStamp"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="stamp" type="long"/>
|
|
<doc>
|
|
<![CDATA[Sets the generation stamp for this filesystem]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getGenerationStamp" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Gets the generation stamp for this filesystem]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="AUDIT_FORMAT" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="auditLog" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="corruptReplicas" type="org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="lmthread" type="org.apache.hadoop.util.Daemon"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="replthread" type="org.apache.hadoop.util.Daemon"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="fsNamesystemObject" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
|
|
transient="false" volatile="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[FSNamesystem does the actual bookkeeping work for the
|
|
DataNode.
|
|
|
|
It tracks several important tables.
|
|
|
|
1) valid fsname --> blocklist (kept on disk, logged)
|
|
2) Set of all valid blocks (inverted #1)
|
|
3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
|
|
4) machine --> blocklist (inverted #2)
|
|
5) LRU cache of updated-heartbeat machines]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
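<!--
  Illustrative sketch, hand-written rather than generated: reading the cluster summary
  counters that FSNamesystem exposes above. It assumes the code runs inside the NameNode
  process (for example from a servlet or JSP in this package), where the static accessor
  returns the live instance; in any other process getFSNamesystem() would not be initialized.

  import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

  public class NamesystemSummary {
    public static String summarize() {
      FSNamesystem fsn = FSNamesystem.getFSNamesystem();
      StringBuilder sb = new StringBuilder();
      sb.append("state: ").append(fsn.getFSState()).append('\n');
      sb.append("files: ").append(fsn.getFilesTotal()).append('\n');
      sb.append("blocks: ").append(fsn.getBlocksTotal()).append('\n');
      sb.append("capacity used: ").append(fsn.getCapacityUsed());
      sb.append(" of ").append(fsn.getCapacityTotal());
      sb.append(" (").append(fsn.getCapacityUsedPercent()).append("%)").append('\n');
      sb.append("live datanodes: ").append(fsn.numLiveDataNodes());
      sb.append(", dead datanodes: ").append(fsn.numDeadDataNodes());
      return sb.toString();
    }
  }
-->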
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
|
|
<class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="GetImageServlet"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="ServletException" type="javax.servlet.ServletException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class is used in the Namesystem's Jetty server to retrieve a file.
|
|
Typically used by the Secondary NameNode to retrieve image and
|
|
edit file for periodic checkpointing.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.JspHelper -->
|
|
<class name="JspHelper" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="JspHelper"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="randomNode" return="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="streamBlockInAscii"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="addr" type="java.net.InetSocketAddress"/>
|
|
<param name="blockId" type="long"/>
|
|
<param name="genStamp" type="long"/>
|
|
<param name="blockSize" type="long"/>
|
|
<param name="offsetIntoBlock" type="long"/>
|
|
<param name="chunkSizeToView" type="long"/>
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="DFSNodesStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="live" type="java.util.ArrayList"/>
|
|
<param name="dead" type="java.util.ArrayList"/>
|
|
</method>
|
|
<method name="addTableHeader"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="addTableRow"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<param name="columns" type="java.lang.String[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="addTableRow"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<param name="columns" type="java.lang.String[]"/>
|
|
<param name="row" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="addTableFooter"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getSafeModeText" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getWarningText" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="fsn" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"/>
|
|
</method>
|
|
<method name="getInodeLimitText" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getUpgradeStatusText" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="sortNodeList"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodes" type="java.util.ArrayList"/>
|
|
<param name="field" type="java.lang.String"/>
|
|
<param name="order" type="java.lang.String"/>
|
|
</method>
|
|
<method name="printPathWithLinks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="dir" type="java.lang.String"/>
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<param name="namenodeInfoPort" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="printGotoForm"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<param name="namenodeInfoPort" type="int"/>
|
|
<param name="file" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="createTitle"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="javax.servlet.jsp.JspWriter"/>
|
|
<param name="req" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="file" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="nameNodeAddr" type="java.net.InetSocketAddress"
|
|
transient="false" volatile="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="conf" type="org.apache.hadoop.conf.Configuration"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="webUGI" type="org.apache.hadoop.security.UnixUserGroupInformation"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="defaultChunkSizeToView" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.JspHelper -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
|
|
<class name="LeaseExpiredException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="LeaseExpiredException" type="java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[The lease that was being used to create this file has expired.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
|
|
<class name="LeaseManager" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<doc>
|
|
<![CDATA[@return the lease containing src]]>
|
|
</doc>
|
|
</method>
|
|
<method name="countLease" return="int"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[@return the number of leases currently in the system]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setLeasePeriod"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="softLimit" type="long"/>
|
|
<param name="hardLimit" type="long"/>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[LeaseManager does the lease housekeeping for writing on files.
|
|
This class also provides useful static methods for lease recovery.
|
|
|
|
Lease Recovery Algorithm
|
|
1) Namenode retrieves lease information
|
|
2) For each file f in the lease, consider the last block b of f
|
|
2.1) Get the datanodes which contain b
|
|
2.2) Assign one of the datanodes as the primary datanode p
|
|
|
|
2.3) p obtains a new generation stamp from the namenode
|
|
2.4) p gets the block info from each datanode
|
|
2.5) p computes the minimum block length
|
|
2.6) p updates the datanodes, which have a valid generation stamp,
|
|
with the new generation stamp and the minimum block length
|
|
2.7) p acknowledges the update results to the namenode
|
|
|
|
2.8) Namenode updates the BlockInfo
|
|
2.9) Namenode removes f from the lease
|
|
and removes the lease once all files have been removed
|
|
2.10) Namenode commits changes to the edit log]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
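<!--
  Illustrative sketch, hand-written rather than generated: in step 2.5 of the lease recovery
  algorithm documented above, the primary datanode adopts the minimum of the block lengths
  reported by the replicas, so the recovered length is one that every replica with a valid
  generation stamp actually holds. A small sketch of that computation in plain Java
  (hypothetical helper, not Hadoop code):

  public class MinReplicaLength {
    /** Returns the smallest reported replica length, or -1 if nothing was reported. */
    public static long minimumBlockLength(long[] reportedLengths) {
      if (reportedLengths == null || reportedLengths.length == 0) {
        return -1L;
      }
      long min = reportedLengths[0];
      for (long len : reportedLengths) {
        if (len < min) {
          min = len;
        }
      }
      return min;
    }
  }
-->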
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
|
|
<class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="ListPathsServlet"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="buildRoot" return="java.util.Map"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
|
|
<doc>
|
|
<![CDATA[Build a map from the query string, setting values and defaults.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="ServletException" type="javax.servlet.ServletException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Service a GET request as described below.
|
|
Request:
|
|
{@code
|
|
GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
|
|
}
|
|
|
|
Where <i>option</i> (default) in:
|
|
recursive ("no")
|
|
filter (".*")
|
|
exclude ("\..*\.crc")
|
|
|
|
Response: A flat list of files/directories in the following format:
|
|
{@code
|
|
<listing path="..." recursive="(yes|no)" filter="..."
|
|
time="yyyy-MM-dd hh:mm:ss UTC" version="...">
|
|
<directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
|
|
<file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
|
|
blocksize="..."
|
|
replication="..." size="..."/>
|
|
</listing>
|
|
}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="df" type="java.text.SimpleDateFormat"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Obtain meta-information about a filesystem.
|
|
@see org.apache.hadoop.hdfs.HftpFileSystem]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
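<!--
  Illustrative sketch, hand-written rather than generated: the listPaths GET request
  documented above, asking for a recursive listing and excluding checksum files. The
  namenode host, HTTP port, and path are placeholder assumptions; HftpFileSystem issues
  this request internally when listing an hftp filesystem.

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.net.URL;
  import java.net.URLEncoder;

  public class ListPathsExample {
    public static void main(String[] args) throws Exception {
      // GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*]
      String query = "recursive=yes&exclude=" + URLEncoder.encode("\\..*\\.crc", "UTF-8");
      URL url = new URL("http://namenode.example.com:50070/listPaths/user/alice?" + query);
      BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
      try {
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);  // the <listing> XML response described above
        }
      } finally {
        in.close();
      }
    }
  }
-->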
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
|
|
<class name="NameNode" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
|
|
<implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
|
|
<implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
|
|
<implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
|
|
<implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
|
|
<constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Start NameNode.
|
|
<p>
|
|
The name-node can be started with one of the following startup options:
|
|
<ul>
|
|
<li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
|
|
<li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
|
|
<li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
|
|
upgrade and create a snapshot of the current file system state</li>
|
|
<li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
|
|
cluster back to the previous state</li>
|
|
</ul>
|
|
The option is passed via configuration field:
|
|
<tt>dfs.namenode.startup</tt>
|
|
|
|
The conf will be modified to reflect the actual ports on which
|
|
the NameNode is up and running if the user passes the port as
|
|
<code>zero</code> in the conf.
|
|
|
|
@param conf configuration
|
|
@throws IOException]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getProtocolVersion" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="protocol" type="java.lang.String"/>
|
|
<param name="clientVersion" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="format"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Format a new filesystem. Destroys any filesystem that may already
|
|
exist at this location.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getAddress" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="address" type="java.lang.String"/>
|
|
</method>
|
|
<method name="getAddress" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
</method>
|
|
<method name="getUri" return="java.net.URI"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="namenode" type="java.net.InetSocketAddress"/>
|
|
</method>
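<!--
  Illustrative sketch, hand-written rather than generated: the two static helpers declared
  just above, getAddress(String) and getUri(InetSocketAddress), are enough to turn a plain
  "host:port" setting into a filesystem URI. A minimal sketch; the host name and port are
  placeholder assumptions:

  import java.net.InetSocketAddress;
  import java.net.URI;
  import org.apache.hadoop.hdfs.server.namenode.NameNode;

  public class NameNodeAddressExample {
    public static void main(String[] args) {
      InetSocketAddress addr = NameNode.getAddress("namenode.example.com:8020");
      URI uri = NameNode.getUri(addr);
      System.out.println(addr.getHostName() + ":" + addr.getPort());
      System.out.println(uri);  // expected to be an hdfs URI for the same host and port
    }
  }
-->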
|
|
<method name="join"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Wait for service to finish.
|
|
(Normally, it runs forever.)]]>
|
|
</doc>
|
|
</method>
|
|
<method name="stop"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Stop all NameNode threads and wait for all to finish.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
|
|
<param name="size" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[return a list of blocks & their locations on <code>datanode</code> whose
|
|
total size is <code>size</code>
|
|
|
|
@param datanode on which blocks are located
|
|
@param size total size of blocks]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="offset" type="long"/>
|
|
<param name="length" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="create"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<param name="overwrite" type="boolean"/>
|
|
<param name="replication" type="short"/>
|
|
<param name="blockSize" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setReplication" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="replication" type="short"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setPermission"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setOwner"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="username" type="java.lang.String"/>
|
|
<param name="groupname" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="abandonBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="holder" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client needs to give up on the block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="complete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="reportBadBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[The client has detected an error on the specified located blocks
|
|
and is reporting them to the server. For now, the namenode will
|
|
mark the block as corrupt. In the future we might
|
|
check whether the blocks are actually corrupt.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="nextGenerationStamp" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="commitBlockSynchronization"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="newgenerationstamp" type="long"/>
|
|
<param name="newlength" type="long"/>
|
|
<param name="closeFile" type="boolean"/>
|
|
<param name="deleteblock" type="boolean"/>
|
|
<param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getPreferredBlockSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="filename" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="rename" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="dst" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="delete" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="recursive" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="mkdirs" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="renewLease"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get the file info for a specific file.
|
|
@param src The string representation of the path to the file
|
|
@throws IOException if permission to access the file is denied by the system
|
|
@return object containing information regarding the file,
|
|
or null if the file is not found]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getStats" return="long[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="setSafeMode" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="isInSafeMode" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Is the cluster currently in safe mode?]]>
|
|
</doc>
|
|
</method>
|
|
<method name="saveNamespace"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshNodes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Refresh the list of datanodes that the namenode should allow to
|
|
connect. Re-reads the configuration by creating a new Configuration object and
|
|
uses the files listed in the configuration to update the list.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getEditLogSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the size of the current edit log.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Roll the edit log.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rollFsImage"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Roll the fsImage.]]>
|
|
</doc>
|
|
</method>
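<!-- Usage sketch: a minimal illustration of the checkpoint roll sequence exposed by
     getEditLogSize, rollEditLog and rollFsImage. The variable "nn" and the class name
     "CheckpointRollSketch" are hypothetical; the image fetch/merge step is out of scope here.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
     import org.apache.hadoop.hdfs.server.namenode.NameNode;

     public class CheckpointRollSketch {
       // "nn" is assumed to be a handle to a running NameNode.
       static void rollOnce(NameNode nn) throws IOException {
         long editsSize = nn.getEditLogSize();         // size of the current edit log, in bytes
         if (editsSize > 0) {
           CheckpointSignature sig = nn.rollEditLog(); // namenode starts writing a fresh edits file
           // an external checkpointer would now fetch and merge fsimage and edits (not shown)
           nn.rollFsImage();                           // namenode adopts the newly uploaded image
         }
       }
     }
-->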
|
|
<method name="finalizeUpgrade"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="metaSave"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="filename" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Dumps namenode state into the specified file.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setQuota"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="path" type="java.lang.String"/>
|
|
<param name="namespaceQuota" type="long"/>
|
|
<param name="diskspaceQuota" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="fsync"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="clientName" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTimes"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="src" type="java.lang.String"/>
|
|
<param name="mtime" type="long"/>
|
|
<param name="atime" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="register" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="capacity" type="long"/>
|
|
<param name="dfsUsed" type="long"/>
|
|
<param name="remaining" type="long"/>
|
|
<param name="xmitsInProgress" type="int"/>
|
|
<param name="xceiverCount" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[A data node notifies the name node that it is alive.
|
|
Returns an array of block-oriented commands for the datanode to execute.
|
|
These will be either transfer or delete operations.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="blocks" type="long[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="blockReceived"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
|
|
<param name="delHints" type="java.lang.String[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="errorReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="errorCode" type="int"/>
|
|
<param name="msg" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="verifyRequest"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Verify request.
|
|
|
|
Verifies the correctness of the datanode version and registration ID, and
|
|
checks that the datanode does not need to be shut down.
|
|
|
|
@param nodeReg data node registration
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="verifyVersion"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="version" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Verify version.
|
|
|
|
@param version
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFsImageName" return="java.io.File"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the name of the fsImage file]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getFsImageNameCheckpoint" return="java.io.File[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Returns the name of the fsImage file uploaded by periodic
|
|
checkpointing]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getNameNodeAddress" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns the address on which the NameNode is listening.
|
|
@return the address on which the NameNode is listening.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getHttpAddress" return="java.net.InetSocketAddress"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Returns the address of the NameNode's HTTP server,
|
|
which is used to access the name-node web UI.
|
|
|
|
@return the http address.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshServiceAcl"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="main"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
</method>
|
|
<field name="DEFAULT_PORT" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="stateChangeLog" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[NameNode serves as both directory namespace manager and
|
|
"inode table" for the Hadoop DFS. There is a single NameNode
|
|
running in any DFS deployment. (Well, except when there
|
|
is a second backup/failover NameNode.)
|
|
|
|
The NameNode controls two critical tables:
|
|
1) filename->blocksequence (namespace)
|
|
2) block->machinelist ("inodes")
|
|
|
|
The first table is stored on disk and is very precious.
|
|
The second table is rebuilt every time the NameNode comes
|
|
up.
|
|
|
|
'NameNode' refers both to this class and to the 'NameNode server'.
|
|
The 'FSNamesystem' class actually performs most of the filesystem
|
|
management. The majority of the 'NameNode' class itself is concerned
|
|
with exposing the IPC interface and the http server to the outside world,
|
|
plus some configuration management.
|
|
|
|
NameNode implements the ClientProtocol interface, which allows
|
|
clients to ask for DFS services. ClientProtocol is not
|
|
designed for direct use by authors of DFS client code. End-users
|
|
should instead use the org.apache.hadoop.fs.FileSystem class.
|
|
|
|
NameNode also implements the DatanodeProtocol interface, used by
|
|
DataNode programs that actually store DFS data blocks. These
|
|
methods are invoked repeatedly and automatically by all the
|
|
DataNodes in a DFS deployment.
|
|
|
|
NameNode also implements the NamenodeProtocol interface, used by
|
|
secondary namenodes or rebalancing processes to get a partial view of the
|
|
namenode's state, for example a partial blocksMap.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
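<!-- Usage sketch: as the class description above says, client code should use the
     FileSystem API rather than ClientProtocol directly. A minimal example; the path
     and file contents are illustrative only.

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FSDataOutputStream;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;

     public class HdfsClientSketch {
       public static void main(String[] args) throws IOException {
         Configuration conf = new Configuration();  // picks up fs.default.name from the site config
         FileSystem fs = FileSystem.get(conf);      // DistributedFileSystem for an hdfs:// URI
         Path p = new Path("/tmp/example.txt");     // illustrative path
         FSDataOutputStream out = fs.create(p);     // ends up in ClientProtocol.create on the NameNode
         out.writeBytes("hello\n");
         out.close();                               // ClientProtocol.complete finalizes the file
         System.out.println(fs.getFileStatus(p).getLen());
       }
     }
-->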
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
|
|
<class name="NamenodeFsck" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="NamenodeFsck" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NameNode, java.util.Map, javax.servlet.http.HttpServletResponse"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Filesystem checker.
|
|
@param conf configuration (namenode config)
|
|
@param nn namenode that this fsck is going to use
|
|
@param pmap key=value[] map that is passed to the HTTP servlet as URL parameters
|
|
@param response the object into which this servlet writes the URL contents
|
|
@throws IOException]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="fsck"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Check files on DFS, starting from the indicated path.
|
|
@throws Exception]]>
|
|
</doc>
|
|
</method>
|
|
<method name="run" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[@param args]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="CORRUPT_STATUS" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="HEALTHY_STATUS" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="NONEXISTENT_STATUS" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="FAILURE_STATUS" type="java.lang.String"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="FIXING_NONE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Don't attempt any fixing.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="FIXING_MOVE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Move corrupted files to /lost+found.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="FIXING_DELETE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Delete corrupted files.]]>
|
|
</doc>
|
|
</field>
|
|
<doc>
|
|
<![CDATA[This class provides rudimentary checking of DFS volumes for errors and
|
|
sub-optimal conditions.
|
|
<p>The tool scans all files and directories, starting from an indicated
|
|
root path. The following abnormal conditions are detected and handled:</p>
|
|
<ul>
|
|
<li>files with blocks that are completely missing from all datanodes.<br/>
|
|
In this case the tool can perform one of the following actions:
|
|
<ul>
|
|
<li>none ({@link #FIXING_NONE})</li>
|
|
<li>move corrupted files to /lost+found directory on DFS
|
|
({@link #FIXING_MOVE}). Remaining data blocks are saved as
|
|
block chains, representing the longest consecutive series of valid blocks.</li>
|
|
<li>delete corrupted files ({@link #FIXING_DELETE})</li>
|
|
</ul>
|
|
</li>
|
|
<li>detect files with under-replicated or over-replicated blocks</li>
|
|
</ul>
|
|
Additionally, the tool collects detailed overall DFS statistics, and
|
|
optionally can print detailed statistics on block locations and replication
|
|
factors of each file.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.FsckResult -->
|
|
<class name="NamenodeFsck.FsckResult" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="NamenodeFsck.FsckResult"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="isHealthy" return="boolean"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[DFS is considered healthy if there are no missing blocks.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="addMissing"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="id" type="java.lang.String"/>
|
|
<param name="size" type="long"/>
|
|
<doc>
|
|
<![CDATA[Add a missing block name, plus its size.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getMissingIds" return="java.util.ArrayList"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return a list of missing block names (as list of Strings).]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getMissingSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return total size of missing data, in bytes.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setMissingSize"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="missingSize" type="long"/>
|
|
</method>
|
|
<method name="getExcessiveReplicas" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the number of over-replicated blocks.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setExcessiveReplicas"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="overReplicatedBlocks" type="long"/>
|
|
</method>
|
|
<method name="getReplicationFactor" return="float"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the actual replication factor.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getMissingReplicas" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the number of under-replicated blocks. Note: missing blocks are not counted here.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setMissingReplicas"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="underReplicatedBlocks" type="long"/>
|
|
</method>
|
|
<method name="getTotalDirs" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return total number of directories encountered during this scan.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalDirs"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalDirs" type="long"/>
|
|
</method>
|
|
<method name="getTotalFiles" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return total number of files encountered during this scan.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalFiles"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalFiles" type="long"/>
|
|
</method>
|
|
<method name="getTotalOpenFiles" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return total number of files opened for write encountered during this scan.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalOpenFiles"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalOpenFiles" type="long"/>
|
|
<doc>
|
|
<![CDATA[Set total number of open files encountered during this scan.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getTotalSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return total size of scanned data, in bytes.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalSize"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalSize" type="long"/>
|
|
</method>
|
|
<method name="getTotalOpenFilesSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return total size of open files data, in bytes.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalOpenFilesSize"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalOpenFilesSize" type="long"/>
|
|
</method>
|
|
<method name="getReplication" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the intended replication factor, against which the over/under-
|
|
replicated blocks are counted. Note: this value comes from the current
|
|
Configuration supplied for the tool, so it may be different from the
|
|
value in the DFS configuration.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setReplication"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="replication" type="int"/>
|
|
</method>
|
|
<method name="getTotalBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the total number of blocks in the scanned area.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalBlocks" type="long"/>
|
|
</method>
|
|
<method name="getTotalOpenFilesBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the total number of blocks held by open files.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setTotalOpenFilesBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="totalOpenFilesBlocks" type="long"/>
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getCorruptFiles" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Return the number of corrupted files.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setCorruptFiles"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="corruptFiles" type="long"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[FsckResult of checking, plus overall DFS statistics.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.FsckResult -->
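<!-- Usage sketch: filling in and inspecting an FsckResult through the accessors listed
     above. All numbers and the block id are made up for illustration.

     import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;

     public class FsckResultSketch {
       public static void main(String[] args) {
         NamenodeFsck.FsckResult res = new NamenodeFsck.FsckResult();
         res.setTotalFiles(1200L);
         res.setTotalDirs(80L);
         res.setTotalBlocks(4500L);
         res.addMissing("blk_1234", 64L * 1024L * 1024L); // hypothetical missing block id and size
         res.setMissingReplicas(3L);
         // Per the isHealthy() contract, a recorded missing block means the DFS is not healthy.
         System.out.println(res.isHealthy());
         System.out.println(res.toString()); // overall statistics summary
       }
     }
-->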
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
|
|
<class name="NotReplicatedYetException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="NotReplicatedYetException" type="java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[The file has not finished being written to enough datanodes yet.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
|
|
<class name="SafeModeException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[This exception is thrown when the name node is in safe mode.
|
|
Clients cannot modify the namespace until safe mode is off.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
|
|
<class name="SecondaryNameNode" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="java.lang.Runnable"/>
|
|
<constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Create a connection to the primary namenode.]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Shut down this instance of the secondary namenode.
|
|
Returns only after shutdown is complete.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="run"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="main"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[main() has some simple utility methods.
|
|
@param argv Command line parameters.
|
|
@exception Exception if the filesystem does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[The Secondary NameNode is a helper to the primary NameNode.
|
|
The Secondary is responsible for supporting periodic checkpoints
|
|
of the HDFS metadata. The current design allows only one Secondary
|
|
NameNode per HDFs cluster.
|
|
|
|
The Secondary NameNode is a daemon that periodically wakes
|
|
up (determined by the schedule specified in the configuration),
|
|
triggers a periodic checkpoint and then goes back to sleep.
|
|
The Secondary NameNode uses the ClientProtocol to talk to the
|
|
primary NameNode.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
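<!-- Usage sketch: embedding a SecondaryNameNode. It is a Runnable, so it can be driven
     from a thread and stopped with shutdown(). The sleep interval is illustrative; the
     configuration is assumed to point at the primary namenode.

     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;

     public class SecondaryNameNodeSketch {
       public static void main(String[] args) throws IOException, InterruptedException {
         Configuration conf = new Configuration();                   // site configuration
         SecondaryNameNode secondary = new SecondaryNameNode(conf);  // connects to the primary
         Thread checkpointer = new Thread(secondary, "checkpointer");
         checkpointer.setDaemon(true);
         checkpointer.start();        // run() wakes up periodically and triggers checkpoints
         Thread.sleep(60L * 1000L);   // let it run for a while
         secondary.shutdown();        // returns only after shutdown is complete
       }
     }
-->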
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
|
|
<class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="StreamFile"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get a client for connecting to DFS.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="doGet"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="request" type="javax.servlet.http.HttpServletRequest"/>
|
|
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
|
|
<exception name="ServletException" type="javax.servlet.ServletException"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
|
|
<class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
|
|
abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="UpgradeObjectNamenode"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="true" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Process an upgrade command.
|
|
RPC has only one very generic command for all upgrade-related
|
|
inter-component communication.
|
|
The actual command recognition and execution should be handled here.
|
|
The reply is also sent back as an UpgradeCommand.
|
|
|
|
@param command
|
|
@return the reply command which is analyzed on the client side.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="forceProceed"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Base class for name-node upgrade objects.
|
|
Data-node upgrades are run in separate threads.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
|
|
<!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
|
|
<interface name="FSNamesystemMBean" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<method name="getFSState" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[The state of the file system: Safemode or Operational
|
|
@return the state]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getBlocksTotal" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Number of allocated blocks in the system
|
|
@return - number of allocated blocks]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityTotal" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total storage capacity
|
|
@return - total capacity in bytes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityRemaining" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Free (unused) storage capacity
|
|
@return - free capacity in bytes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getCapacityUsed" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Used storage capacity
|
|
@return - used capacity in bytes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getFilesTotal" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total number of files and directories
|
|
@return - num of files and directories]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getPendingReplicationBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Blocks pending to be replicated
|
|
@return - num of blocks to be replicated]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getUnderReplicatedBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Blocks under replicated
|
|
@return - num of blocks under replicated]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getScheduledReplicationBlocks" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Blocks scheduled for replication
|
|
@return - num of blocks scheduled for replication]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getTotalLoad" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Total Load on the FSNamesystem
|
|
@return - total load of FSNamesystem]]>
|
|
</doc>
|
|
</method>
|
|
<method name="numLiveDataNodes" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Number of Live data nodes
|
|
@return number of live data nodes]]>
|
|
</doc>
|
|
</method>
|
|
<method name="numDeadDataNodes" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Number of dead data nodes
|
|
@return number of dead data nodes]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This interface defines the methods to get the status of the FSNamesystem of
|
|
a name node.
|
|
It is also used for publishing via JMX (hence we follow the JMX naming
|
|
convention).
|
|
|
|
Note we have not used the MetricsDynamicMBeanBase to implement this
|
|
because the interface for the NameNodeStateMBean is stable and should
|
|
be published as an interface.
|
|
|
|
<p>
|
|
Name Node runtime activity statistics are reported in another MBean
|
|
@see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
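<!-- Usage sketch: reading these values over JMX from inside the NameNode JVM (a remote
     JMXConnector would be needed otherwise). Attribute names follow from the getters above;
     the ObjectName shown is an assumption and should be checked against the beans the
     NameNode actually registers.

     import java.lang.management.ManagementFactory;
     import javax.management.MBeanServer;
     import javax.management.ObjectName;

     public class FsNamesystemJmxSketch {
       public static void main(String[] args) throws Exception {
         MBeanServer server = ManagementFactory.getPlatformMBeanServer();
         ObjectName name = new ObjectName("hadoop:service=NameNode,name=FSNamesystemState"); // assumed
         String state = (String) server.getAttribute(name, "FSState");       // "Safemode" or "Operational"
         long blocksTotal = (Long) server.getAttribute(name, "BlocksTotal"); // allocated blocks
         long capacityUsed = (Long) server.getAttribute(name, "CapacityUsed");
         System.out.println(state + " blocks=" + blocksTotal + " used=" + capacityUsed);
       }
     }
-->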
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
|
|
<class name="FSNamesystemMetrics" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.metrics.Updater"/>
|
|
<constructor name="FSNamesystemMetrics" type="org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="doUpdates"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
|
|
<doc>
|
|
<![CDATA[Since this object is a registered updater, this method will be called
|
|
periodically, e.g. every 5 seconds.
|
|
We set the metrics values within this function before pushing them out.
|
|
FSNamesystem updates its own local variables, which are
|
|
lightweight compared to Metrics counters.
|
|
|
|
Some of the metrics are explicitly cast to int, since a few metrics collectors
|
|
do not handle long values. It is safe to cast to int for now as all these
|
|
values fit in an int.
|
|
Metrics related to DFS capacity are stored in bytes, which do not fit in
|
|
an int, so they are rounded to GB.]]>
|
|
</doc>
|
|
</method>
|
|
<field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="filesTotal" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blocksTotal" type="org.apache.hadoop.metrics.util.MetricsLongValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="capacityTotalGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="capacityUsedGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="capacityRemainingGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="totalLoad" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="pendingReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="underReplicatedBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="scheduledReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="missingBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[This class is for maintaining the various FSNamesystem status metrics
|
|
and publishing them through the metrics interfaces.
|
|
The FSNamesystem creates and registers the JMX MBean.
|
|
<p>
|
|
This class has a number of metrics variables that are publicly accessible;
|
|
these variables (objects) have methods to update their values;
|
|
for example:
|
|
<p> {@link #filesTotal}.set()]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivtyMBean -->
|
|
<class name="NameNodeActivtyMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="NameNodeActivtyMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
|
|
static="false" final="false" visibility="protected"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This is the JMX MBean for reporting the NameNode Activity.
|
|
The MBean is registered using the name
|
|
"hadoop:service=NameNode,name=NameNodeActivity"
|
|
|
|
Many of the activity metrics are sampled and averaged on an interval
|
|
which can be specified in the metrics config file.
|
|
<p>
|
|
For the metrics that are sampled and averaged, one must specify
|
|
a metrics context that does periodic update calls. Most metrics contexts do.
|
|
The default Null metrics context however does NOT. So if you aren't
|
|
using any other metrics context then you can turn on the viewing and averaging
|
|
of sampled metrics by specifying the following two lines
|
|
in the hadoop-metrics.properties file:
|
|
<pre>
|
|
dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
|
|
dfs.period=10
|
|
</pre>
|
|
<p>
|
|
Note that the metrics are collected regardless of the context used.
|
|
The context with the update thread is used to average the data periodically.
|
|
|
|
|
|
|
|
Impl details: We use a dynamic MBean that gets the list of the metrics
|
|
from the metrics registry passed as an argument to the constructor.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivtyMBean -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
|
|
<class name="NameNodeMetrics" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.metrics.Updater"/>
|
|
<constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NameNode"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="shutdown"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="doUpdates"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
|
|
<doc>
|
|
<![CDATA[Since this object is a registered updater, this method will be called
|
|
periodically, e.g. every 5 seconds.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="resetAllMinMax"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[This class is for maintaining the various NameNode activity statistics
and publishing them through the metrics interfaces.
This also registers the JMX MBean for RPC.
<p>
This class has a number of metrics variables that are publicly accessible;
these variables (objects) have methods to update their values;
for example:
<p> {@link #syncs}.inc()]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
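<!-- Illustrative usage sketch: the class description above notes that the public metric
     fields are updated directly, e.g. {@link #syncs}.inc(). A minimal sketch of that
     pattern, assuming a NameNodeMetrics instance named "metrics" constructed via
     new NameNodeMetrics(conf, namenode); the helper method name below is hypothetical.

     void recordCreateFile(NameNodeMetrics metrics) {
       // MetricsTimeVaryingInt counters listed above are bumped with inc()
       metrics.numFilesCreated.inc();
       metrics.numCreateFileOps.inc();
     }

     The updated values are published when the metrics framework periodically calls
     doUpdates(), since the class registers itself as an Updater.
-->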
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.server.protocol">
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
|
|
<class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="BlockCommand"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="BlockCommand" type="int, java.util.List"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Create BlockCommand for transferring blocks to another datanode
|
|
@param blocktargetlist blocks to be transferred]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Create BlockCommand for the given action
|
|
@param blocks blocks related to the action]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[A BlockCommand is an instruction to a datanode
regarding some blocks under its control. It tells
the DataNode to either invalidate a set of indicated
blocks, or to copy a set of indicated blocks to
another DataNode.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
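<!-- Illustrative usage sketch: per the class description above, a BlockCommand pairs a
     DatanodeProtocol action code with the blocks it applies to. A minimal sketch, assuming
     "staleBlocks" is an org.apache.hadoop.hdfs.protocol.Block[] the NameNode wants removed
     (the variable name is hypothetical):

     BlockCommand invalidate =
         new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, staleBlocks);
     Block[] toDrop = invalidate.getBlocks();   // what the DataNode will delete

     Transfers use DNA_TRANSFER instead, in which case getTargets() lists the destination
     DatanodeInfo[][] for each block.
-->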
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo -->
|
|
<class name="BlockMetaDataInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="BlockMetaDataInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="BlockMetaDataInfo" type="org.apache.hadoop.hdfs.protocol.Block, long"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getLastScanTime" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[Meta data information for a block]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
|
|
<class name="BlocksWithLocations" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Constructor with one parameter]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[getter]]>
|
|
</doc>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[serialization method]]>
|
|
</doc>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[deserialization method]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[A class to implement an array of BlockLocations.
It provides efficient customized serialization/deserialization methods
instead of using the default array (de)serialization provided by RPC]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
|
|
<class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
|
|
abstract="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="BlocksWithLocations.BlockWithLocations"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[default constructor]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[constructor]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[get the block]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getDatanodes" return="java.lang.String[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[get the block's locations]]>
|
|
</doc>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[deserialization method]]>
|
|
</doc>
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[serialization method]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[A class to keep track of a block and its locations]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
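<!-- Illustrative usage sketch: a minimal walk over the two classes above, assuming "bwl" is
     a BlocksWithLocations returned by NamenodeProtocol#getBlocks (documented later in this
     package); only the getters listed above are used.

     for (BlocksWithLocations.BlockWithLocations b : bwl.getBlocks()) {
       Block blk = b.getBlock();              // the block itself
       String[] datanodes = b.getDatanodes(); // names of the datanodes holding it
     }
-->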
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
|
|
<class name="DatanodeCommand" extends="java.lang.Object"
|
|
abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="DatanodeCommand"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getAction" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
|
|
<!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
|
|
<interface name="DatanodeProtocol" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
|
|
<method name="register" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Register Datanode.

@see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
@see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)

@return an updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
a new storageID if the datanode did not have one, and
a registration ID for further communication.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="capacity" type="long"/>
|
|
<param name="dfsUsed" type="long"/>
|
|
<param name="remaining" type="long"/>
|
|
<param name="xmitsInProgress" type="int"/>
|
|
<param name="xceiverCount" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
|
|
alive and well. Includes some status info, too.
|
|
It also gives the NameNode a chance to return
|
|
an array of "DatanodeCommand" objects.
|
|
A DatanodeCommand tells the DataNode to invalidate local block(s),
|
|
or to copy them to other DataNodes, etc.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="blocks" type="long[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
|
|
The NameNode returns an array of Blocks that have become obsolete
|
|
and should be deleted. This function is meant to upload *all*
|
|
the locally-stored blocks. It's invoked upon startup and then
|
|
infrequently afterwards.
|
|
@param registration
|
|
@param blocks - the block list as an array of longs.
|
|
Each block is represented as 2 longs.
|
|
This is done instead of Block[] to reduce memory used by block reports.
|
|
|
|
@return - the next command for DN to process.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="blockReceived"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
|
|
<param name="delHints" type="java.lang.String[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[blockReceived() allows the DataNode to tell the NameNode about
recently-received block data, with a hint for the preferred replica
to be deleted when there are excess blocks.
For example, whenever client code
writes a new Block here, or another DataNode copies a Block to
this DataNode, it will call blockReceived().]]>
|
|
</doc>
|
|
</method>
|
|
<method name="errorReport"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
|
|
<param name="errorCode" type="int"/>
|
|
<param name="msg" type="java.lang.String"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[errorReport() tells the NameNode about something that has gone
|
|
awry. Useful for debugging.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[This is a very general way to send a command to the name-node during
the distributed upgrade process.

The command is kept general because the variety of upgrade commands is unpredictable.
The reply from the name-node is also received in the form of an upgrade
command.

@return a reply in the form of an upgrade command]]>
|
|
</doc>
|
|
</method>
|
|
<method name="reportBadBlocks"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
|
|
}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="nextGenerationStamp" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@return the next GenerationStamp to be associated with the specified
|
|
block.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="commitBlockSynchronization"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="newgenerationstamp" type="long"/>
|
|
<param name="newlength" type="long"/>
|
|
<param name="closeFile" type="boolean"/>
|
|
<param name="deleteblock" type="boolean"/>
|
|
<param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Commit block synchronization in lease recovery]]>
|
|
</doc>
|
|
</method>
|
|
<field name="versionID" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[19: SendHeartbeat returns an array of DatanodeCommand objects
instead of a DatanodeCommand object.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="NOTIFY" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DISK_ERROR" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="INVALID_BLOCK" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DNA_UNKNOWN" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Determines actions that a data node should perform
when receiving a datanode command.]]>
|
|
</doc>
|
|
</field>
|
|
<field name="DNA_TRANSFER" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DNA_INVALIDATE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DNA_SHUTDOWN" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DNA_REGISTER" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DNA_FINALIZE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="DNA_RECOVERBLOCK" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
It's used to upload current load information and block reports.

The only way a NameNode can communicate with a DataNode is by
returning values from these functions.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
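<!-- Illustrative usage sketch: the interface description above says the NameNode can only
     talk back to a DataNode through the return values of these calls. A minimal sketch of
     the DataNode side of that loop, assuming "namenode" is a DatanodeProtocol proxy and
     "reg" a DatanodeRegistration obtained from register(); capacity, dfsUsed, remaining,
     xmits and xceivers stand in for real status values.

     DatanodeCommand[] cmds = namenode.sendHeartbeat(
         reg, capacity, dfsUsed, remaining, xmits, xceivers);
     for (DatanodeCommand cmd : cmds) {
       switch (cmd.getAction()) {
         case DatanodeProtocol.DNA_TRANSFER:   /* copy blocks to the targets */ break;
         case DatanodeProtocol.DNA_INVALIDATE: /* delete the listed blocks   */ break;
         case DatanodeProtocol.DNA_REGISTER:   /* re-register with the NameNode */ break;
         default: break;
       }
     }
-->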
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
|
|
<class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="DatanodeRegistration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Default constructor.]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DatanodeRegistration" type="java.lang.String"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Create DatanodeRegistration]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="setInfoPort"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="infoPort" type="int"/>
|
|
</method>
|
|
<method name="setIpcPort"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="ipcPort" type="int"/>
|
|
</method>
|
|
<method name="setStorageInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
|
|
</method>
|
|
<method name="setName"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="name" type="java.lang.String"/>
|
|
</method>
|
|
<method name="getVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getRegistrationID" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="toString" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[{@inheritDoc}]]>
|
|
</doc>
|
|
</method>
|
|
<field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
transient="false" volatile="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[The DatanodeRegistration class contains all information the Namenode needs
to identify and verify a Datanode when it contacts the Namenode.
This information is sent by the Datanode with each communication request.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
|
|
<class name="DisallowedDatanodeException" extends="java.io.IOException"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<doc>
|
|
<![CDATA[This exception is thrown when a datanode tries to register or communicate
|
|
with the namenode when it does not appear on the list of included nodes,
|
|
or has been specifically excluded.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
|
|
<!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
|
|
<interface name="InterDatanodeProtocol" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
|
|
<method name="getBlockMetaDataInfo" return="org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[@return the BlockMetaDataInfo of a block;
|
|
null if the block is not found]]>
|
|
</doc>
|
|
</method>
|
|
<method name="updateBlock"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
|
|
<param name="finalize" type="boolean"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Update the block to the new generation stamp and length.]]>
|
|
</doc>
|
|
</method>
|
|
<field name="LOG" type="org.apache.commons.logging.Log"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="versionID" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[3: added a finalize parameter to updateBlock]]>
|
|
</doc>
|
|
</field>
|
|
<doc>
|
|
<![CDATA[An inter-datanode protocol for updating the generation stamp]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
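<!-- Illustrative usage sketch: a minimal sketch of the two calls declared above as they
     might be used while recovering a block, assuming "datanode" is an InterDatanodeProtocol
     proxy to a peer and "oldBlock"/"newBlock" carry the old and the recovered generation
     stamp and length (the variable names are hypothetical):

     BlockMetaDataInfo info = datanode.getBlockMetaDataInfo(oldBlock);
     if (info != null) {
       // finalize = true marks the replica as finalized after the update
       datanode.updateBlock(oldBlock, newBlock, true);
     }
-->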
|
|
<!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
|
|
<interface name="NamenodeProtocol" abstract="true"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
|
|
<method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
|
|
<param name="size" type="long"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get a list of blocks belonging to <code>datanode</code>
whose total size is equal to <code>size</code>
@param datanode a data node
@param size requested size
@return a list of blocks & their locations
@throws RemoteException if size is less than or equal to 0 or
datanode does not exist]]>
|
|
</doc>
|
|
</method>
|
|
<method name="getEditLogSize" return="long"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Get the size of the current edit log (in bytes).
|
|
@return The number of bytes in the current edit log.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Closes the current edit log and opens a new one. The
|
|
call fails if the file system is in SafeMode.
|
|
@throws IOException
|
|
@return a unique token to identify this transaction.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="rollFsImage"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
|
|
new image to fsImage, removes the old edits and renames edits.new
|
|
to edits. The call fails if any of the four files are missing.
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<field name="versionID" type="long"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[2: Added getEditLogSize(), rollEditLog(), rollFSImage().]]>
|
|
</doc>
|
|
</field>
|
|
<doc>
|
|
<![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
It's used to get part of the name node state.]]>
|
|
</doc>
|
|
</interface>
|
|
<!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
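<!-- Illustrative usage sketch: a minimal sketch of how the three edit-log methods above fit
     together in a checkpoint, assuming "namenode" is a NamenodeProtocol proxy held by the
     secondary NameNode; the surrounding image download/merge steps are omitted.

     long editsBytes = namenode.getEditLogSize();      // how much edit log has accumulated
     CheckpointSignature sig = namenode.rollEditLog();  // close edits, start a new log
     // ... fetch fsimage and edits, merge them into a new image, upload it back ...
     namenode.rollFsImage();                            // promote the new image, rename edits.new
-->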
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
|
|
<class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.io.Writable"/>
|
|
<constructor name="NamespaceInfo"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="NamespaceInfo" type="int, long, int"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getBuildVersion" return="java.lang.String"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getDistributedUpgradeVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[NamespaceInfo is returned by the name-node in reply
|
|
to a data-node handshake.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
|
|
<!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
|
|
<class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="UpgradeCommand"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<constructor name="UpgradeCommand" type="int, int, short"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</constructor>
|
|
<method name="getVersion" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="getCurrentStatus" return="short"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
</method>
|
|
<method name="write"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="out" type="java.io.DataOutput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<method name="readFields"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="in" type="java.io.DataInput"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
</method>
|
|
<field name="UC_ACTION_REPORT_STATUS" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<field name="UC_ACTION_START_UPGRADE" type="int"
|
|
transient="false" volatile="false"
|
|
static="true" final="true" visibility="public"
|
|
deprecated="not deprecated">
|
|
</field>
|
|
<doc>
|
|
<![CDATA[This is a generic distributed upgrade command.

During an upgrade, cluster components send upgrade commands to each other
in order to obtain or share information.
Each upgrade is expected to define its specific upgrade commands by
deriving them from this class.
The upgrade command contains the version of the upgrade, which is verified
on the receiving side, and the current status of the upgrade.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
|
|
</package>
|
|
<package name="org.apache.hadoop.hdfs.tools">
|
|
<!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
|
|
<class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<constructor name="DFSAdmin"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Construct a DFSAdmin object.]]>
|
|
</doc>
|
|
</constructor>
|
|
<constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<doc>
|
|
<![CDATA[Construct a DFSAdmin object.]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="report"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Gives a report on how the FileSystem is doing.
|
|
@exception IOException if the filesystem does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="setSafeMode"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<param name="idx" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Safe mode maintenance command.
Usage: java DFSAdmin -safemode [enter | leave | get]
@param argv List of command line parameters.
@param idx The index of the command that is being processed.
@exception IOException if the filesystem does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="saveNamespace" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Command to ask the namenode to save the namespace.
|
|
Usage: java DFSAdmin -saveNamespace
|
|
@exception IOException
|
|
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshNodes" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
|
|
file.
|
|
Usage: java DFSAdmin -refreshNodes
|
|
@exception IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="finalizeUpgrade" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Command to ask the namenode to finalize previously performed upgrade.
|
|
Usage: java DFSAdmin -finalizeUpgrade
|
|
@exception IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="upgradeProgress" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<param name="idx" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Command to request current distributed upgrade status,
|
|
a detailed status, or to force the upgrade to proceed.
|
|
|
|
Usage: java DFSAdmin -upgradeProgress [status | details | force]
|
|
@exception IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="metaSave" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<param name="idx" type="int"/>
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Dumps DFS data structures into the specified file.
Usage: java DFSAdmin -metasave filename
@param argv List of command line parameters.
@param idx The index of the command that is being processed.
@exception IOException if an error occurred while accessing
the file or path.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="refreshServiceAcl" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="IOException" type="java.io.IOException"/>
|
|
<doc>
|
|
<![CDATA[Refresh the authorization policy on the {@link NameNode}.
|
|
@return exitcode 0 on success, non-zero on failure
|
|
@throws IOException]]>
|
|
</doc>
|
|
</method>
|
|
<method name="run" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[@param argv The parameters passed to this program.
@exception Exception if the filesystem does not exist.
@return 0 on success, non-zero on error.]]>
|
|
</doc>
|
|
</method>
|
|
<method name="main"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="argv" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[main() has some simple utility methods.
|
|
@param argv Command line parameters.
|
|
@exception Exception if the filesystem does not exist.]]>
|
|
</doc>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class provides some DFS administrative access.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
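<!-- Illustrative usage sketch: the command methods above are normally driven through
     run(String[]), which dispatches on the usage strings documented here. A minimal sketch,
     assuming "conf" is a Configuration pointing at the target cluster; run() is declared to
     throw Exception, so the caller must handle or declare it. The "-report" flag corresponds
     to the report() command above.

     DFSAdmin admin = new DFSAdmin(conf);
     int rc1 = admin.run(new String[] {"-safemode", "get"});
     int rc2 = admin.run(new String[] {"-report"});
     // a return value of 0 signals success, per the run() documentation above
-->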
|
|
<!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
|
|
<class name="DFSck" extends="org.apache.hadoop.conf.Configured"
|
|
abstract="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<implements name="org.apache.hadoop.util.Tool"/>
|
|
<constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[Filesystem checker.
|
|
@param conf current Configuration
|
|
@throws Exception]]>
|
|
</doc>
|
|
</constructor>
|
|
<method name="run" return="int"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="false" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
<doc>
|
|
<![CDATA[@param args]]>
|
|
</doc>
|
|
</method>
|
|
<method name="main"
|
|
abstract="false" native="false" synchronized="false"
|
|
static="true" final="false" visibility="public"
|
|
deprecated="not deprecated">
|
|
<param name="args" type="java.lang.String[]"/>
|
|
<exception name="Exception" type="java.lang.Exception"/>
|
|
</method>
|
|
<doc>
|
|
<![CDATA[This class provides rudimentary checking of DFS volumes for errors and
sub-optimal conditions.
<p>The tool scans all files and directories, starting from an indicated
root path. The following abnormal conditions are detected and handled:</p>
<ul>
<li>files with blocks that are completely missing from all datanodes.<br/>
In this case the tool can perform one of the following actions:
<ul>
<li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
<li>move corrupted files to the /lost+found directory on DFS
({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
block chains, representing the longest consecutive series of valid blocks.</li>
<li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
</ul>
</li>
<li>detect files with under-replicated or over-replicated blocks</li>
</ul>
Additionally, the tool collects detailed overall DFS statistics, and
optionally can print detailed statistics on block locations and replication
factors of each file.
The tool also provides an option to filter open files during the scan.]]>
|
|
</doc>
|
|
</class>
|
|
<!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
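<!-- Illustrative usage sketch: a minimal sketch of invoking the checker described above
     programmatically, assuming "conf" is a Configuration for the cluster and the scan starts
     from the root path mentioned in the class description; the constructor and run() are
     declared to throw Exception, so the caller must handle or declare it.

     DFSck fsck = new DFSck(conf);
     int rc = fsck.run(new String[] {"/"});
-->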
</package>

</api>