diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index 2783bf3b30..a6ce035fa9 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -97,6 +97,7 @@ public ExcludeHandler(Object target) {
this.target = target;
}
+ @Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
String methodName = method.getName();
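
Note: nearly every hunk in this patch adds @Override to a method that implements an interface method. Java 5 allowed the annotation only on superclass overrides; since Java 6 it is also legal on interface implementations, so the compiler can flag signature drift. A minimal sketch of the same pattern on a dynamic-proxy handler like the ExcludeHandler above — the LoggingHandler class is illustrative, not part of this patch:

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    class LoggingHandler implements InvocationHandler {
      private final Object target;
      LoggingHandler(Object target) { this.target = target; }

      @Override // implements InvocationHandler.invoke; a mistyped signature now fails to compile
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        System.out.println("invoking " + method.getName());
        return method.invoke(target, args); // delegate to the wrapped object
      }

      static Runnable wrap(Runnable target) {
        return (Runnable) Proxy.newProxyInstance(
            Runnable.class.getClassLoader(),
            new Class<?>[] { Runnable.class },
            new LoggingHandler(target));
      }
    }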
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index cf7aafafb7..f1cb41dd6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1847,6 +1847,7 @@ public void clear() {
*
* @return an iterator over the entries.
*/
+ @Override
public Iterator<Map.Entry<String, String>> iterator() {
// Get a copy of just the string to string pairs. After the old object
// methods that allow non-strings to be put into configurations are removed,
@@ -2272,6 +2273,7 @@ public void readFields(DataInput in) throws IOException {
}
//@Override
+ @Override
public void write(DataOutput out) throws IOException {
Properties props = getProps();
WritableUtils.writeVInt(out, props.size());
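
Besides iterator(), the Configuration hunks annotate write(DataOutput), one half of Hadoop's Writable contract (readFields, visible in the hunk header, is its inverse). A minimal sketch of a Writable, assuming only org.apache.hadoop.io.Writable — the IntPair class is hypothetical:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Writable;

    class IntPair implements Writable {
      private int first, second;

      @Override
      public void write(DataOutput out) throws IOException {
        out.writeInt(first);  // fields go out in a fixed order...
        out.writeInt(second);
      }

      @Override
      public void readFields(DataInput in) throws IOException {
        first = in.readInt(); // ...and come back in the same order
        second = in.readInt();
      }
    }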
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
index 2bc7e537e4..f06af2b98d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
@@ -39,11 +39,13 @@ public Configured(Configuration conf) {
}
// inherit javadoc
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
// inherit javadoc
+ @Override
public Configuration getConf() {
return conf;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index 041b263edd..452d29f7b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -23,12 +23,10 @@
import org.apache.commons.lang.StringEscapeUtils;
import java.util.Collection;
-import java.util.Map;
import java.util.Enumeration;
import java.io.IOException;
import java.io.PrintWriter;
-import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
@@ -57,9 +55,6 @@ public class ReconfigurationServlet extends HttpServlet {
public static final String CONF_SERVLET_RECONFIGURABLE_PREFIX =
"conf.servlet.reconfigurable.";
- /**
- * {@inheritDoc}
- */
@Override
public void init() throws ServletException {
super.init();
@@ -202,9 +197,6 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
}
}
- /**
- * {@inheritDoc}
- */
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
@@ -219,9 +211,6 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
printFooter(out);
}
- /**
- * {@inheritDoc}
- */
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index d9eda44580..6adbeab60a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -47,7 +47,6 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
index a319fb7b36..b4a4a85674 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
@@ -45,22 +45,27 @@ public AvroFSInput(final FileContext fc, final Path p) throws IOException {
this.stream = fc.open(p);
}
+ @Override
public long length() {
return len;
}
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
return stream.read(b, off, len);
}
+ @Override
public void seek(long p) throws IOException {
stream.seek(p);
}
+ @Override
public long tell() throws IOException {
return stream.getPos();
}
+ @Override
public void close() throws IOException {
stream.close();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index cfe9ee8c66..fa095343c5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -204,6 +204,7 @@ public void setTopologyPaths(String[] topologyPaths) throws IOException {
}
}
+ @Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(offset);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
index f322924012..745148281d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
@@ -19,7 +19,6 @@
import java.io.BufferedInputStream;
import java.io.FileDescriptor;
-import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -50,10 +49,12 @@ public BufferedFSInputStream(FSInputStream in, int size) {
super(in, size);
}
+ @Override
public long getPos() throws IOException {
return ((FSInputStream)in).getPos()-(count-pos);
}
+ @Override
public long skip(long n) throws IOException {
if (n <= 0) {
return 0;
@@ -63,6 +64,7 @@ public long skip(long n) throws IOException {
return n;
}
+ @Override
public void seek(long pos) throws IOException {
if( pos<0 ) {
return;
@@ -82,20 +84,24 @@ public void seek(long pos) throws IOException {
((FSInputStream)in).seek(pos);
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
pos = 0;
count = 0;
return ((FSInputStream)in).seekToNewSource(targetPos);
}
+ @Override
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
return ((FSInputStream)in).read(position, buffer, offset, length) ;
}
+ @Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
((FSInputStream)in).readFully(position, buffer, offset, length);
}
+ @Override
public void readFully(long position, byte[] buffer) throws IOException {
((FSInputStream)in).readFully(position, buffer);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 17707718b8..42ee870268 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -53,6 +53,7 @@ public ChecksumFileSystem(FileSystem fs) {
super(fs);
}
+ @Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
@@ -64,6 +65,7 @@ public void setConf(Configuration conf) {
/**
* Set whether to verify checksum.
*/
+ @Override
public void setVerifyChecksum(boolean verifyChecksum) {
this.verifyChecksum = verifyChecksum;
}
@@ -74,6 +76,7 @@ public void setWriteChecksum(boolean writeChecksum) {
}
/** get the raw file system */
+ @Override
public FileSystem getRawFileSystem() {
return fs;
}
@@ -162,14 +165,17 @@ private long getChecksumFilePos( long dataPos ) {
return HEADER_LENGTH + 4*(dataPos/bytesPerSum);
}
+ @Override
protected long getChunkPosition( long dataPos ) {
return dataPos/bytesPerSum*bytesPerSum;
}
+ @Override
public int available() throws IOException {
return datas.available() + super.available();
}
+ @Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
// parameter check
@@ -190,6 +196,7 @@ public int read(long position, byte[] b, int off, int len)
return nread;
}
+ @Override
public void close() throws IOException {
datas.close();
if( sums != null ) {
@@ -290,6 +297,7 @@ private long getFileLength() throws IOException {
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
+ @Override
public synchronized long skip(long n) throws IOException {
long curPos = getPos();
long fileLength = getFileLength();
@@ -311,6 +319,7 @@ public synchronized long skip(long n) throws IOException {
* ChecksumException if the chunk to seek to is corrupted
*/
+ @Override
public synchronized void seek(long pos) throws IOException {
if(pos>getFileLength()) {
throw new IOException("Cannot seek after EOF");
@@ -339,7 +348,7 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return new FSDataBoundedInputStream(fs, f, in);
}
- /** {@inheritDoc} */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
@@ -398,6 +407,7 @@ public ChecksumFSOutputSummer(ChecksumFileSystem fs,
sums.writeInt(bytesPerSum);
}
+ @Override
public void close() throws IOException {
flushBuffer();
sums.close();
@@ -412,7 +422,6 @@ protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
}
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -454,7 +463,6 @@ private FSDataOutputStream create(Path f, FsPermission permission,
return out;
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -472,6 +480,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
* @return true if successful;
* false if file does not exist or is a directory
*/
+ @Override
public boolean setReplication(Path src, short replication) throws IOException {
boolean value = fs.setReplication(src, replication);
if (!value)
@@ -487,6 +496,7 @@ public boolean setReplication(Path src, short replication) throws IOException {
/**
* Rename files/dirs
*/
+ @Override
public boolean rename(Path src, Path dst) throws IOException {
if (fs.isDirectory(src)) {
return fs.rename(src, dst);
@@ -516,6 +526,7 @@ public boolean rename(Path src, Path dst) throws IOException {
* Implement the delete(Path, boolean) in checksum
* file system.
*/
+ @Override
public boolean delete(Path f, boolean recursive) throws IOException{
FileStatus fstatus = null;
try {
@@ -538,6 +549,7 @@ public boolean delete(Path f, boolean recursive) throws IOException{
}
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(Path file) {
return !isChecksumFile(file);
}
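
The last hunk above shows the other recurring case in this patch: @Override on a method of an anonymous class, here a PathFilter that hides checksum files. A small sketch of the same idiom, listing a directory through a filter — the directory path is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public class FilteredListing {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus[] visible = fs.listStatus(new Path("/user/data"), new PathFilter() {
          @Override // implements PathFilter.accept
          public boolean accept(Path file) {
            return !file.getName().startsWith("_"); // e.g. skip _SUCCESS, _logs
          }
        });
        for (FileStatus st : visible) {
          System.out.println(st.getPath());
        }
      }
    }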
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 4784991982..12805d86a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -32,7 +32,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.StringUtils;
/**
* Abstract Checksumed Fs.
@@ -61,6 +60,7 @@ public ChecksumFs(AbstractFileSystem theFs)
/**
* Set whether to verify checksum.
*/
+ @Override
public void setVerifyChecksum(boolean inVerifyChecksum) {
this.verifyChecksum = inVerifyChecksum;
}
@@ -152,14 +152,17 @@ private long getChecksumFilePos(long dataPos) {
return HEADER_LENGTH + 4*(dataPos/bytesPerSum);
}
+ @Override
protected long getChunkPosition(long dataPos) {
return dataPos/bytesPerSum*bytesPerSum;
}
+ @Override
public int available() throws IOException {
return datas.available() + super.available();
}
+ @Override
public int read(long position, byte[] b, int off, int len)
throws IOException, UnresolvedLinkException {
// parameter check
@@ -180,6 +183,7 @@ public int read(long position, byte[] b, int off, int len)
return nread;
}
+ @Override
public void close() throws IOException {
datas.close();
if (sums != null) {
@@ -258,6 +262,7 @@ private long getFileLength() throws IOException, UnresolvedLinkException {
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
+ @Override
public synchronized long skip(long n) throws IOException {
final long curPos = getPos();
final long fileLength = getFileLength();
@@ -279,6 +284,7 @@ public synchronized long skip(long n) throws IOException {
* ChecksumException if the chunk to seek to is corrupted
*/
+ @Override
public synchronized void seek(long pos) throws IOException {
if (pos>getFileLength()) {
throw new IOException("Cannot seek after EOF");
@@ -348,6 +354,7 @@ public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file,
sums.writeInt(bytesPerSum);
}
+ @Override
public void close() throws IOException {
flushBuffer();
sums.close();
@@ -447,6 +454,7 @@ public void renameInternal(Path src, Path dst)
* Implement the delete(Path, boolean) in checksum
* file system.
*/
+ @Override
public boolean delete(Path f, boolean recursive)
throws IOException, UnresolvedLinkException {
FileStatus fstatus = null;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index c0ab82de1d..0d685b43e1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -75,7 +75,7 @@ public ContentSummary(
/** Returns (disk) space quota */
public long getSpaceQuota() {return spaceQuota;}
- /** {@inheritDoc} */
+ @Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(length);
@@ -86,7 +86,7 @@ public void write(DataOutput out) throws IOException {
out.writeLong(spaceQuota);
}
- /** {@inheritDoc} */
+ @Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
this.length = in.readLong();
@@ -131,7 +131,7 @@ public static String getHeader(boolean qOption) {
return qOption ? QUOTA_HEADER : HEADER;
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
return toString(true);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
index 9949834222..c552f331f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
@@ -131,6 +131,7 @@ public String getMount() throws IOException {
return mount;
}
+ @Override
public String toString() {
return
"df -k " + mount +"\n" +
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
index 5caec7204d..2c96b0abaf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
@@ -76,6 +76,7 @@ public DU(File path, Configuration conf) throws IOException {
**/
class DURefreshThread implements Runnable {
+ @Override
public void run() {
while(shouldRun) {
@@ -169,16 +170,19 @@ public void shutdown() {
}
}
+ @Override
public String toString() {
return
"du -sk " + dirPath +"\n" +
used + "\t" + dirPath;
}
+ @Override
protected String[] getExecString() {
return new String[] {"du", "-sk", dirPath};
}
+ @Override
protected void parseExecResult(BufferedReader lines) throws IOException {
String line = lines.readLine();
if (line == null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index e47dffb082..eef53140c3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -44,6 +44,7 @@ public FSDataInputStream(InputStream in)
*
* @param desired offset to seek to
*/
+ @Override
public synchronized void seek(long desired) throws IOException {
((Seekable)in).seek(desired);
}
@@ -53,6 +54,7 @@ public synchronized void seek(long desired) throws IOException {
*
* @return current position in the input stream
*/
+ @Override
public long getPos() throws IOException {
return ((Seekable)in).getPos();
}
@@ -68,6 +70,7 @@ public long getPos() throws IOException {
* if there is no more data because the end of the stream has been
* reached
*/
+ @Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
return ((PositionedReadable)in).read(position, buffer, offset, length);
@@ -85,6 +88,7 @@ public int read(long position, byte[] buffer, int offset, int length)
* If an exception is thrown an undetermined number
* of bytes in the buffer may have been written.
*/
+ @Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
((PositionedReadable)in).readFully(position, buffer, offset, length);
@@ -93,6 +97,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
/**
* See {@link #readFully(long, byte[], int, int)}.
*/
+ @Override
public void readFully(long position, byte[] buffer)
throws IOException {
((PositionedReadable)in).readFully(position, buffer, 0, buffer.length);
@@ -104,6 +109,7 @@ public void readFully(long position, byte[] buffer)
* @param targetPos position to seek to
* @return true if a new source is found, false otherwise
*/
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
return ((Seekable)in).seekToNewSource(targetPos);
}
@@ -118,6 +124,7 @@ public InputStream getWrappedStream() {
return in;
}
+ @Override
public int read(ByteBuffer buf) throws IOException {
if (in instanceof ByteBufferReadable) {
return ((ByteBufferReadable)in).read(buf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
index 9974f27e24..cc992e7c94 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
@@ -140,6 +140,7 @@ protected synchronized boolean needChecksum() {
* @exception IOException if an I/O error occurs.
*/
+ @Override
public synchronized int read() throws IOException {
if (pos >= count) {
fill();
@@ -180,6 +181,7 @@ public synchronized int read() throws IOException {
* @exception IOException if an I/O error occurs.
* ChecksumException if any checksum error occurs
*/
+ @Override
public synchronized int read(byte[] b, int off, int len) throws IOException {
// parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
@@ -367,6 +369,7 @@ public synchronized int available() throws IOException {
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
+ @Override
public synchronized long skip(long n) throws IOException {
if (n <= 0) {
return 0;
@@ -389,6 +392,7 @@ public synchronized long skip(long n) throws IOException {
* ChecksumException if the chunk to seek to is corrupted
*/
+ @Override
public synchronized void seek(long pos) throws IOException {
if( pos<0 ) {
return;
@@ -462,13 +466,16 @@ final protected synchronized void set(boolean verifyChecksum,
this.pos = 0;
}
+ @Override
final public boolean markSupported() {
return false;
}
+ @Override
final public void mark(int readlimit) {
}
+ @Override
final public void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
index f7bc22159d..8d668feeab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
@@ -36,19 +36,23 @@ public abstract class FSInputStream extends InputStream
* The next read() will be from that location. Can't
* seek past the end of the file.
*/
+ @Override
public abstract void seek(long pos) throws IOException;
/**
* Return the current offset from the start of the file
*/
+ @Override
public abstract long getPos() throws IOException;
/**
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*/
+ @Override
public abstract boolean seekToNewSource(long targetPos) throws IOException;
+ @Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
synchronized (this) {
@@ -64,6 +68,7 @@ public int read(long position, byte[] buffer, int offset, int length)
}
}
+ @Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
int nread = 0;
@@ -76,6 +81,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
}
}
+ @Override
public void readFully(long position, byte[] buffer)
throws IOException {
readFully(position, buffer, 0, buffer.length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 66b6a74916..d494f30de7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -55,6 +55,7 @@ protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksu
throws IOException;
/** Write one byte */
+ @Override
public synchronized void write(int b) throws IOException {
sum.update(b);
buf[count++] = (byte)b;
@@ -81,6 +82,7 @@ public synchronized void write(int b) throws IOException {
* @param len the number of bytes to write.
* @exception IOException if an I/O error occurs.
*/
+ @Override
public synchronized void write(byte b[], int off, int len)
throws IOException {
if (off < 0 || len < 0 || off > b.length - len) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
index 2b248bdcf2..149a3e3a4a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
@@ -37,6 +37,7 @@ public abstract class FileChecksum implements Writable {
public abstract byte[] getBytes();
/** Return true if both the algorithms and the values are the same. */
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -50,7 +51,7 @@ public boolean equals(Object other) {
&& Arrays.equals(this.getBytes(), that.getBytes());
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
return getAlgorithmName().hashCode() ^ Arrays.hashCode(getBytes());
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 4e5057a4e9..5cfce9b019 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -190,6 +190,7 @@ public final class FileContext {
new FileContextFinalizer();
private static final PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(final Path file) {
return true;
}
@@ -318,6 +319,7 @@ private static AbstractFileSystem getAbstractFileSystem(
throws UnsupportedFileSystemException, IOException {
try {
return user.doAs(new PrivilegedExceptionAction<AbstractFileSystem>() {
+ @Override
public AbstractFileSystem run() throws UnsupportedFileSystemException {
return AbstractFileSystem.get(uri, conf);
}
@@ -660,6 +662,7 @@ public FSDataOutputStream create(final Path f,
final CreateOpts[] updatedOpts =
CreateOpts.setOpt(CreateOpts.perms(permission), opts);
return new FSLinkResolver<FSDataOutputStream>() {
+ @Override
public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.create(p, createFlag, updatedOpts);
@@ -703,6 +706,7 @@ public void mkdir(final Path dir, final FsPermission permission,
final FsPermission absFerms = (permission == null ?
FsPermission.getDefault() : permission).applyUMask(umask);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.mkdir(p, absFerms, createParent);
@@ -738,6 +742,7 @@ public boolean delete(final Path f, final boolean recursive)
UnsupportedFileSystemException, IOException {
Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
+ @Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return Boolean.valueOf(fs.delete(p, recursive));
@@ -766,6 +771,7 @@ public FSDataInputStream open(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FSDataInputStream>() {
+ @Override
public FSDataInputStream next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.open(p);
@@ -796,6 +802,7 @@ public FSDataInputStream open(final Path f, final int bufferSize)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FSDataInputStream>() {
+ @Override
public FSDataInputStream next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.open(p, bufferSize);
@@ -826,6 +833,7 @@ public boolean setReplication(final Path f, final short replication)
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
+ @Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return Boolean.valueOf(fs.setReplication(p, replication));
@@ -894,6 +902,7 @@ public void rename(final Path src, final Path dst,
*/
final Path source = resolveIntermediate(absSrc);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.rename(source, p, options);
@@ -925,6 +934,7 @@ public void setPermission(final Path f, final FsPermission permission)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setPermission(p, permission);
@@ -967,6 +977,7 @@ public void setOwner(final Path f, final String username,
}
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setOwner(p, username, groupname);
@@ -1002,6 +1013,7 @@ public void setTimes(final Path f, final long mtime, final long atime)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setTimes(p, mtime, atime);
@@ -1034,6 +1046,7 @@ public FileChecksum getFileChecksum(final Path f)
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileChecksum>() {
+ @Override
public FileChecksum next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileChecksum(p);
@@ -1089,6 +1102,7 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus>() {
+ @Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileStatus(p);
@@ -1135,6 +1149,7 @@ public FileStatus getFileLinkStatus(final Path f)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus>() {
+ @Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
FileStatus fi = fs.getFileLinkStatus(p);
@@ -1165,6 +1180,7 @@ public Path getLinkTarget(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Path>() {
+ @Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
FileStatus fi = fs.getFileLinkStatus(p);
@@ -1208,6 +1224,7 @@ public BlockLocation[] getFileBlockLocations(final Path f, final long start,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<BlockLocation[]>() {
+ @Override
public BlockLocation[] next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileBlockLocations(p, start, len);
@@ -1246,6 +1263,7 @@ public FsStatus getFsStatus(final Path f) throws AccessControlException,
}
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FsStatus>() {
+ @Override
public FsStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFsStatus(p);
@@ -1339,6 +1357,7 @@ public void createSymlink(final Path target, final Path link,
IOException {
final Path nonRelLink = fixRelativePart(link);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.createSymlink(target, p, createParent);
@@ -1373,6 +1392,7 @@ public RemoteIterator<FileStatus> listStatus(final Path f) throws
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<RemoteIterator<FileStatus>>() {
+ @Override
public RemoteIterator<FileStatus> next(
final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
@@ -1432,6 +1452,7 @@ public RemoteIterator<LocatedFileStatus> listLocatedStatus(
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<RemoteIterator<LocatedFileStatus>>() {
+ @Override
public RemoteIterator<LocatedFileStatus> next(
final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
@@ -1703,6 +1724,7 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException,
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus[]>() {
+ @Override
public FileStatus[] next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.listStatus(p);
@@ -2232,6 +2254,7 @@ private static boolean isSameFS(Path qualPath1, Path qualPath2) {
* Deletes all the paths in deleteOnExit on JVM shutdown.
*/
static class FileContextFinalizer implements Runnable {
+ @Override
public synchronized void run() {
processDeleteOnExit();
}
@@ -2244,6 +2267,7 @@ public synchronized void run() {
protected Path resolve(final Path f) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
return new FSLinkResolver<Path>() {
+ @Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.resolvePath(p);
@@ -2259,6 +2283,7 @@ public Path next(final AbstractFileSystem fs, final Path p)
*/
protected Path resolveIntermediate(final Path f) throws IOException {
return new FSLinkResolver<FileStatus>() {
+ @Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileLinkStatus(p);
@@ -2281,6 +2306,7 @@ Set<AbstractFileSystem> resolveAbstractFileSystems(final Path f)
final HashSet<AbstractFileSystem> result
= new HashSet<AbstractFileSystem>();
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
result.add(fs);
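
Every FileContext hunk above annotates the next(...) method of an anonymous FSLinkResolver<T>, the template FileContext uses to run one filesystem operation and re-dispatch it when a symlink is encountered, parameterized on the operation's return type. A simplified stand-in for the pattern — Op is a made-up class, not the real FSLinkResolver:

    import java.io.IOException;

    public class ResolverSketch {
      // One abstract attempt at an operation, typed on its result.
      abstract static class Op<T> {
        abstract T next(String path) throws IOException;
        T resolve(String path) throws IOException {
          return next(path); // the real resolver loops here, chasing symlinks
        }
      }

      public static void main(String[] args) throws IOException {
        Op<Boolean> delete = new Op<Boolean>() {
          @Override // without the annotation, a typo would silently declare a new method
          Boolean next(String path) throws IOException {
            return Boolean.TRUE; // stands in for fs.delete(p, recursive)
          }
        };
        System.out.println(delete.resolve("/a/b"));
      }
    }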
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 2757475faf..5445f6eb15 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -253,6 +253,7 @@ public void setSymlink(final Path p) {
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
+ @Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
out.writeLong(getLen());
@@ -270,6 +271,7 @@ public void write(DataOutput out) throws IOException {
}
}
+ @Override
public void readFields(DataInput in) throws IOException {
String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
this.path = new Path(strPath);
@@ -299,6 +301,7 @@ public void readFields(DataInput in) throws IOException {
* @throws ClassCastException if the specified object's is not of
* type FileStatus
*/
+ @Override
public int compareTo(Object o) {
FileStatus other = (FileStatus)o;
return this.getPath().compareTo(other.getPath());
@@ -308,6 +311,7 @@ public int compareTo(Object o) {
* @param o the object to be compared.
* @return true if two file status has the same path name; false if not.
*/
+ @Override
public boolean equals(Object o) {
if (o == null) {
return false;
@@ -328,6 +332,7 @@ public boolean equals(Object o) {
*
* @return a hash code value for the path name.
*/
+ @Override
public int hashCode() {
return getPath().hashCode();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 31b59439a9..ff9f2db1ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -147,6 +147,7 @@ public static FileSystem get(final URI uri, final Configuration conf,
UserGroupInformation ugi =
UserGroupInformation.getBestUGI(ticketCachePath, user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
public FileSystem run() throws IOException {
return get(uri, conf);
}
@@ -332,6 +333,7 @@ public static FileSystem newInstance(final URI uri, final Configuration conf,
UserGroupInformation ugi =
UserGroupInformation.getBestUGI(ticketCachePath, user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
public FileSystem run() throws IOException {
return newInstance(uri,conf);
}
@@ -1389,6 +1391,7 @@ public ContentSummary getContentSummary(Path f) throws IOException {
}
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(Path file) {
return true;
}
@@ -2056,6 +2059,7 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
* No more filesystem operations are needed. Will
* release any held locks.
*/
+ @Override
public void close() throws IOException {
// delete all files that were marked as delete-on-exit.
processDeleteOnExit();
@@ -2393,6 +2397,7 @@ synchronized void closeAll(boolean onlyAutomatic) throws IOException {
}
private class ClientFinalizer implements Runnable {
+ @Override
public synchronized void run() {
try {
closeAll(true);
@@ -2447,7 +2452,7 @@ static class Key {
this.ugi = UserGroupInformation.getCurrentUser();
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
return (scheme + authority).hashCode() + ugi.hashCode() + (int)unique;
}
@@ -2456,7 +2461,7 @@ static boolean isEqual(Object a, Object b) {
return a == b || (a != null && a.equals(b));
}
- /** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
@@ -2471,7 +2476,7 @@ && isEqual(this.ugi, that.ugi)
return false;
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
return "("+ugi.toString() + ")@" + scheme + "://" + authority;
}
@@ -2584,6 +2589,7 @@ public int getWriteOps() {
return writeOps.get();
}
+ @Override
public String toString() {
return bytesRead + " bytes read, " + bytesWritten + " bytes written, "
+ readOps + " read ops, " + largeReadOps + " large read ops, "
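
The two doAs hunks in FileSystem annotate PrivilegedExceptionAction.run, the JAAS-based hook Hadoop uses to open a filesystem as a particular user. A hedged sketch of that pattern — the user name is an illustrative assumption:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsExample {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
        FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          @Override // implements PrivilegedExceptionAction.run
          public FileSystem run() throws IOException {
            return FileSystem.get(new Configuration()); // resolved as "alice"
          }
        });
        System.out.println(fs.getUri());
      }
    }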
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index ba9bb4eafe..b6a2acae49 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -414,9 +414,11 @@ private static class CygPathCommand extends Shell {
String getResult() throws IOException {
return result;
}
+ @Override
protected String[] getExecString() {
return command;
}
+ @Override
protected void parseExecResult(BufferedReader lines) throws IOException {
String line = lines.readLine();
if (line == null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index c2ecd20b5a..6e1e099cb0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -76,6 +76,7 @@ public FileSystem getRawFileSystem() {
* for this FileSystem
* @param conf the configuration
*/
+ @Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
// this is less than ideal, but existing filesystems sometimes neglect
@@ -90,6 +91,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
+ @Override
public URI getUri() {
return fs.getUri();
}
@@ -104,6 +106,7 @@ protected URI getCanonicalUri() {
}
/** Make sure that a path specifies a FileSystem. */
+ @Override
public Path makeQualified(Path path) {
Path fqPath = fs.makeQualified(path);
// swap in our scheme if the filtered fs is using a different scheme
@@ -125,10 +128,12 @@ public Path makeQualified(Path path) {
///////////////////////////////////////////////////////////////
/** Check that a Path belongs to this FileSystem. */
+ @Override
protected void checkPath(Path path) {
fs.checkPath(path);
}
+ @Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
return fs.getFileBlockLocations(file, start, len);
@@ -143,17 +148,17 @@ public Path resolvePath(final Path p) throws IOException {
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
+ @Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return fs.open(f, bufferSize);
}
- /** {@inheritDoc} */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return fs.append(f, bufferSize, progress);
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -171,6 +176,7 @@ public FSDataOutputStream create(Path f, FsPermission permission,
* @return true if successful;
* false if file does not exist or is a directory
*/
+ @Override
public boolean setReplication(Path src, short replication) throws IOException {
return fs.setReplication(src, replication);
}
@@ -179,23 +185,23 @@ public boolean setReplication(Path src, short replication) throws IOException {
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
+ @Override
public boolean rename(Path src, Path dst) throws IOException {
return fs.rename(src, dst);
}
/** Delete a file */
+ @Override
public boolean delete(Path f, boolean recursive) throws IOException {
return fs.delete(f, recursive);
}
/** List files in a directory. */
+ @Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f);
}
- /**
- * {@inheritDoc}
- */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
@@ -203,11 +209,13 @@ public RemoteIterator<Path> listCorruptFileBlocks(Path path)
}
/** List files and its block locations in a directory. */
+ @Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f);
}
+ @Override
public Path getHomeDirectory() {
return fs.getHomeDirectory();
}
@@ -219,6 +227,7 @@ public Path getHomeDirectory() {
*
* @param newDir
*/
+ @Override
public void setWorkingDirectory(Path newDir) {
fs.setWorkingDirectory(newDir);
}
@@ -228,21 +237,21 @@ public void setWorkingDirectory(Path newDir) {
*
* @return the directory pathname
*/
+ @Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
}
+ @Override
protected Path getInitialWorkingDirectory() {
return fs.getInitialWorkingDirectory();
}
- /** {@inheritDoc} */
@Override
public FsStatus getStatus(Path p) throws IOException {
return fs.getStatus(p);
}
- /** {@inheritDoc} */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return fs.mkdirs(f, permission);
@@ -254,6 +263,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
* the given dst name.
* delSrc indicates if the source should be removed
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, src, dst);
@@ -264,6 +274,7 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
* the given dst name.
* delSrc indicates if the source should be removed
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst)
throws IOException {
@@ -275,6 +286,7 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite,
* the given dst name.
* delSrc indicates if the source should be removed
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst)
throws IOException {
@@ -286,6 +298,7 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite,
* Copy it from FS control to the local dst name.
* delSrc indicates if the src will be removed or not.
*/
+ @Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyToLocalFile(delSrc, src, dst);
@@ -297,6 +310,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
* file. If the FS is local, we write directly into the target. If
* the FS is remote, we write into the tmp local area.
*/
+ @Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
@@ -308,12 +322,14 @@ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
*/
+ @Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
}
/** Return the total size of all files in the filesystem.*/
+ @Override
public long getUsed() throws IOException{
return fs.getUsed();
}
@@ -357,16 +373,17 @@ public FsServerDefaults getServerDefaults(Path f) throws IOException {
/**
* Get file status.
*/
+ @Override
public FileStatus getFileStatus(Path f) throws IOException {
return fs.getFileStatus(f);
}
- /** {@inheritDoc} */
+ @Override
public FileChecksum getFileChecksum(Path f) throws IOException {
return fs.getFileChecksum(f);
}
- /** {@inheritDoc} */
+ @Override
public void setVerifyChecksum(boolean verifyChecksum) {
fs.setVerifyChecksum(verifyChecksum);
}
@@ -387,21 +404,18 @@ public void close() throws IOException {
fs.close();
}
- /** {@inheritDoc} */
@Override
public void setOwner(Path p, String username, String groupname
) throws IOException {
fs.setOwner(p, username, groupname);
}
- /** {@inheritDoc} */
@Override
public void setTimes(Path p, long mtime, long atime
) throws IOException {
fs.setTimes(p, mtime, atime);
}
- /** {@inheritDoc} */
@Override
public void setPermission(Path p, FsPermission permission
) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 6cfc11b1fa..9637b6b913 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -174,9 +174,6 @@ public FileStatus[] listStatus(Path f)
return myFs.listStatus(f);
}
- /**
- * {@inheritDoc}
- */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 637697b83d..c1b9071bbc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -39,6 +39,7 @@ public class FsServerDefaults implements Writable {
static { // register a ctor
WritableFactories.setFactory(FsServerDefaults.class, new WritableFactory() {
+ @Override
public Writable newInstance() {
return new FsServerDefaults();
}
@@ -106,6 +107,7 @@ public DataChecksum.Type getChecksumType() {
// /////////////////////////////////////////
// Writable
// /////////////////////////////////////////
+ @Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(blockSize);
@@ -116,6 +118,7 @@ public void write(DataOutput out) throws IOException {
WritableUtils.writeEnum(out, checksumType);
}
+ @Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
blockSize = in.readLong();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 4da32789e5..0db1f9e431 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -236,6 +236,7 @@ private void printInstanceHelp(PrintStream out, Command instance) {
/**
* run
*/
+ @Override
public int run(String argv[]) throws Exception {
// initialize FsShell
init();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
index 8b9de78fe0..d392c7d765 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
@@ -60,12 +60,14 @@ public long getRemaining() {
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
+ @Override
public void write(DataOutput out) throws IOException {
out.writeLong(capacity);
out.writeLong(used);
out.writeLong(remaining);
}
+ @Override
public void readFields(DataInput in) throws IOException {
capacity = in.readLong();
used = in.readLong();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
index 65c608ddec..90e75b0ccb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
@@ -53,7 +53,6 @@ public void connect() throws IOException {
}
}
- /* @inheritDoc */
@Override
public InputStream getInputStream() throws IOException {
if (is == null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
index b9a5f1a2cc..2a9208ea5b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
@@ -59,6 +59,7 @@ public FsUrlStreamHandlerFactory(Configuration conf) {
this.handler = new FsUrlStreamHandler(this.conf);
}
+ @Override
public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
if (!protocols.containsKey(protocol)) {
boolean known = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
index 5afa9e911d..24bff5f9cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
@@ -31,6 +31,7 @@
@InterfaceStability.Evolving
public class GlobFilter implements PathFilter {
private final static PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(Path file) {
return true;
}
@@ -75,6 +76,7 @@ boolean hasPattern() {
return pattern.hasWildcard();
}
+ @Override
public boolean accept(Path path) {
return pattern.matches(path.getName()) && userFilter.accept(path);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index 8e03fc35a9..9504e1fda6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -106,6 +106,7 @@ public HarFileSystem(FileSystem fs) {
* har:///archivepath. This assumes the underlying filesystem
* to be used in case not specified.
*/
+ @Override
public void initialize(URI name, Configuration conf) throws IOException {
// decode the name
URI underLyingURI = decodeHarURI(name, conf);
@@ -247,6 +248,7 @@ private String decodeFileName(String fname)
/**
* return the top level archive.
*/
+ @Override
public Path getWorkingDirectory() {
return new Path(uri.toString());
}
@@ -636,6 +638,7 @@ private HarStatus getFileHarStatus(Path f) throws IOException {
/**
* @return null since no checksum algorithm is implemented.
*/
+ @Override
public FileChecksum getFileChecksum(Path f) {
return null;
}
@@ -668,6 +671,7 @@ public FSDataOutputStream create(Path f, int bufferSize)
throw new IOException("Har: Create not allowed");
}
+ @Override
public FSDataOutputStream create(Path f,
FsPermission permission,
boolean overwrite,
@@ -735,10 +739,12 @@ public FileStatus[] listStatus(Path f) throws IOException {
/**
* return the top level archive path.
*/
+ @Override
public Path getHomeDirectory() {
return new Path(uri.toString());
}
+ @Override
public void setWorkingDirectory(Path newDir) {
//does nothing.
}
@@ -746,6 +752,7 @@ public void setWorkingDirectory(Path newDir) {
/**
* not implemented.
*/
+ @Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new IOException("Har: mkdirs not allowed");
}
@@ -753,6 +760,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
/**
* not implemented.
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws
IOException {
throw new IOException("Har: copyfromlocalfile not allowed");
@@ -761,6 +769,7 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws
/**
* copies the file in the har filesystem to a local file.
*/
+ @Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
FileUtil.copy(this, src, getLocal(getConf()), dst, false, getConf());
@@ -769,6 +778,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
/**
* not implemented.
*/
+ @Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
throw new IOException("Har: startLocalOutput not allowed");
@@ -777,6 +787,7 @@ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
/**
* not implemented.
*/
+ @Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
throw new IOException("Har: completeLocalOutput not allowed");
@@ -785,6 +796,7 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
/**
* not implemented.
*/
+ @Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
throw new IOException("Har: setowner not allowed");
@@ -793,6 +805,7 @@ public void setOwner(Path p, String username, String groupname)
/**
* Not implemented.
*/
+ @Override
public void setPermission(Path p, FsPermission permisssion)
throws IOException {
throw new IOException("Har: setPermission not allowed");
@@ -825,6 +838,7 @@ private static class HarFsInputStream extends FSInputStream {
this.end = start + length;
}
+ @Override
public synchronized int available() throws IOException {
long remaining = end - underLyingStream.getPos();
if (remaining > (long)Integer.MAX_VALUE) {
@@ -833,6 +847,7 @@ public synchronized int available() throws IOException {
return (int) remaining;
}
+ @Override
public synchronized void close() throws IOException {
underLyingStream.close();
super.close();
@@ -847,15 +862,18 @@ public void mark(int readLimit) {
/**
* reset is not implemented
*/
+ @Override
public void reset() throws IOException {
throw new IOException("reset not implemented.");
}
+ @Override
public synchronized int read() throws IOException {
int ret = read(oneBytebuff, 0, 1);
return (ret <= 0) ? -1: (oneBytebuff[0] & 0xff);
}
+ @Override
public synchronized int read(byte[] b) throws IOException {
int ret = read(b, 0, b.length);
if (ret != -1) {
@@ -867,6 +885,7 @@ public synchronized int read(byte[] b) throws IOException {
/**
*
*/
+ @Override
public synchronized int read(byte[] b, int offset, int len)
throws IOException {
int newlen = len;
@@ -882,6 +901,7 @@ public synchronized int read(byte[] b, int offset, int len)
return ret;
}
+ @Override
public synchronized long skip(long n) throws IOException {
long tmpN = n;
if (tmpN > 0) {
@@ -895,10 +915,12 @@ public synchronized long skip(long n) throws IOException {
return (tmpN < 0)? -1 : 0;
}
+ @Override
public synchronized long getPos() throws IOException {
return (position - start);
}
+ @Override
public synchronized void seek(long pos) throws IOException {
if (pos < 0 || (start + pos > end)) {
throw new IOException("Failed to seek: EOF");
@@ -907,6 +929,7 @@ public synchronized void seek(long pos) throws IOException {
underLyingStream.seek(position);
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
//do not need to implement this
// hdfs in itself does seektonewsource
@@ -917,6 +940,7 @@ public boolean seekToNewSource(long targetPos) throws IOException {
/**
* implementing position readable.
*/
+ @Override
public int read(long pos, byte[] b, int offset, int length)
throws IOException {
int nlength = length;
@@ -929,6 +953,7 @@ public int read(long pos, byte[] b, int offset, int length)
/**
* position readable again.
*/
+ @Override
public void readFully(long pos, byte[] b, int offset, int length)
throws IOException {
if (start + length + pos > end) {
@@ -937,6 +962,7 @@ public void readFully(long pos, byte[] b, int offset, int length)
underLyingStream.readFully(pos + start, b, offset, length);
}
+ @Override
public void readFully(long pos, byte[] b) throws IOException {
readFully(pos, b, 0, b.length);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index 394c01f705..7db348c557 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -91,6 +91,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
* Moves files to a bad file directory on the same device, so that their
* storage will not be reused.
*/
+ @Override
public boolean reportChecksumFailure(Path p, FSDataInputStream in,
long inPos,
FSDataInputStream sums, long sumsPos) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index b0779ed82f..01368944a4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -94,6 +94,7 @@ public BlockLocation[] getBlockLocations() {
* @throws ClassCastException if the specified object is not of
* type FileStatus
*/
+ @Override
public int compareTo(Object o) {
return super.compareTo(o);
}
@@ -102,6 +103,7 @@ public int compareTo(Object o) {
* @param o the object to be compared.
* @return true if two file statuses have the same path name; false if not
*/
+ @Override
public boolean equals(Object o) {
return super.equals(o);
}
@@ -112,6 +114,7 @@ public boolean equals(Object o) {
*
* @return a hash code value for the path name.
*/
+ @Override
public int hashCode() {
return super.hashCode();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
index 1c697b7f52..5bddb96f0c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
@@ -57,7 +57,7 @@ public MD5MD5CRC32FileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
this.md5 = md5;
}
- /** {@inheritDoc} */
+ @Override
public String getAlgorithmName() {
return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC +
getCrcType().name();
@@ -73,11 +73,11 @@ public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
throw new IOException("Unknown checksum type in " + algorithm);
}
-
- /** {@inheritDoc} */
+
+ @Override
public int getLength() {return LENGTH;}
-
- /** {@inheritDoc} */
+
+ @Override
public byte[] getBytes() {
return WritableUtils.toByteArray(this);
}
@@ -92,14 +92,14 @@ public ChecksumOpt getChecksumOpt() {
return new ChecksumOpt(getCrcType(), bytesPerCRC);
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
bytesPerCRC = in.readInt();
crcPerBlock = in.readLong();
md5 = MD5Hash.read(in);
}
-
- /** {@inheritDoc} */
+
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(bytesPerCRC);
out.writeLong(crcPerBlock);
@@ -161,8 +161,8 @@ public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
+ ", md5=" + md5, e);
}
}
-
- /** {@inheritDoc} */
+
+ @Override
public String toString() {
return getAlgorithmName() + ":" + md5;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index 173e16ea41..8464e51270 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -22,7 +22,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.HadoopIllegalArgumentException;
/**
* This class contains options related to file system operations.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 74c85af48b..c0ebebfe67 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -261,6 +261,7 @@ public Path suffix(String suffix) {
return new Path(getParent(), getName()+suffix);
}
+ @Override
public String toString() {
// we can't use uri.toString(), which escapes everything, because we want
// illegal characters unescaped in the string, for glob processing, etc.
@@ -289,6 +290,7 @@ public String toString() {
return buffer.toString();
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof Path)) {
return false;
@@ -297,10 +299,12 @@ public boolean equals(Object o) {
return this.uri.equals(that.uri);
}
+ @Override
public int hashCode() {
return uri.hashCode();
}
+ @Override
public int compareTo(Object o) {
Path that = (Path)o;
return this.uri.compareTo(that.uri);
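Path delegates equals, hashCode, and compareTo to its internal URI, so the
three stay mutually consistent. A standalone sketch of the same pattern
(PathLike is a hypothetical stand-in, not Hadoop's Path):

    import java.net.URI;

    public class PathLike implements Comparable<PathLike> {
      private final URI uri;
      public PathLike(String s) { this.uri = URI.create(s); }
      @Override public boolean equals(Object o) {
        return (o instanceof PathLike) && uri.equals(((PathLike) o).uri);
      }
      @Override public int hashCode() { return uri.hashCode(); }
      @Override public int compareTo(PathLike that) { return uri.compareTo(that.uri); }
      public static void main(String[] args) {
        PathLike a = new PathLike("hdfs://nn/user");
        PathLike b = new PathLike("hdfs://nn/user");
        // equal objects must report equal hash codes and compare as 0
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()
            && a.compareTo(b) == 0);   // true
      }
    }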
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 38e991480a..b33b1a778f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -72,8 +72,10 @@ public File pathToFile(Path path) {
return new File(path.toUri().getPath());
}
+ @Override
public URI getUri() { return NAME; }
+ @Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
setConf(conf);
@@ -84,6 +86,7 @@ public TrackingFileInputStream(File f) throws IOException {
super(f);
}
+ @Override
public int read() throws IOException {
int result = super.read();
if (result != -1) {
@@ -92,6 +95,7 @@ public int read() throws IOException {
return result;
}
+ @Override
public int read(byte[] data) throws IOException {
int result = super.read(data);
if (result != -1) {
@@ -100,6 +104,7 @@ public int read(byte[] data) throws IOException {
return result;
}
+ @Override
public int read(byte[] data, int offset, int length) throws IOException {
int result = super.read(data, offset, length);
if (result != -1) {
@@ -120,15 +125,18 @@ public LocalFSFileInputStream(Path f) throws IOException {
this.fis = new TrackingFileInputStream(pathToFile(f));
}
+ @Override
public void seek(long pos) throws IOException {
fis.getChannel().position(pos);
this.position = pos;
}
+ @Override
public long getPos() throws IOException {
return this.position;
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@@ -136,11 +144,14 @@ public boolean seekToNewSource(long targetPos) throws IOException {
/*
* Just forward to the fis
*/
+ @Override
public int available() throws IOException { return fis.available(); }
+ @Override
public void close() throws IOException { fis.close(); }
@Override
public boolean markSupported() { return false; }
+ @Override
public int read() throws IOException {
try {
int value = fis.read();
@@ -153,6 +164,7 @@ public int read() throws IOException {
}
}
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
try {
int value = fis.read(b, off, len);
@@ -165,6 +177,7 @@ public int read(byte[] b, int off, int len) throws IOException {
}
}
+ @Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
ByteBuffer bb = ByteBuffer.wrap(b, off, len);
@@ -175,6 +188,7 @@ public int read(long position, byte[] b, int off, int len)
}
}
+ @Override
public long skip(long n) throws IOException {
long value = fis.skip(n);
if (value > 0) {
@@ -189,6 +203,7 @@ public FileDescriptor getFileDescriptor() throws IOException {
}
}
+ @Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException(f.toString());
@@ -210,8 +225,11 @@ private LocalFSFileOutputStream(Path f, boolean append) throws IOException {
/*
* Just forward to the fos
*/
+ @Override
public void close() throws IOException { fos.close(); }
+ @Override
public void flush() throws IOException { fos.flush(); }
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
try {
fos.write(b, off, len);
@@ -220,6 +238,7 @@ public void write(byte[] b, int off, int len) throws IOException {
}
}
+ @Override
public void write(int b) throws IOException {
try {
fos.write(b);
@@ -229,7 +248,7 @@ public void write(int b) throws IOException {
}
}
- /** {@inheritDoc} */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
if (!exists(f)) {
@@ -242,7 +261,6 @@ public FSDataOutputStream append(Path f, int bufferSize,
new LocalFSFileOutputStream(f, true), bufferSize), statistics);
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress)
@@ -264,7 +282,6 @@ private FSDataOutputStream create(Path f, boolean overwrite,
new LocalFSFileOutputStream(f, false), bufferSize), statistics);
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -276,7 +293,6 @@ public FSDataOutputStream create(Path f, FsPermission permission,
return out;
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite,
@@ -288,6 +304,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
return out;
}
+ @Override
public boolean rename(Path src, Path dst) throws IOException {
if (pathToFile(src).renameTo(pathToFile(dst))) {
return true;
@@ -302,6 +319,7 @@ public boolean rename(Path src, Path dst) throws IOException {
* @return true if the file or directory and all its contents were deleted
* @throws IOException if p is non-empty and recursive is false
*/
+ @Override
public boolean delete(Path p, boolean recursive) throws IOException {
File f = pathToFile(p);
if (f.isFile()) {
@@ -319,6 +337,7 @@ public boolean delete(Path p, boolean recursive) throws IOException {
* (Note: Returned list is not sorted in any given order,
* due to reliance on Java's {@link File#list()} API.)
*/
+ @Override
public FileStatus[] listStatus(Path f) throws IOException {
File localf = pathToFile(f);
FileStatus[] results;
@@ -356,6 +375,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
* Creates the specified directory hierarchy. Does not
* treat existence as an error.
*/
+ @Override
public boolean mkdirs(Path f) throws IOException {
if(f == null) {
throw new IllegalArgumentException("mkdirs path arg is null");
@@ -373,7 +393,6 @@ public boolean mkdirs(Path f) throws IOException {
(p2f.mkdir() || p2f.isDirectory());
}
- /** {@inheritDoc} */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
boolean b = mkdirs(f);
@@ -418,7 +437,6 @@ protected Path getInitialWorkingDirectory() {
return this.makeQualified(new Path(System.getProperty("user.dir")));
}
- /** {@inheritDoc} */
@Override
public FsStatus getStatus(Path p) throws IOException {
File partition = pathToFile(p == null ? new Path("/") : p);
@@ -430,29 +448,35 @@ public FsStatus getStatus(Path p) throws IOException {
}
// In the case of the local filesystem, we can just rename the file.
+ @Override
public void moveFromLocalFile(Path src, Path dst) throws IOException {
rename(src, dst);
}
// We can write output directly to the final location
+ @Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fsOutputFile;
}
// It's in the right place - nothing to do.
+ @Override
public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile)
throws IOException {
}
+ @Override
public void close() throws IOException {
super.close();
}
+ @Override
public String toString() {
return "LocalFS";
}
+ @Override
public FileStatus getFileStatus(Path f) throws IOException {
File path = pathToFile(f);
if (path.exists()) {
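The javadoc above pins down two contracts worth noting: mkdirs does not treat
an existing directory as an error, and delete on a non-empty directory throws
unless recursive is true. A usage sketch against the local filesystem
(requires hadoop-common on the classpath; the /tmp path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LocalFsDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dir = new Path("/tmp/localfs-demo/a/b");
        System.out.println(fs.mkdirs(dir));  // true; true again if re-run
        // recursive=true is required because the directory is non-empty
        System.out.println(fs.delete(new Path("/tmp/localfs-demo"), true));
      }
    }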
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 07870df1a6..1820c6619e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -263,6 +263,7 @@ private class Emptier implements Runnable {
}
}
+ @Override
public void run() {
if (emptierInterval == 0)
return; // trash disabled
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 1c19ce27fb..99ca4fbb80 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -262,6 +262,7 @@ public void close() throws IOException {
}
/** This optional operation is not yet supported. */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
index d3ac019a94..beea508d5d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
@@ -51,19 +51,23 @@ public FTPInputStream(InputStream stream, FTPClient client,
this.closed = false;
}
+ @Override
public long getPos() throws IOException {
return pos;
}
// We don't support seek.
+ @Override
public void seek(long pos) throws IOException {
throw new IOException("Seek not supported");
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
throw new IOException("Seek not supported");
}
+ @Override
public synchronized int read() throws IOException {
if (closed) {
throw new IOException("Stream closed");
@@ -79,6 +83,7 @@ public synchronized int read() throws IOException {
return byteRead;
}
+ @Override
public synchronized int read(byte buf[], int off, int len) throws IOException {
if (closed) {
throw new IOException("Stream closed");
@@ -95,6 +100,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException {
return result;
}
+ @Override
public synchronized void close() throws IOException {
if (closed) {
throw new IOException("Stream closed");
@@ -116,14 +122,17 @@ public synchronized void close() throws IOException {
// Not supported.
+ @Override
public boolean markSupported() {
return false;
}
+ @Override
public void mark(int readLimit) {
// Do nothing
}
+ @Override
public void reset() throws IOException {
throw new IOException("Mark not supported");
}
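FTPInputStream advertises that it cannot rewind: markSupported() returns
false, mark() is a no-op, and reset() fails loudly. A minimal sketch of that
convention for any InputStream that cannot seek backwards (hypothetical
class, same shape as the code above):

    import java.io.IOException;
    import java.io.InputStream;

    public class NoMarkStream extends InputStream {
      @Override public int read() { return -1; }                 // always EOF
      @Override public boolean markSupported() { return false; } // no rewind
      @Override public void mark(int readLimit) { /* deliberately a no-op */ }
      @Override public void reset() throws IOException {
        throw new IOException("Mark not supported");
      }
    }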
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
index 88b28ed434..0d77a78c87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
@@ -50,22 +50,27 @@ public KFSImpl(String metaServerHost, int metaServerPort,
statistics = stats;
}
+ @Override
public boolean exists(String path) throws IOException {
return kfsAccess.kfs_exists(path);
}
+ @Override
public boolean isDirectory(String path) throws IOException {
return kfsAccess.kfs_isDirectory(path);
}
+ @Override
public boolean isFile(String path) throws IOException {
return kfsAccess.kfs_isFile(path);
}
+ @Override
public String[] readdir(String path) throws IOException {
return kfsAccess.kfs_readdir(path);
}
+ @Override
public FileStatus[] readdirplus(Path path) throws IOException {
String srep = path.toUri().getPath();
KfsFileAttr[] fattr = kfsAccess.kfs_readdirplus(srep);
@@ -100,52 +105,64 @@ public FileStatus[] readdirplus(Path path) throws IOException {
}
+ @Override
public int mkdirs(String path) throws IOException {
return kfsAccess.kfs_mkdirs(path);
}
+ @Override
public int rename(String source, String dest) throws IOException {
return kfsAccess.kfs_rename(source, dest);
}
+ @Override
public int rmdir(String path) throws IOException {
return kfsAccess.kfs_rmdir(path);
}
+ @Override
public int remove(String path) throws IOException {
return kfsAccess.kfs_remove(path);
}
+ @Override
public long filesize(String path) throws IOException {
return kfsAccess.kfs_filesize(path);
}
+ @Override
public short getReplication(String path) throws IOException {
return kfsAccess.kfs_getReplication(path);
}
+ @Override
public short setReplication(String path, short replication) throws IOException {
return kfsAccess.kfs_setReplication(path, replication);
}
+ @Override
public String[][] getDataLocation(String path, long start, long len) throws IOException {
return kfsAccess.kfs_getDataLocation(path, start, len);
}
+ @Override
public long getModificationTime(String path) throws IOException {
return kfsAccess.kfs_getModificationTime(path);
}
+ @Override
public FSDataInputStream open(String path, int bufferSize) throws IOException {
return new FSDataInputStream(new KFSInputStream(kfsAccess, path,
statistics));
}
+ @Override
public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, replication, false, progress),
statistics);
}
+ @Override
public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
// when opening for append, # of replicas is ignored
return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, (short) 1, true, progress),
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
index 04c937b848..492230f064 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
@@ -53,6 +53,7 @@ public KFSInputStream(KfsAccess kfsAccess, String path,
this.fsize = 0;
}
+ @Override
public long getPos() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -60,6 +61,7 @@ public long getPos() throws IOException {
return kfsChannel.tell();
}
+ @Override
public synchronized int available() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -67,6 +69,7 @@ public synchronized int available() throws IOException {
return (int) (this.fsize - getPos());
}
+ @Override
public synchronized void seek(long targetPos) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -74,10 +77,12 @@ public synchronized void seek(long targetPos) throws IOException {
kfsChannel.seek(targetPos);
}
+ @Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
+ @Override
public synchronized int read() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -93,6 +98,7 @@ public synchronized int read() throws IOException {
return -1;
}
+ @Override
public synchronized int read(byte b[], int off, int len) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -109,6 +115,7 @@ public synchronized int read(byte b[], int off, int len) throws IOException {
return res;
}
+ @Override
public synchronized void close() throws IOException {
if (kfsChannel == null) {
return;
@@ -118,14 +125,17 @@ public synchronized void close() throws IOException {
kfsChannel = null;
}
+ @Override
public boolean markSupported() {
return false;
}
+ @Override
public void mark(int readLimit) {
// Do nothing
}
+ @Override
public void reset() throws IOException {
throw new IOException("Mark not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
index 59cea357e6..a50f750733 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
@@ -20,15 +20,10 @@
package org.apache.hadoop.fs.kfs;
import java.io.*;
-import java.net.*;
-import java.util.*;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.util.Progressable;
import org.kosmix.kosmosfs.access.KfsAccess;
@@ -60,6 +55,7 @@ public long getPos() throws IOException {
return kfsChannel.tell();
}
+ @Override
public void write(int v) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -70,6 +66,7 @@ public void write(int v) throws IOException {
write(b, 0, 1);
}
+ @Override
public void write(byte b[], int off, int len) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -80,6 +77,7 @@ public void write(byte b[], int off, int len) throws IOException {
kfsChannel.write(ByteBuffer.wrap(b, off, len));
}
+ @Override
public void flush() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -89,6 +87,7 @@ public void flush() throws IOException {
kfsChannel.sync();
}
+ @Override
public synchronized void close() throws IOException {
if (kfsChannel == null) {
return;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index af3d5148d5..972a410b53 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -40,6 +40,7 @@ public class FsPermission implements Writable {
private static final Log LOG = LogFactory.getLog(FsPermission.class);
static final WritableFactory FACTORY = new WritableFactory() {
+ @Override
public Writable newInstance() { return new FsPermission(); }
};
static { // register a ctor
@@ -124,12 +125,12 @@ public void fromShort(short n) {
set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7], (((n >>> 9) & 1) == 1) );
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
out.writeShort(toShort());
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
fromShort(in.readShort());
}
@@ -155,7 +156,7 @@ public short toShort() {
return (short)s;
}
- /** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (obj instanceof FsPermission) {
FsPermission that = (FsPermission)obj;
@@ -167,10 +168,10 @@ public boolean equals(Object obj) {
return false;
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {return toShort();}
- /** {@inheritDoc} */
+ @Override
public String toString() {
String str = useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL;
if(stickyBit) {
@@ -300,9 +301,11 @@ private static class ImmutableFsPermission extends FsPermission {
public ImmutableFsPermission(short permission) {
super(permission);
}
+ @Override
public FsPermission applyUMask(FsPermission umask) {
throw new UnsupportedOperationException();
}
+ @Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
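The fromShort shifts above encode the standard Unix layout: bits 0-2 are the
"other" action, 3-5 group, 6-8 user, and bit 9 the sticky bit. A worked
example decoding mode 0644 with the same shifts (standalone, names are
illustrative):

    public class ModeBits {
      public static void main(String[] args) {
        short n = 0644;                          // rw-r--r--
        int user  = (n >>> 6) & 7;               // 6
        int group = (n >>> 3) & 7;               // 4
        int other =  n        & 7;               // 4
        boolean sticky = ((n >>> 9) & 1) == 1;   // false
        System.out.printf("%d%d%d sticky=%b%n", user, group, other, sticky);
      }
    }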
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
index f47226f1e2..bc9e392a87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
@@ -32,6 +32,7 @@
@InterfaceStability.Unstable
public class PermissionStatus implements Writable {
static final WritableFactory FACTORY = new WritableFactory() {
+ @Override
public Writable newInstance() { return new PermissionStatus(); }
};
static { // register a ctor
@@ -42,9 +43,11 @@ public class PermissionStatus implements Writable {
public static PermissionStatus createImmutable(
String user, String group, FsPermission permission) {
return new PermissionStatus(user, group, permission) {
+ @Override
public PermissionStatus applyUMask(FsPermission umask) {
throw new UnsupportedOperationException();
}
+ @Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
@@ -82,14 +85,14 @@ public PermissionStatus applyUMask(FsPermission umask) {
return this;
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
username = Text.readString(in, Text.DEFAULT_MAX_LEN);
groupname = Text.readString(in, Text.DEFAULT_MAX_LEN);
permission = FsPermission.read(in);
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
write(out, username, groupname, permission);
}
@@ -115,7 +118,7 @@ public static void write(DataOutput out,
permission.write(out);
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
return username + ":" + groupname + ":" + permission;
}
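The readFields/write pair above follows the Writable discipline: write must
emit exactly the fields, types, and order that readFields consumes. A
round-trip sketch of the same discipline with plain java.io (hypothetical
Perm class, not Hadoop's):

    import java.io.*;

    public class WritableRoundTrip {
      static class Perm {
        String user, group; short mode;
        void write(DataOutput out) throws IOException {
          out.writeUTF(user); out.writeUTF(group); out.writeShort(mode);
        }
        void readFields(DataInput in) throws IOException {
          user = in.readUTF(); group = in.readUTF(); mode = in.readShort();
        }
      }
      public static void main(String[] args) throws IOException {
        Perm p = new Perm();
        p.user = "alice"; p.group = "staff"; p.mode = 0644;
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        p.write(new DataOutputStream(bos));
        Perm q = new Perm();
        q.readFields(new DataInputStream(
            new ByteArrayInputStream(bos.toByteArray())));
        // prints alice:staff:644, mirroring PermissionStatus.toString()
        System.out.println(q.user + ":" + q.group + ":"
            + Integer.toOctalString(q.mode));
      }
    }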
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
index 6667d62189..4adc306633 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
@@ -83,6 +83,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
private static final Log LOG =
LogFactory.getLog(Jets3tFileSystemStore.class.getName());
+ @Override
public void initialize(URI uri, Configuration conf) throws IOException {
this.conf = conf;
@@ -108,6 +109,7 @@ public void initialize(URI uri, Configuration conf) throws IOException {
);
}
+ @Override
public String getVersion() throws IOException {
return FILE_SYSTEM_VERSION_VALUE;
}
@@ -123,14 +125,17 @@ private void delete(String key) throws IOException {
}
}
+ @Override
public void deleteINode(Path path) throws IOException {
delete(pathToKey(path));
}
+ @Override
public void deleteBlock(Block block) throws IOException {
delete(blockToKey(block));
}
+ @Override
public boolean inodeExists(Path path) throws IOException {
InputStream in = get(pathToKey(path), true);
if (in == null) {
@@ -140,6 +145,7 @@ public boolean inodeExists(Path path) throws IOException {
return true;
}
+ @Override
public boolean blockExists(long blockId) throws IOException {
InputStream in = get(blockToKey(blockId), false);
if (in == null) {
@@ -203,10 +209,12 @@ private void checkMetadata(S3Object object) throws S3FileSystemException,
}
}
+ @Override
public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path), true));
}
+ @Override
public File retrieveBlock(Block block, long byteRangeStart)
throws IOException {
File fileBlock = null;
@@ -249,6 +257,7 @@ private File newBackupFile() throws IOException {
return result;
}
+ @Override
public Set<Path> listSubPaths(Path path) throws IOException {
try {
String prefix = pathToKey(path);
@@ -270,6 +279,7 @@ public Set<Path> listSubPaths(Path path) throws IOException {
}
}
+ @Override
public Set<Path> listDeepSubPaths(Path path) throws IOException {
try {
String prefix = pathToKey(path);
@@ -311,10 +321,12 @@ private void put(String key, InputStream in, long length, boolean storeMetadata)
}
}
+ @Override
public void storeINode(Path path, INode inode) throws IOException {
put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true);
}
+ @Override
public void storeBlock(Block block, File file) throws IOException {
BufferedInputStream in = null;
try {
@@ -354,6 +366,7 @@ private String blockToKey(Block block) {
return blockToKey(block.getId());
}
+ @Override
public void purge() throws IOException {
try {
S3Object[] objects = s3Service.listObjects(bucket);
@@ -368,6 +381,7 @@ public void purge() throws IOException {
}
}
+ @Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Filesystem, ");
sb.append(bucket.getName()).append("\n");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
index f82755781e..416bfb17c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
@@ -61,6 +61,7 @@ public static void main(String[] args) throws Exception {
System.exit(res);
}
+ @Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
@@ -195,6 +196,7 @@ interface Store {
class UnversionedStore implements Store {
+ @Override
public Set<Path> listAllPaths() throws IOException {
try {
String prefix = urlEncode(Path.SEPARATOR);
@@ -212,6 +214,7 @@ public Set<Path> listAllPaths() throws IOException {
}
}
+ @Override
public void deleteINode(Path path) throws IOException {
delete(pathToKey(path));
}
@@ -227,6 +230,7 @@ private void delete(String key) throws IOException {
}
}
+ @Override
public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path)));
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
index 5a5d628adb..81ef31446e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
@@ -206,6 +206,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
}
/** This optional operation is not yet supported. */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
@@ -298,6 +299,7 @@ private boolean renameRecursive(Path src, Path dst) throws IOException {
return true;
}
+ @Override
public boolean delete(Path path, boolean recursive) throws IOException {
Path absolutePath = makeAbsolute(path);
INode inode = store.retrieveINode(absolutePath);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
index c2293ba682..400419c110 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
@@ -49,6 +49,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
private S3Service s3Service;
private S3Bucket bucket;
+ @Override
public void initialize(URI uri, Configuration conf) throws IOException {
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
@@ -63,6 +64,7 @@ public void initialize(URI uri, Configuration conf) throws IOException {
bucket = new S3Bucket(uri.getHost());
}
+ @Override
public void storeFile(String key, File file, byte[] md5Hash)
throws IOException {
@@ -90,6 +92,7 @@ public void storeFile(String key, File file, byte[] md5Hash)
}
}
+ @Override
public void storeEmptyFile(String key) throws IOException {
try {
S3Object object = new S3Object(key);
@@ -102,6 +105,7 @@ public void storeEmptyFile(String key) throws IOException {
}
}
+ @Override
public FileMetadata retrieveMetadata(String key) throws IOException {
try {
S3Object object = s3Service.getObjectDetails(bucket, key);
@@ -117,6 +121,7 @@ public FileMetadata retrieveMetadata(String key) throws IOException {
}
}
+ @Override
public InputStream retrieve(String key) throws IOException {
try {
S3Object object = s3Service.getObject(bucket, key);
@@ -127,6 +132,7 @@ public InputStream retrieve(String key) throws IOException {
}
}
+ @Override
public InputStream retrieve(String key, long byteRangeStart)
throws IOException {
try {
@@ -139,11 +145,13 @@ public InputStream retrieve(String key, long byteRangeStart)
}
}
+ @Override
public PartialListing list(String prefix, int maxListingLength)
throws IOException {
return list(prefix, maxListingLength, null, false);
}
+ @Override
public PartialListing list(String prefix, int maxListingLength, String priorLastKey,
boolean recurse) throws IOException {
@@ -175,6 +183,7 @@ private PartialListing list(String prefix, String delimiter,
}
}
+ @Override
public void delete(String key) throws IOException {
try {
s3Service.deleteObject(bucket, key);
@@ -183,6 +192,7 @@ public void delete(String key) throws IOException {
}
}
+ @Override
public void copy(String srcKey, String dstKey) throws IOException {
try {
s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
@@ -192,6 +202,7 @@ public void copy(String srcKey, String dstKey) throws IOException {
}
}
+ @Override
public void purge(String prefix) throws IOException {
try {
S3Object[] objects = s3Service.listObjects(bucket, prefix, null);
@@ -203,6 +214,7 @@ public void purge(String prefix) throws IOException {
}
}
+ @Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Native Filesystem, ");
sb.append(bucket.getName()).append("\n");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index eea429a97e..e1aeea94ac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -150,6 +150,7 @@ protected IllegalNumberOfArgumentsException(int want, int got) {
actual = got;
}
+ @Override
public String getMessage() {
return "expected " + expected + " but got " + actual;
}
@@ -165,6 +166,7 @@ public TooManyArgumentsException(int expected, int actual) {
super(expected, actual);
}
+ @Override
public String getMessage() {
return "Too many arguments: " + super.getMessage();
}
@@ -180,6 +182,7 @@ public NotEnoughArgumentsException(int expected, int actual) {
super(expected, actual);
}
+ @Override
public String getMessage() {
return "Not enough arguments: " + super.getMessage();
}
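The three getMessage() overrides above chain rather than rebuild: each
subclass prefixes the base "expected N but got M" text. A standalone sketch
of the same chaining (hypothetical exception names):

    public class ArgCountDemo {
      static class WrongArgCount extends IllegalArgumentException {
        final int expected, actual;
        WrongArgCount(int want, int got) { expected = want; actual = got; }
        @Override public String getMessage() {
          return "expected " + expected + " but got " + actual;
        }
      }
      static class TooMany extends WrongArgCount {
        TooMany(int want, int got) { super(want, got); }
        @Override public String getMessage() {
          return "Too many arguments: " + super.getMessage();
        }
      }
      public static void main(String[] args) {
        // prints: Too many arguments: expected 2 but got 5
        System.out.println(new TooMany(2, 5).getMessage());
      }
    }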
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
index 71bfc9510d..bc1d8af951 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
@@ -114,6 +114,7 @@ private boolean moveToTrash(PathData item) throws IOException {
static class Rmr extends Rm {
public static final String NAME = "rmr";
+ @Override
protected void processOptions(LinkedList<String> args) throws IOException {
args.addFirst("-r");
super.processOptions(args);
@@ -136,6 +137,7 @@ static class Rmdir extends FsCommand {
private boolean ignoreNonEmpty = false;
+ @Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(
1, Integer.MAX_VALUE, "-ignore-fail-on-non-empty");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index 5ae0d67c57..8d598012ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -161,6 +161,7 @@ public TextRecordInputStream(FileStatus f) throws IOException {
outbuf = new DataOutputBuffer();
}
+ @Override
public int read() throws IOException {
int ret;
if (null == inbuf || -1 == (ret = inbuf.read())) {
@@ -180,6 +181,7 @@ public int read() throws IOException {
return ret;
}
+ @Override
public void close() throws IOException {
r.close();
super.close();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 3f397327de..2541be393b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -73,6 +73,7 @@ public String getCommandName() {
// abstract method that normally is invoked by runall() which is
// overridden below
+ @Override
protected void run(Path path) throws IOException {
throw new RuntimeException("not supposed to get here");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
index b53d2820de..04574cf673 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
@@ -380,6 +380,7 @@ private static int findLongestDirPrefix(String cwd, String path, boolean isDir)
* as given on the commandline, or the full path
* @return String of the path
*/
+ @Override
public String toString() {
String scheme = uri.getScheme();
// No interpretation of symbols. Just decode % escaped chars.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 85426fa4ff..95d0a2d456 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -102,6 +102,7 @@ public ChRootedFileSystem(final URI uri, Configuration conf)
* for this FileSystem
* @param conf the configuration
*/
+ @Override
public void initialize(final URI name, final Configuration conf)
throws IOException {
super.initialize(name, conf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
index f92108cfe7..143ce68ebb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
@@ -20,10 +20,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-
import org.apache.hadoop.fs.Path;
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 1c0c8dac4d..6031daf118 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -164,6 +164,7 @@ public String getScheme() {
* this FileSystem
* @param conf the configuration
*/
+ @Override
public void initialize(final URI theUri, final Configuration conf)
throws IOException {
super.initialize(theUri, conf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
index 871e3d8a63..e0f62e453b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
@@ -42,7 +42,8 @@ public boolean equals(Object o) {
return super.equals(o);
}
- public int hashCode() {
+ @Override
+ public int hashCode() {
return super.hashCode();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index a4ed255deb..5287581073 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -892,6 +892,7 @@ private String createWithRetries(final String path, final byte[] data,
final List<ACL> acl, final CreateMode mode)
throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<String>() {
+ @Override
public String run() throws KeeperException, InterruptedException {
return zkClient.create(path, data, acl, mode);
}
@@ -901,6 +902,7 @@ public String run() throws KeeperException, InterruptedException {
private byte[] getDataWithRetries(final String path, final boolean watch,
final Stat stat) throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<byte[]>() {
+ @Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(path, watch, stat);
}
@@ -910,6 +912,7 @@ public byte[] run() throws KeeperException, InterruptedException {
private Stat setDataWithRetries(final String path, final byte[] data,
final int version) throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<Stat>() {
+ @Override
public Stat run() throws KeeperException, InterruptedException {
return zkClient.setData(path, data, version);
}
@@ -919,6 +922,7 @@ public Stat run() throws KeeperException, InterruptedException {
private void deleteWithRetries(final String path, final int version)
throws KeeperException, InterruptedException {
zkDoWithRetries(new ZKAction<Void>() {
+ @Override
public Void run() throws KeeperException, InterruptedException {
zkClient.delete(path, version);
return null;
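Each of the four helpers above wraps one ZooKeeper call in a typed ZKAction
so a single zkDoWithRetries helper owns the retry loop. A self-contained
sketch of that idiom (Action/withRetries are illustrative stand-ins, not
Hadoop's API):

    public class RetryDemo {
      interface Action<T> { T run() throws Exception; }

      static <T> T withRetries(Action<T> action, int attempts) throws Exception {
        for (int i = 0; ; i++) {
          try { return action.run(); }
          catch (Exception e) { if (i + 1 >= attempts) throw e; }
        }
      }

      public static void main(String[] args) throws Exception {
        String result = withRetries(new Action<String>() {
          @Override public String run() { return "created /lock"; }
        }, 3);
        System.out.println(result);
      }
    }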
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index d4ae0899fb..85912c7c76 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -56,6 +56,7 @@ public enum HAServiceState {
this.name = name;
}
+ @Override
public String toString() {
return name;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
index 06fb648f42..4898b38726 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
@@ -184,6 +184,7 @@ private FenceMethodWithArg(FenceMethod method, String arg) {
this.arg = arg;
}
+ @Override
public String toString() {
return method.getClass().getCanonicalName() + "(" + arg + ")";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 537fba942d..343693e95c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -274,6 +274,7 @@ private static class LogAdapter implements com.jcraft.jsch.Logger {
static final Log LOG = LogFactory.getLog(
SshFenceByTcpPort.class.getName() + ".jsch");
+ @Override
public boolean isEnabled(int level) {
switch (level) {
case com.jcraft.jsch.Logger.DEBUG:
@@ -291,6 +292,7 @@ public boolean isEnabled(int level) {
}
}
+ @Override
public void log(int level, String message) {
switch (level) {
case com.jcraft.jsch.Logger.DEBUG:
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 7bf3c16e8c..77e9e1601a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -474,7 +474,7 @@ public void addInternalServlet(String name, String pathSpec,
}
}
- /** {@inheritDoc} */
+ @Override
public void addFilter(String name, String classname,
Map<String, String> parameters) {
@@ -494,7 +494,7 @@ public void addFilter(String name, String classname,
filterNames.add(name);
}
- /** {@inheritDoc} */
+ @Override
public void addGlobalFilter(String name, String classname,
Map<String, String> parameters) {
final String[] ALL_URLS = { "/*" };
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
index bb2f163fe4..6bd9efc689 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
@@ -164,16 +164,18 @@ protected AbstractMapWritable() {
}
/** @return the conf */
+ @Override
public Configuration getConf() {
return conf.get();
}
/** @param conf the conf to set */
+ @Override
public void setConf(Configuration conf) {
this.conf.set(conf);
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
// First write out the size of the class table and any classes that are
@@ -187,7 +189,7 @@ public void write(DataOutput out) throws IOException {
}
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
// Get the number of "unknown" classes
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
index 875d6efdc2..122aa5ca1e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
@@ -88,6 +88,7 @@ public Object toArray() {
public Writable[] get() { return values; }
+ @Override
public void readFields(DataInput in) throws IOException {
values = new Writable[in.readInt()]; // construct values
for (int i = 0; i < values.length; i++) {
@@ -97,6 +98,7 @@ public void readFields(DataInput in) throws IOException {
}
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(values.length); // write values
for (int i = 0; i < values.length; i++) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
index 71279b4f6d..0079079a79 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
@@ -57,12 +57,14 @@ public boolean get() {
/**
*/
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readBoolean();
}
/**
*/
+ @Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
index ff926c11c1..ffcdea2c9a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
@@ -39,10 +39,12 @@ public ByteWritable() {}
/** Return the value of this ByteWritable. */
public byte get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readByte();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeByte(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
index 012a3bc9d7..7e42a36cb7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
@@ -81,6 +81,7 @@ public byte[] copyBytes() {
* if you need the returned array to be precisely the length of the data.
* @return The data is only valid between 0 and getLength() - 1.
*/
+ @Override
public byte[] getBytes() {
return bytes;
}
@@ -97,6 +98,7 @@ public byte[] get() {
/**
* Get the current size of the buffer.
*/
+ @Override
public int getLength() {
return size;
}
@@ -171,6 +173,7 @@ public void set(byte[] newData, int offset, int length) {
}
// inherit javadoc
+ @Override
public void readFields(DataInput in) throws IOException {
setSize(0); // clear the old data
setSize(in.readInt());
@@ -178,6 +181,7 @@ public void readFields(DataInput in) throws IOException {
}
// inherit javadoc
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(size);
out.write(bytes, 0, size);
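The getBytes() caveat above matters in practice: the backing array is often
longer than the logical data, so reads must be bounded by getLength(), or
copyBytes() used for an exact-size copy. A usage sketch (requires
hadoop-common on the classpath):

    import org.apache.hadoop.io.BytesWritable;

    public class BytesWritableDemo {
      public static void main(String[] args) {
        BytesWritable w = new BytesWritable(new byte[] {1, 2, 3});
        w.setSize(2);                    // logical length is now 2
        byte[] backing = w.getBytes();   // may be longer than 2
        byte[] exact = w.copyBytes();    // exactly getLength() bytes
        System.out.println(backing.length >= w.getLength());  // true
        System.out.println(exact.length);                     // 2
      }
    }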
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
index ad3164b2d2..6550e1f2fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
@@ -45,6 +45,7 @@ public abstract class CompressedWritable implements Writable {
public CompressedWritable() {}
+ @Override
public final void readFields(DataInput in) throws IOException {
compressed = new byte[in.readInt()];
in.readFully(compressed, 0, compressed.length);
@@ -70,6 +71,7 @@ protected void ensureInflated() {
protected abstract void readFieldsCompressed(DataInput in)
throws IOException;
+ @Override
public final void write(DataOutput out) throws IOException {
if (compressed == null) {
ByteArrayOutputStream deflated = new ByteArrayOutputStream();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
index 469d3ff863..2cd59d75dc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
@@ -21,8 +21,6 @@
import java.io.DataInputStream;
import java.io.InputStream;
import java.nio.ByteBuffer;
-import java.util.LinkedList;
-import java.util.List;
public class DataInputByteBuffer extends DataInputStream {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
index 6cd1f49722..2b8e259464 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
@@ -72,6 +72,7 @@ public DefaultStringifier(Configuration conf, Class<T> c) {
}
}
+ @Override
public T fromString(String str) throws IOException {
try {
byte[] bytes = Base64.decodeBase64(str.getBytes("UTF-8"));
@@ -83,6 +84,7 @@ public T fromString(String str) throws IOException {
}
}
+ @Override
public String toString(T obj) throws IOException {
outBuf.reset();
serializer.serialize(obj);
@@ -91,6 +93,7 @@ public String toString(T obj) throws IOException {
return new String(Base64.encodeBase64(buf));
}
+ @Override
public void close() throws IOException {
inBuf.close();
outBuf.close();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
index a984cd4ef5..5cc326fe3c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
@@ -42,10 +42,12 @@ public DoubleWritable(double value) {
set(value);
}
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readDouble();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeDouble(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
index c1ff1ca3bf..dc430cc29c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
@@ -23,7 +23,6 @@
import java.io.IOException;
import java.util.EnumSet;
import java.util.Iterator;
-import java.util.Collection;
import java.util.AbstractCollection;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -46,8 +45,11 @@ public class EnumSetWritable<E extends Enum<E>> extends AbstractCollection<E>
EnumSetWritable() {
}
+ @Override
public Iterator<E> iterator() { return value.iterator(); }
+ @Override
public int size() { return value.size(); }
+ @Override
public boolean add(E e) {
if (value == null) {
value = EnumSet.of(e);
@@ -109,7 +111,7 @@ public EnumSet<E> get() {
return value;
}
- /** {@inheritDoc} */
+ @Override
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
@@ -127,7 +129,7 @@ else if (length == 0) {
}
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
if (this.value == null) {
out.writeInt(-1);
@@ -152,6 +154,7 @@ public void write(DataOutput out) throws IOException {
* Returns true if <code>o</code> is an EnumSetWritable with the same value,
* or both are null.
*/
+ @Override
public boolean equals(Object o) {
if (o == null) {
throw new IllegalArgumentException("null argument passed in equal().");
@@ -180,27 +183,25 @@ public Class getElementType() {
return elementType;
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
if (value == null)
return 0;
return (int) value.hashCode();
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
if (value == null)
return "(null)";
return value.toString();
}
- /** {@inheritDoc} */
@Override
public Configuration getConf() {
return this.conf;
}
- /** {@inheritDoc} */
@Override
public void setConf(Configuration conf) {
this.conf = conf;
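
Aside: the point of swapping {@inheritDoc} comments for @Override throughout this patch is that the annotation is compiler-checked (and, since Java 6, legal on methods implementing interface members, which is what most of these are). An accidental overload then fails to compile instead of silently misbehaving. A hypothetical illustration:

    class Point {
      int x, y;

      // Compiles, but OVERLOADS Object.equals(Object) instead of overriding it;
      // HashMap and HashSet will never call this version.
      public boolean equals(Point other) {
        return other != null && x == other.x && y == other.y;
      }

      // With the annotation, the same mistake is a compile-time error:
      //   @Override
      //   public boolean equals(Point other) { ... }
      //   error: method does not override or implement a method from a supertype
    }
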
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
index 4ade2c4d62..21e4cc4f5b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
@@ -39,10 +39,12 @@ public FloatWritable() {}
/** Return the value of this FloatWritable. */
public float get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readFloat();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeFloat(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
index 8268a5a915..7cfeed7f93 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
@@ -114,11 +114,13 @@ public Writable get() {
return instance;
}
+ @Override
public String toString() {
return "GW[" + (instance != null ? ("class=" + instance.getClass().getName() +
",value=" + instance.toString()) : "(null)") + "]";
}
+ @Override
public void readFields(DataInput in) throws IOException {
type = in.readByte();
Class<? extends Writable> clazz = getTypes()[type & 0xff];
@@ -131,6 +133,7 @@ public void readFields(DataInput in) throws IOException {
instance.readFields(in);
}
+ @Override
public void write(DataOutput out) throws IOException {
if (type == NOT_SET || instance == null)
throw new IOException("The GenericWritable has NOT been set correctly. type="
@@ -145,10 +148,12 @@ public void write(DataOutput out) throws IOException {
*/
abstract protected Class<? extends Writable>[] getTypes();
+ @Override
public Configuration getConf() {
return conf;
}
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 819f075812..a3315a869e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -272,9 +272,11 @@ public static void closeSocket(Socket sock) {
* The /dev/null of OutputStreams.
*/
public static class NullOutputStream extends OutputStream {
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
}
+ @Override
public void write(int b) throws IOException {
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
index 6a44d81db6..f656d028cb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
@@ -42,10 +42,12 @@ public IntWritable() {}
/** Return the value of this IntWritable. */
public int get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readInt();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
index b9d64d904d..6dec4aa618 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
@@ -42,15 +42,18 @@ public LongWritable() {}
/** Return the value of this LongWritable. */
public long get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readLong();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeLong(value);
}
/** Returns true iff <code>o</code> is a LongWritable with the same value. */
+ @Override
public boolean equals(Object o) {
if (!(o instanceof LongWritable))
return false;
@@ -58,17 +61,20 @@ public boolean equals(Object o) {
return this.value == other.value;
}
+ @Override
public int hashCode() {
return (int)value;
}
/** Compares two LongWritables. */
+ @Override
public int compareTo(LongWritable o) {
long thisValue = this.value;
long thatValue = o.value;
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
@@ ... @@ public class MD5Hash implements WritableComparable<MD5Hash> {
public static final int MD5_LEN = 16;
private static ThreadLocal<MessageDigest> DIGESTER_FACTORY = new ThreadLocal<MessageDigest>() {
+ @Override
protected MessageDigest initialValue() {
try {
return MessageDigest.getInstance("MD5");
@@ -65,6 +66,7 @@ public MD5Hash(byte[] digest) {
}
// javadoc from Writable
+ @Override
public void readFields(DataInput in) throws IOException {
in.readFully(digest);
}
@@ -77,6 +79,7 @@ public static MD5Hash read(DataInput in) throws IOException {
}
// javadoc from Writable
+ @Override
public void write(DataOutput out) throws IOException {
out.write(digest);
}
@@ -155,6 +158,7 @@ public int quarterDigest() {
/** Returns true iff <code>o</code> is an MD5Hash whose digest contains the
* same values. */
+ @Override
public boolean equals(Object o) {
if (!(o instanceof MD5Hash))
return false;
@@ -165,12 +169,14 @@ public boolean equals(Object o) {
/** Returns a hash code value for this object.
* Only uses the first 4 bytes, since md5s are evenly distributed.
*/
+ @Override
public int hashCode() {
return quarterDigest();
}
/** Compares this object with the specified object for order.*/
+ @Override
public int compareTo(MD5Hash that) {
return WritableComparator.compareBytes(this.digest, 0, MD5_LEN,
that.digest, 0, MD5_LEN);
@@ -182,6 +188,7 @@ public Comparator() {
super(MD5Hash.class);
}
+ @Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1, MD5_LEN, b2, s2, MD5_LEN);
@@ -196,6 +203,7 @@ public int compare(byte[] b1, int s1, int l1,
{'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
/** Returns a string representation of this object. */
+ @Override
public String toString() {
StringBuilder buf = new StringBuilder(MD5_LEN*2);
for (int i = 0; i < MD5_LEN; i++) {
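
Aside: a short usage sketch for MD5Hash (input string arbitrary; exceptions elided). The ThreadLocal digester shown above is what makes the static factory safe to call concurrently:

    byte[] payload = "hello, world".getBytes("UTF-8");

    MD5Hash hash = MD5Hash.digest(payload);
    System.out.println(hash);                  // 32 lowercase hex digits
    System.out.println(hash.quarterDigest());  // first 4 bytes as an int; the basis of hashCode()
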
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 9c14402d75..7e7d855f82 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -296,6 +296,7 @@ public static void setIndexInterval(Configuration conf, int interval) {
}
/** Close the map. */
+ @Override
public synchronized void close() throws IOException {
data.close();
index.close();
@@ -723,6 +724,7 @@ public synchronized WritableComparable getClosest(WritableComparable key,
}
/** Close the map. */
+ @Override
public synchronized void close() throws IOException {
if (!indexClosed) {
index.close();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
index 377c9c1656..72c7098d7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
@@ -55,27 +55,27 @@ public MapWritable(MapWritable other) {
copy(other);
}
- /** {@inheritDoc} */
+ @Override
public void clear() {
instance.clear();
}
- /** {@inheritDoc} */
+ @Override
public boolean containsKey(Object key) {
return instance.containsKey(key);
}
- /** {@inheritDoc} */
+ @Override
public boolean containsValue(Object value) {
return instance.containsValue(value);
}
- /** {@inheritDoc} */
+ @Override
public Set<Map.Entry<Writable, Writable>> entrySet() {
return instance.entrySet();
}
- /** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
@@ -93,27 +93,27 @@ public boolean equals(Object obj) {
return false;
}
- /** {@inheritDoc} */
+ @Override
public Writable get(Object key) {
return instance.get(key);
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
return 1 + this.instance.hashCode();
}
- /** {@inheritDoc} */
+ @Override
public boolean isEmpty() {
return instance.isEmpty();
}
- /** {@inheritDoc} */
+ @Override
public Set<Writable> keySet() {
return instance.keySet();
}
- /** {@inheritDoc} */
+ @Override
@SuppressWarnings("unchecked")
public Writable put(Writable key, Writable value) {
addToMap(key.getClass());
@@ -121,31 +121,30 @@ public Writable put(Writable key, Writable value) {
return instance.put(key, value);
}
- /** {@inheritDoc} */
+ @Override
public void putAll(Map<? extends Writable, ? extends Writable> t) {
for (Map.Entry<? extends Writable, ? extends Writable> e: t.entrySet()) {
put(e.getKey(), e.getValue());
}
}
- /** {@inheritDoc} */
+ @Override
public Writable remove(Object key) {
return instance.remove(key);
}
- /** {@inheritDoc} */
+ @Override
public int size() {
return instance.size();
}
- /** {@inheritDoc} */
+ @Override
public Collection<Writable> values() {
return instance.values();
}
// Writable
- /** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
@@ -164,7 +163,6 @@ public void write(DataOutput out) throws IOException {
}
}
- /** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
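
Aside: MapWritable is an ordinary java.util.Map that can also serialize itself; keys and values are arbitrary Writables, so callers cast on the way out. Sketch (keys and values made up):

    MapWritable map = new MapWritable();
    map.put(new Text("count"), new IntWritable(7));
    map.put(new Text("name"), new Text("block-1"));

    IntWritable count = (IntWritable) map.get(new Text("count"));
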
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
index beb7b17ce7..77c590fdb6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
@@ -35,6 +35,7 @@ private NullWritable() {} // no public ctor
/** Returns the single instance of this class. */
public static NullWritable get() { return THIS; }
+ @Override
public String toString() {
return "(null)";
}
@@ -46,8 +47,11 @@ public String toString() {
public int compareTo(NullWritable other) {
return 0;
}
+ @Override
public boolean equals(Object other) { return other instanceof NullWritable; }
+ @Override
public void readFields(DataInput in) throws IOException {}
+ @Override
public void write(DataOutput out) throws IOException {}
/** A Comparator "optimized" for NullWritable. */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
index c555111097..0f0f5c7405 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
@@ -66,15 +66,18 @@ public void set(Object instance) {
this.instance = instance;
}
+ @Override
public String toString() {
return "OW[class=" + declaredClass + ",value=" + instance + "]";
}
+ @Override
public void readFields(DataInput in) throws IOException {
readObject(in, this, this.conf);
}
+ @Override
public void write(DataOutput out) throws IOException {
writeObject(out, instance, declaredClass, conf);
}
@@ -99,6 +102,7 @@ public NullInstance(Class declaredClass, Configuration conf) {
super(conf);
this.declaredClass = declaredClass;
}
+ @Override
public void readFields(DataInput in) throws IOException {
String className = UTF8.readString(in);
declaredClass = PRIMITIVE_NAMES.get(className);
@@ -110,6 +114,7 @@ public void readFields(DataInput in) throws IOException {
}
}
}
+ @Override
public void write(DataOutput out) throws IOException {
UTF8.writeString(out, declaredClass.getName());
}
@@ -375,10 +380,12 @@ public static Class<?> loadClass(Configuration conf, String className) {
return declaredClass;
}
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
+ @Override
public Configuration getConf() {
return this.conf;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
index b7605db9a9..15a396dc2b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
@@ -50,6 +50,7 @@ public class OutputBuffer extends FilterOutputStream {
private static class Buffer extends ByteArrayOutputStream {
public byte[] getData() { return buf; }
public int getLength() { return count; }
+ @Override
public void reset() { count = 0; }
public void write(InputStream in, int len) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index 046d9e4b73..f1545b69c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -194,6 +194,7 @@ private ReadaheadRequestImpl(String identifier, FileDescriptor fd, long off, long len) {
this.len = len;
}
+ @Override
public void run() {
if (canceled) return;
// There's a very narrow race here that the file will close right at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
index 6bc798e7e3..b30c4a4da4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
@@ -24,7 +24,6 @@
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 293fdbbb93..8a14860773 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -625,15 +625,18 @@ private void reset(DataInputStream in, int length) throws IOException {
dataSize = length;
}
+ @Override
public int getSize() {
return dataSize;
}
+ @Override
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
outStream.write(data, 0, dataSize);
}
+ @Override
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
throw
@@ -666,10 +669,12 @@ private void reset(DataInputStream in, int length) throws IOException {
dataSize = length;
}
+ @Override
public int getSize() {
return dataSize;
}
+ @Override
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
if (decompressedStream == null) {
@@ -687,6 +692,7 @@ public void writeUncompressedBytes(DataOutputStream outStream)
}
}
+ @Override
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
outStream.write(data, 0, dataSize);
@@ -728,6 +734,7 @@ public TreeMap<Text, Text> getMetadata() {
return new TreeMap<Text, Text>(this.theMetadata);
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(this.theMetadata.size());
Iterator<Map.Entry<Text, Text>> iter =
@@ -739,6 +746,7 @@ public void write(DataOutput out) throws IOException {
}
}
+ @Override
public void readFields(DataInput in) throws IOException {
int sz = in.readInt();
if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object");
@@ -752,6 +760,7 @@ public void readFields(DataInput in) throws IOException {
}
}
+ @Override
public boolean equals(Object other) {
if (other == null) {
return false;
@@ -788,11 +797,13 @@ public boolean equals(Metadata other) {
return true;
}
+ @Override
public int hashCode() {
assert false : "hashCode not designed";
return 42; // any arbitrary constant will do
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("size: ").append(this.theMetadata.size()).append("\n");
@@ -1250,6 +1261,7 @@ public void hflush() throws IOException {
Configuration getConf() { return conf; }
/** Close the file. */
+ @Override
public synchronized void close() throws IOException {
keySerializer.close();
uncompressedValSerializer.close();
@@ -1360,6 +1372,7 @@ static class RecordCompressWriter extends Writer {
}
/** Append a key/value pair. */
+ @Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
@@ -1392,6 +1405,7 @@ public synchronized void append(Object key, Object val)
}
/** Append a key/value pair. */
+ @Override
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
@@ -1449,6 +1463,7 @@ void writeBuffer(DataOutputBuffer uncompressedDataBuffer)
}
/** Compress and flush contents to dfs */
+ @Override
public synchronized void sync() throws IOException {
if (noBufferedRecords > 0) {
super.sync();
@@ -1478,6 +1493,7 @@ public synchronized void sync() throws IOException {
}
/** Close the file. */
+ @Override
public synchronized void close() throws IOException {
if (out != null) {
sync();
@@ -1486,6 +1502,7 @@ public synchronized void close() throws IOException {
}
/** Append a key/value pair. */
+ @Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
@@ -1518,6 +1535,7 @@ public synchronized void append(Object key, Object val)
}
/** Append a key/value pair. */
+ @Override
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
@@ -1960,6 +1978,7 @@ private Deserializer getDeserializer(SerializationFactory sf, Class c) {
}
/** Close the file. */
+ @Override
public synchronized void close() throws IOException {
// Return the decompressors to the pool
CodecPool.returnDecompressor(keyLenDecompressor);
@@ -2618,6 +2637,7 @@ public synchronized long getPosition() throws IOException {
}
/** Returns the name of the file. */
+ @Override
public String toString() {
return filename;
}
@@ -2948,6 +2968,7 @@ private void sort(int count) {
mergeSort.mergeSort(pointersCopy, pointers, 0, count);
}
class SeqFileComparator implements Comparator<IntWritable> {
+ @Override
public int compare(IntWritable I, IntWritable J) {
return comparator.compare(rawBuffer, keyOffsets[I.get()],
keyLengths[I.get()], rawBuffer,
@@ -3221,6 +3242,7 @@ public MergeQueue(List<SegmentDescriptor> segments,
this.tmpDir = tmpDir;
this.progress = progress;
}
+ @Override
protected boolean lessThan(Object a, Object b) {
// indicate we're making progress
if (progress != null) {
@@ -3232,6 +3254,7 @@ protected boolean lessThan(Object a, Object b) {
msa.getKey().getLength(), msb.getKey().getData(), 0,
msb.getKey().getLength()) < 0;
}
+ @Override
public void close() throws IOException {
SegmentDescriptor ms; // close inputs
while ((ms = (SegmentDescriptor)pop()) != null) {
@@ -3239,12 +3262,15 @@ public void close() throws IOException {
}
minSegment = null;
}
+ @Override
public DataOutputBuffer getKey() throws IOException {
return rawKey;
}
+ @Override
public ValueBytes getValue() throws IOException {
return rawValue;
}
+ @Override
public boolean next() throws IOException {
if (size() == 0)
return false;
@@ -3272,6 +3298,7 @@ public boolean next() throws IOException {
return true;
}
+ @Override
public Progress getProgress() {
return mergeProgress;
}
@@ -3469,6 +3496,7 @@ public boolean shouldPreserveInput() {
return preserveInput;
}
+ @Override
public int compareTo(Object o) {
SegmentDescriptor that = (SegmentDescriptor)o;
if (this.segmentLength != that.segmentLength) {
@@ -3481,6 +3509,7 @@ public int compareTo(Object o) {
compareTo(that.segmentPathName.toString());
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof SegmentDescriptor)) {
return false;
@@ -3495,6 +3524,7 @@ public boolean equals(Object o) {
return false;
}
+ @Override
public int hashCode() {
return 37 * 17 + (int) (segmentOffset^(segmentOffset>>>32));
}
@@ -3584,12 +3614,14 @@ public LinkedSegmentsDescriptor (long segmentOffset, long segmentLength,
/** The default cleanup. Subclasses can override this with a custom
* cleanup
*/
+ @Override
public void cleanup() throws IOException {
super.close();
if (super.shouldPreserveInput()) return;
parentContainer.cleanup();
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof LinkedSegmentsDescriptor)) {
return false;
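
Aside: to ground the Writer/Reader hunks above, a write/read round trip using this era's createWriter overload. The path is made up and exception handling is elided:

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/example.seq");

    SequenceFile.Writer writer =
        SequenceFile.createWriter(fs, conf, file, LongWritable.class, Text.class);
    writer.append(new LongWritable(1), new Text("first"));
    writer.append(new LongWritable(2), new Text("second"));
    writer.close();

    SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
    LongWritable key = new LongWritable();
    Text value = new Text();
    while (reader.next(key, value)) {
      System.out.println(key + "\t" + value);
    }
    reader.close();
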
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
index 9ba0023190..068ca9d40e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
@@ -87,6 +87,7 @@ public Reader(FileSystem fs, String dirName, WritableComparator comparator, Conf
}
// javadoc inherited
+ @Override
public boolean seek(WritableComparable key)
throws IOException {
return super.seek(key);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
index d870a5fd84..eee744ec6a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
@@ -57,86 +57,86 @@ public SortedMapWritable(SortedMapWritable other) {
copy(other);
}
- /** {@inheritDoc} */
+ @Override
public Comparator<? super WritableComparable> comparator() {
// Returning null means we use the natural ordering of the keys
return null;
}
- /** {@inheritDoc} */
+ @Override
public WritableComparable firstKey() {
return instance.firstKey();
}
- /** {@inheritDoc} */
+ @Override
public SortedMap<WritableComparable, Writable>
headMap(WritableComparable toKey) {
return instance.headMap(toKey);
}
- /** {@inheritDoc} */
+ @Override
public WritableComparable lastKey() {
return instance.lastKey();
}
- /** {@inheritDoc} */
+ @Override
public SortedMap<WritableComparable, Writable>
subMap(WritableComparable fromKey, WritableComparable toKey) {
return instance.subMap(fromKey, toKey);
}
- /** {@inheritDoc} */
+ @Override
public SortedMap<WritableComparable, Writable>
tailMap(WritableComparable fromKey) {
return instance.tailMap(fromKey);
}
- /** {@inheritDoc} */
+ @Override
public void clear() {
instance.clear();
}
- /** {@inheritDoc} */
+ @Override
public boolean containsKey(Object key) {
return instance.containsKey(key);
}
- /** {@inheritDoc} */
+ @Override
public boolean containsValue(Object value) {
return instance.containsValue(value);
}
- /** {@inheritDoc} */
+ @Override
public Set<Map.Entry<WritableComparable, Writable>> entrySet() {
return instance.entrySet();
}
- /** {@inheritDoc} */
+ @Override
public Writable get(Object key) {
return instance.get(key);
}
- /** {@inheritDoc} */
+ @Override
public boolean isEmpty() {
return instance.isEmpty();
}
- /** {@inheritDoc} */
+ @Override
public Set<WritableComparable> keySet() {
return instance.keySet();
}
- /** {@inheritDoc} */
+ @Override
public Writable put(WritableComparable key, Writable value) {
addToMap(key.getClass());
addToMap(value.getClass());
return instance.put(key, value);
}
- /** {@inheritDoc} */
+ @Override
public void putAll(Map<? extends WritableComparable, ? extends Writable> t) {
for (Map.Entry<? extends WritableComparable, ? extends Writable> e:
t.entrySet()) {
@@ -145,22 +145,21 @@ public void putAll(Map extends WritableComparable, ? extends Writable> t) {
}
}
- /** {@inheritDoc} */
+ @Override
public Writable remove(Object key) {
return instance.remove(key);
}
- /** {@inheritDoc} */
+ @Override
public int size() {
return instance.size();
}
- /** {@inheritDoc} */
+ @Override
public Collection<Writable> values() {
return instance.values();
}
- /** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
@@ -187,7 +186,6 @@ public void readFields(DataInput in) throws IOException {
}
}
- /** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
index a7ee6876d4..949b14ae57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
@@ -54,6 +54,7 @@ public interface Stringifier<T> extends java.io.Closeable {
* Closes this object.
* @throws IOException if an I/O error occurs
* */
+ @Override
public void close() throws IOException;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index a4f80ea886..95fb174a9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -55,6 +55,7 @@ public class Text extends BinaryComparable
private static ThreadLocal<CharsetEncoder> ENCODER_FACTORY =
new ThreadLocal<CharsetEncoder>() {
+ @Override
protected CharsetEncoder initialValue() {
return Charset.forName("UTF-8").newEncoder().
onMalformedInput(CodingErrorAction.REPORT).
@@ -64,6 +65,7 @@ protected CharsetEncoder initialValue() {
private static ThreadLocal<CharsetDecoder> DECODER_FACTORY =
new ThreadLocal<CharsetDecoder>() {
+ @Override
protected CharsetDecoder initialValue() {
return Charset.forName("UTF-8").newDecoder().
onMalformedInput(CodingErrorAction.REPORT).
@@ -112,11 +114,13 @@ public byte[] copyBytes() {
* valid. Please use {@link #copyBytes()} if you
* need the returned array to be precisely the length of the data.
*/
+ @Override
public byte[] getBytes() {
return bytes;
}
/** Returns the number of bytes in the byte array */
+ @Override
public int getLength() {
return length;
}
@@ -281,6 +285,7 @@ public String toString() {
/** deserialize
*/
+ @Override
public void readFields(DataInput in) throws IOException {
int newLength = WritableUtils.readVInt(in);
setCapacity(newLength, false);
@@ -313,6 +318,7 @@ public static void skip(DataInput in) throws IOException {
* length uses zero-compressed encoding
* @see Writable#write(DataOutput)
*/
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, length);
out.write(bytes, 0, length);
@@ -329,6 +335,7 @@ public void write(DataOutput out, int maxLength) throws IOException {
}
/** Returns true iff <code>o</code> is a Text with the same contents. */
+ @Override
public boolean equals(Object o) {
if (o instanceof Text)
return super.equals(o);
@@ -346,6 +353,7 @@ public Comparator() {
super(Text.class);
}
+ @Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
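
Aside: Text's wire format is a vint byte-length followed by the raw UTF-8 bytes, which is what lets the raw Comparator above decode only the length prefix and then compare the byte arrays directly. Sketch (exceptions elided):

    Text t = new Text("héllo");            // stored as standard UTF-8, unlike UTF8/writeUTF

    DataOutputBuffer buf = new DataOutputBuffer();
    t.write(buf);                          // vint(length) + UTF-8 bytes
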
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
index 76304623ee..cf8947d32d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
@@ -57,6 +57,7 @@ public Object toArray() {
public Writable[][] get() { return values; }
+ @Override
public void readFields(DataInput in) throws IOException {
// construct matrix
values = new Writable[in.readInt()][];
@@ -81,6 +82,7 @@ public void readFields(DataInput in) throws IOException {
}
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(values.length); // write values
for (int i = 0; i < values.length; i++) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
index 6a0f88673f..ef7512996c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
@@ -110,6 +110,7 @@ public void set(UTF8 other) {
System.arraycopy(other.bytes, 0, bytes, 0, length);
}
+ @Override
public void readFields(DataInput in) throws IOException {
length = in.readUnsignedShort();
if (bytes == null || bytes.length < length)
@@ -123,6 +124,7 @@ public static void skip(DataInput in) throws IOException {
WritableUtils.skipFully(in, length);
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeShort(length);
out.write(bytes, 0, length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
index e37b144dbf..f537524c4b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
@@ -43,10 +43,12 @@ public VIntWritable() {}
/** Return the value of this VIntWritable. */
public int get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = WritableUtils.readVInt(in);
}
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
index 869bf43914..a9fac30605 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
@@ -43,10 +43,12 @@ public VLongWritable() {}
/** Return the value of this LongWritable. */
public long get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = WritableUtils.readVLong(in);
}
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, value);
}
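
Aside: the V* wrappers trade fixed-width encoding for the variable-length one in WritableUtils, so small magnitudes cost one byte instead of eight. Sketch (exceptions elided):

    DataOutputBuffer buf = new DataOutputBuffer();
    WritableUtils.writeVLong(buf, 42L);
    System.out.println(buf.getLength());                     // 1 byte on the wire

    buf.reset();
    WritableUtils.writeVLong(buf, 1000000L);
    System.out.println(buf.getLength());                     // 4 bytes
    System.out.println(WritableUtils.getVIntSize(1000000L)); // also 4, computed without encoding
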
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
index 162374be21..a72be58832 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
@@ -39,6 +39,7 @@ public VersionMismatchException(byte expectedVersionIn, byte foundVersionIn){
}
/** Returns a string representation of this object. */
+ @Override
public String toString(){
return "A record version mismatch occured. Expecting v"
+ expectedVersion + ", found v" + foundVersion;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
index a197fd2e4f..c2db55520c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
@@ -40,11 +40,13 @@ public abstract class VersionedWritable implements Writable {
public abstract byte getVersion();
// javadoc from Writable
+ @Override
public void write(DataOutput out) throws IOException {
out.writeByte(getVersion()); // store version
}
// javadoc from Writable
+ @Override
public void readFields(DataInput in) throws IOException {
byte version = in.readByte(); // read version
if (version != getVersion())
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
index 6eb3a21443..eb3c8d322c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
@@ -120,6 +120,7 @@ public WritableComparable newKey() {
* Writable#readFields(DataInput)}, then calls {@link
* #compare(WritableComparable,WritableComparable)}.
*/
+ @Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
buffer.reset(b1, s1, l1); // parse key1
@@ -144,6 +145,7 @@ public int compare(WritableComparable a, WritableComparable b) {
return a.compareTo(b);
}
+ @Override
public int compare(Object a, Object b) {
return compare((WritableComparable)a, (WritableComparable)b);
}
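
Aside: the generic compare(byte[], ...) above deserializes both keys and delegates to compareTo; registered subclasses (for example Text.Comparator earlier in this patch) override it with pure byte-level comparison. Either way the call site looks the same. Sketch (exceptions elided):

    DataOutputBuffer b1 = new DataOutputBuffer();
    new LongWritable(10).write(b1);
    DataOutputBuffer b2 = new DataOutputBuffer();
    new LongWritable(20).write(b2);

    WritableComparator cmp = WritableComparator.get(LongWritable.class);
    int c = cmp.compare(b1.getData(), 0, b1.getLength(),
                        b2.getData(), 0, b2.getLength());  // c < 0
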
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index a7a925f35a..35f7cb43ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -63,6 +63,7 @@ public BZip2Codec() { }
* @throws java.io.IOException
* Throws IO exception
*/
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return new BZip2CompressionOutputStream(out);
@@ -74,6 +75,7 @@ public CompressionOutputStream createOutputStream(OutputStream out)
* @return CompressionOutputStream
@throws java.io.IOException
*/
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return createOutputStream(out);
@@ -84,6 +86,7 @@ public CompressionOutputStream createOutputStream(OutputStream out,
*
* @return BZip2DummyCompressor.class
*/
+ @Override
public Class<? extends org.apache.hadoop.io.compress.Compressor> getCompressorType() {
return BZip2DummyCompressor.class;
}
@@ -93,6 +96,7 @@ public Class extends org.apache.hadoop.io.compress.Compressor> getCompressorTy
*
* @return Compressor
*/
+ @Override
public Compressor createCompressor() {
return new BZip2DummyCompressor();
}
@@ -106,6 +110,7 @@ public Compressor createCompressor() {
* @throws java.io.IOException
* Throws IOException
*/
+ @Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return new BZip2CompressionInputStream(in);
@@ -116,6 +121,7 @@ public CompressionInputStream createInputStream(InputStream in)
*
* @return CompressionInputStream
*/
+ @Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return createInputStream(in);
@@ -133,6 +139,7 @@ public CompressionInputStream createInputStream(InputStream in,
*
* @return CompressionInputStream for BZip2 aligned at block boundaries
*/
+ @Override
public SplitCompressionInputStream createInputStream(InputStream seekableIn,
Decompressor decompressor, long start, long end, READ_MODE readMode)
throws IOException {
@@ -181,6 +188,7 @@ public SplitCompressionInputStream createInputStream(InputStream seekableIn,
*
* @return BZip2DummyDecompressor.class
*/
+ @Override
public Class<? extends org.apache.hadoop.io.compress.Decompressor> getDecompressorType() {
return BZip2DummyDecompressor.class;
}
@@ -190,6 +198,7 @@ public Class extends org.apache.hadoop.io.compress.Decompressor> getDecompress
*
* @return Decompressor
*/
+ @Override
public Decompressor createDecompressor() {
return new BZip2DummyDecompressor();
}
@@ -199,6 +208,7 @@ public Decompressor createDecompressor() {
*
* @return A String telling the default bzip2 file extension
*/
+ @Override
public String getDefaultExtension() {
return ".bz2";
}
@@ -226,6 +236,7 @@ private void writeStreamHeader() throws IOException {
}
}
+ @Override
public void finish() throws IOException {
if (needsReset) {
// In the case that nothing is written to this stream, we still need to
@@ -245,12 +256,14 @@ private void internalReset() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
// Cannot write to out at this point because out might not be ready
// yet, as in SequenceFile.Writer implementation.
needsReset = true;
}
+ @Override
public void write(int b) throws IOException {
if (needsReset) {
internalReset();
@@ -258,6 +271,7 @@ public void write(int b) throws IOException {
this.output.write(b);
}
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
if (needsReset) {
internalReset();
@@ -265,6 +279,7 @@ public void write(byte[] b, int off, int len) throws IOException {
this.output.write(b, off, len);
}
+ @Override
public void close() throws IOException {
if (needsReset) {
// In the case that nothing is written to this stream, we still need to
@@ -382,6 +397,7 @@ private BufferedInputStream readStreamHeader() throws IOException {
}// end of method
+ @Override
public void close() throws IOException {
if (!needsReset) {
input.close();
@@ -417,6 +433,7 @@ public void close() throws IOException {
*
*/
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
if (needsReset) {
internalReset();
@@ -440,6 +457,7 @@ public int read(byte[] b, int off, int len) throws IOException {
}
+ @Override
public int read() throws IOException {
byte b[] = new byte[1];
int result = this.read(b, 0, 1);
@@ -454,6 +472,7 @@ private void internalReset() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
// Cannot read from bufferedIn at this point because bufferedIn
// might not be ready
@@ -461,6 +480,7 @@ public void resetState() throws IOException {
needsReset = true;
}
+ @Override
public long getPos() {
return this.compressedStreamPosition;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
index 5d854861f2..434183bbc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
@@ -78,6 +78,7 @@ public BlockCompressorStream(OutputStream out, Compressor compressor) {
* Each block contains the uncompressed length for the block, followed by
* one or more length-prefixed blocks of compressed data.
*/
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
// Sanity checks
if (compressor.finished()) {
@@ -132,6 +133,7 @@ public void write(byte[] b, int off, int len) throws IOException {
}
}
+ @Override
public void finish() throws IOException {
if (!compressor.finished()) {
rawWriteInt((int)compressor.getBytesRead());
@@ -142,6 +144,7 @@ public void finish() throws IOException {
}
}
+ @Override
protected void compress() throws IOException {
int len = compressor.compress(buffer, 0, buffer.length);
if (len > 0) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
index 42ade89019..7d2504e3e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
@@ -65,6 +65,7 @@ protected BlockDecompressorStream(InputStream in) throws IOException {
super(in);
}
+ @Override
protected int decompress(byte[] b, int off, int len) throws IOException {
// Check if we are the beginning of a block
if (noUncompressedBytes == originalBlockSize) {
@@ -104,6 +105,7 @@ protected int decompress(byte[] b, int off, int len) throws IOException {
return n;
}
+ @Override
protected int getCompressedData() throws IOException {
checkStream();
@@ -126,6 +128,7 @@ protected int getCompressedData() throws IOException {
return len;
}
+ @Override
public void resetState() throws IOException {
originalBlockSize = 0;
noUncompressedBytes = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index dc95e9e999..57fb366bdd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -75,6 +75,7 @@ private void addCodec(CompressionCodec codec) {
/**
* Print the extension map out as a string.
*/
+ @Override
public String toString() {
StringBuilder buf = new StringBuilder();
Iterator<Map.Entry<String, CompressionCodec>> itr =
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
index 4f7757dfed..4491819d72 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
@@ -55,6 +55,7 @@ protected CompressionInputStream(InputStream in) throws IOException {
this.in = in;
}
+ @Override
public void close() throws IOException {
in.close();
}
@@ -63,6 +64,7 @@ public void close() throws IOException {
* Read bytes from the stream.
* Made abstract to prevent leakage to underlying stream.
*/
+ @Override
public abstract int read(byte[] b, int off, int len) throws IOException;
/**
@@ -76,6 +78,7 @@ public void close() throws IOException {
*
* @return Current position in stream as a long
*/
+ @Override
public long getPos() throws IOException {
if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)){
//This way of getting the current position will not work for file
@@ -95,6 +98,7 @@ public long getPos() throws IOException {
* @throws UnsupportedOperationException
*/
+ @Override
public void seek(long pos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
@@ -104,6 +108,7 @@ public void seek(long pos) throws UnsupportedOperationException {
*
* @throws UnsupportedOperationException
*/
+ @Override
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
index b4a47946b2..9bd6b84f98 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
@@ -44,11 +44,13 @@ protected CompressionOutputStream(OutputStream out) {
this.out = out;
}
+ @Override
public void close() throws IOException {
finish();
out.close();
}
+ @Override
public void flush() throws IOException {
out.flush();
}
@@ -57,6 +59,7 @@ public void flush() throws IOException {
* Write compressed bytes to the stream.
* Made abstract to prevent leakage to underlying stream.
*/
+ @Override
public abstract void write(byte[] b, int off, int len) throws IOException;
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
index 4cd7425ba6..84f1b2f179 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
@@ -59,6 +59,7 @@ protected CompressorStream(OutputStream out) {
super(out);
}
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
// Sanity checks
if (compressor.finished()) {
@@ -83,6 +84,7 @@ protected void compress() throws IOException {
}
}
+ @Override
public void finish() throws IOException {
if (!compressor.finished()) {
compressor.finish();
@@ -92,10 +94,12 @@ public void finish() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
compressor.reset();
}
+ @Override
public void close() throws IOException {
if (!closed) {
finish();
@@ -105,6 +109,7 @@ public void close() throws IOException {
}
private byte[] oneByte = new byte[1];
+ @Override
public void write(int b) throws IOException {
oneByte[0] = (byte)(b & 0xff);
write(oneByte, 0, oneByte.length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
index d0ef6ee6d3..16e0ad763a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
@@ -66,11 +66,13 @@ protected DecompressorStream(InputStream in) throws IOException {
}
private byte[] oneByte = new byte[1];
+ @Override
public int read() throws IOException {
checkStream();
return (read(oneByte, 0, oneByte.length) == -1) ? -1 : (oneByte[0] & 0xff);
}
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
checkStream();
@@ -163,11 +165,13 @@ protected void checkStream() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
decompressor.reset();
}
private byte[] skipBytes = new byte[512];
+ @Override
public long skip(long n) throws IOException {
// Sanity checks
if (n < 0) {
@@ -189,11 +193,13 @@ public long skip(long n) throws IOException {
return skipped;
}
+ @Override
public int available() throws IOException {
checkStream();
return (eof) ? 0 : 1;
}
+ @Override
public void close() throws IOException {
if (!closed) {
in.close();
@@ -201,13 +207,16 @@ public void close() throws IOException {
}
}
+ @Override
public boolean markSupported() {
return false;
}
+ @Override
public synchronized void mark(int readlimit) {
}
+ @Override
public synchronized void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index 1be28bfce3..ea7df20de3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -37,14 +37,17 @@ public class DefaultCodec implements Configurable, CompressionCodec {
Configuration conf;
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
+ @Override
public Configuration getConf() {
return conf;
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
// This may leak memory if called in a loop. The createCompressor() call
@@ -57,6 +60,7 @@ public CompressionOutputStream createOutputStream(OutputStream out)
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
@@ -64,20 +68,24 @@ public CompressionOutputStream createOutputStream(OutputStream out,
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public Class<? extends Compressor> getCompressorType() {
return ZlibFactory.getZlibCompressorType(conf);
}
+ @Override
public Compressor createCompressor() {
return ZlibFactory.getZlibCompressor(conf);
}
+ @Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return new DecompressorStream(in, createDecompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
@@ -85,14 +93,17 @@ public CompressionInputStream createInputStream(InputStream in,
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public Class<? extends Decompressor> getDecompressorType() {
return ZlibFactory.getZlibDecompressorType(conf);
}
+ @Override
public Decompressor createDecompressor() {
return ZlibFactory.getZlibDecompressor(conf);
}
+ @Override
public String getDefaultExtension() {
return ".deflate";
}
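
Aside: codec usage is symmetric to the hunks above; note that setConf must run before createOutputStream, since the buffer sizes come from the Configuration. Sketch over in-memory streams (exceptions elided):

    Configuration conf = new Configuration();
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf);

    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    CompressionOutputStream cout = codec.createOutputStream(sink);
    cout.write("some repetitive text, some repetitive text".getBytes("UTF-8"));
    cout.close();                          // finishes the compressor, closes the chain

    CompressionInputStream cin = codec.createInputStream(
        new ByteArrayInputStream(sink.toByteArray()));
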
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index b17fe4b39e..520205e166 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -20,15 +20,11 @@
import java.io.*;
import java.util.zip.GZIPOutputStream;
-import java.util.zip.GZIPInputStream;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.zlib.*;
-import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
-import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
/**
* This class creates gzip compressors/decompressors.
@@ -66,32 +62,39 @@ protected GzipOutputStream(CompressorStream out) {
super(out);
}
+ @Override
public void close() throws IOException {
out.close();
}
+ @Override
public void flush() throws IOException {
out.flush();
}
+ @Override
public void write(int b) throws IOException {
out.write(b);
}
+ @Override
public void write(byte[] data, int offset, int length)
throws IOException {
out.write(data, offset, length);
}
+ @Override
public void finish() throws IOException {
((ResetableGZIPOutputStream) out).finish();
}
+ @Override
public void resetState() throws IOException {
((ResetableGZIPOutputStream) out).resetState();
}
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return (ZlibFactory.isNativeZlibLoaded(conf)) ?
@@ -100,6 +103,7 @@ public CompressionOutputStream createOutputStream(OutputStream out)
new GzipOutputStream(out);
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
@@ -110,23 +114,27 @@ public CompressionOutputStream createOutputStream(OutputStream out,
createOutputStream(out);
}
+ @Override
public Compressor createCompressor() {
return (ZlibFactory.isNativeZlibLoaded(conf))
? new GzipZlibCompressor(conf)
: null;
}
+ @Override
public Class<? extends Compressor> getCompressorType() {
return ZlibFactory.isNativeZlibLoaded(conf)
? GzipZlibCompressor.class
: null;
}
+ @Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, null);
}
+ @Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
@@ -137,18 +145,21 @@ public CompressionInputStream createInputStream(InputStream in,
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public Decompressor createDecompressor() {
return (ZlibFactory.isNativeZlibLoaded(conf))
? new GzipZlibDecompressor()
: new BuiltInGzipDecompressor();
}
+ @Override
public Class<? extends Decompressor> getDecompressorType() {
return ZlibFactory.isNativeZlibLoaded(conf)
? GzipZlibDecompressor.class
: BuiltInGzipDecompressor.class;
}
+ @Override
public String getDefaultExtension() {
return ".gz";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
index 14cc9d5b82..00e892d845 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
@@ -338,6 +338,7 @@ private void changeStateToProcessABlock() throws IOException {
}
+ @Override
public int read() throws IOException {
if (this.in != null) {
@@ -372,6 +373,7 @@ public int read() throws IOException {
*/
+ @Override
public int read(final byte[] dest, final int offs, final int len)
throws IOException {
if (offs < 0) {
@@ -574,6 +576,7 @@ private void complete() throws IOException {
}
}
+ @Override
public void close() throws IOException {
InputStream inShadow = this.in;
if (inShadow != null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
index 3060eb924f..ca4e5cd0df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
@@ -639,6 +639,7 @@ public CBZip2OutputStream(final OutputStream out, final int blockSize)
init();
}
+ @Override
public void write(final int b) throws IOException {
if (this.out != null) {
write0(b);
@@ -704,6 +705,7 @@ private void writeRun() throws IOException {
/**
* Overriden to close the stream.
*/
+ @Override
protected void finalize() throws Throwable {
finish();
super.finalize();
@@ -726,6 +728,7 @@ public void finish() throws IOException {
}
}
+ @Override
public void close() throws IOException {
if (out != null) {
OutputStream outShadow = this.out;
@@ -739,6 +742,7 @@ public void close() throws IOException {
}
}
+ @Override
public void flush() throws IOException {
OutputStream outShadow = this.out;
if (outShadow != null) {
@@ -849,6 +853,7 @@ public final int getBlockSize() {
return this.blockSize100k;
}
+ @Override
public void write(final byte[] buf, int offs, final int len)
throws IOException {
if (offs < 0) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
index 0cf65e5144..22a3118f5f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
@@ -258,6 +258,7 @@ public synchronized int getRemaining() {
return 0;
}
+ @Override
public synchronized void reset() {
finished = false;
compressedDirectBufLen = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
index baf864094e..4620092f08 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
@@ -257,6 +257,7 @@ public synchronized int getRemaining() {
return 0;
}
+ @Override
public synchronized void reset() {
finished = false;
compressedDirectBufLen = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
index 1e5525e743..41f8036fda 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
@@ -122,7 +122,7 @@ public BuiltInGzipDecompressor() {
// in the first buffer load? (But how else would one do it?)
}
- /** {@inheritDoc} */
+ @Override
public synchronized boolean needsInput() {
if (state == GzipStateLabel.DEFLATE_STREAM) { // most common case
return inflater.needsInput();
@@ -144,6 +144,7 @@ public synchronized boolean needsInput() {
* the bulk deflate stream, which is a performance hit we don't want
* to absorb. (Decompressor now documents this requirement.)
*/
+ @Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
@@ -175,6 +176,7 @@ public synchronized void setInput(byte[] b, int off, int len) {
* methods below), the deflate stream is never copied; Inflater operates
* directly on the user's buffer.
*/
+ @Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
int numAvailBytes = 0;
@@ -421,16 +423,17 @@ public synchronized long getBytesRead() {
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
+ @Override
public synchronized int getRemaining() {
return userBufLen;
}
- /** {@inheritDoc} */
+ @Override
public synchronized boolean needsDictionary() {
return inflater.needsDictionary();
}
- /** {@inheritDoc} */
+ @Override
public synchronized void setDictionary(byte[] b, int off, int len) {
inflater.setDictionary(b, off, len);
}
@@ -439,6 +442,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) {
* Returns true if the end of the gzip substream (single "member") has been
* reached.
*/
+ @Override
public synchronized boolean finished() {
return (state == GzipStateLabel.FINISHED);
}
@@ -447,6 +451,7 @@ public synchronized boolean finished() {
* Resets everything, including the input buffer, regardless of whether the
* current gzip substream is finished.
*/
+ @Override
public synchronized void reset() {
// could optionally emit INFO message if state != GzipStateLabel.FINISHED
inflater.reset();
@@ -463,7 +468,7 @@ public synchronized void reset() {
hasHeaderCRC = false;
}
- /** {@inheritDoc} */
+ @Override
public synchronized void end() {
inflater.end();
}
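
BuiltInGzipDecompressor now declares its Decompressor methods with @Override, so the generic pull loop used against any Decompressor applies to it unchanged. A rough sketch of that loop, loosely modeled on what the framework's stream wrappers do internally (the buffer sizes and the helper class are illustrative):

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.compress.Decompressor;

class DecompressorLoopSketch {
  // Pulls one compressed stream through a Decompressor and returns the
  // number of uncompressed bytes produced.
  static long pump(InputStream in, Decompressor decomp, byte[] scratch)
      throws IOException {
    byte[] inBuf = new byte[64 * 1024];
    long total = 0;
    while (!decomp.finished()) {
      if (decomp.needsInput()) {
        int n = in.read(inBuf, 0, inBuf.length);
        if (n < 0) {
          break;  // truncated input: finished() will never become true
        }
        decomp.setInput(inBuf, 0, n);
      }
      total += decomp.decompress(scratch, 0, scratch.length);
    }
    return total;
  }
}
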
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
index b269d557b7..509456e834 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
@@ -48,6 +48,7 @@ public BuiltInZlibDeflater() {
super();
}
+ @Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
return super.deflate(b, off, len);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
index 0223587ad0..4fda6723b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
@@ -39,6 +39,7 @@ public BuiltInZlibInflater() {
super();
}
+ @Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
try {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
index 8839bc98fa..c0d0d699a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
@@ -259,6 +259,7 @@ public synchronized void reinit(Configuration conf) {
}
}
+ @Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b== null) {
throw new NullPointerException();
@@ -287,6 +288,7 @@ synchronized void setInputFromSavedData() {
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
+ @Override
public synchronized void setDictionary(byte[] b, int off, int len) {
if (stream == 0 || b == null) {
throw new NullPointerException();
@@ -297,6 +299,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) {
setDictionary(stream, b, off, len);
}
+ @Override
public synchronized boolean needsInput() {
// Consume remaining compressed data?
if (compressedDirectBuf.remaining() > 0) {
@@ -325,16 +328,19 @@ public synchronized boolean needsInput() {
return false;
}
+ @Override
public synchronized void finish() {
finish = true;
}
+ @Override
public synchronized boolean finished() {
// Check if 'zlib' says it's 'finished' and
// all compressed data has been consumed
return (finished && compressedDirectBuf.remaining() == 0);
}
+ @Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
@@ -385,6 +391,7 @@ public synchronized int compress(byte[] b, int off, int len)
*
* @return the total (non-negative) number of compressed bytes output so far
*/
+ @Override
public synchronized long getBytesWritten() {
checkStream();
return getBytesWritten(stream);
@@ -395,11 +402,13 @@ public synchronized long getBytesWritten() {
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
+ @Override
public synchronized long getBytesRead() {
checkStream();
return getBytesRead(stream);
}
+ @Override
public synchronized void reset() {
checkStream();
reset(stream);
@@ -413,6 +422,7 @@ public synchronized void reset() {
userBufOff = userBufLen = 0;
}
+ @Override
public synchronized void end() {
if (stream != 0) {
end(stream);
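
The compressor side is the mirror image of the decompressor loop: feed bytes with setInput(), declare the end of input with finish(), then drain compress() until finished() returns true. A compact sketch against the Compressor interface (the one-shot framing is a simplification; real callers feed input incrementally and check needsInput()):

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.compress.Compressor;

class CompressorLoopSketch {
  // One-shot compression of a small byte[] through any Compressor.
  static byte[] compressAll(Compressor comp, byte[] data) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] scratch = new byte[64 * 1024];
    comp.setInput(data, 0, data.length);
    comp.finish();  // no more input; lets zlib emit its trailer
    while (!comp.finished()) {
      int n = comp.compress(scratch, 0, scratch.length);
      out.write(scratch, 0, n);
    }
    return out.toByteArray();
  }
}
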
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
index 2db70551e8..ba67571998 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
@@ -118,6 +118,7 @@ public ZlibDecompressor() {
this(CompressionHeader.DEFAULT_HEADER, DEFAULT_DIRECT_BUFFER_SIZE);
}
+ @Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
@@ -154,6 +155,7 @@ synchronized void setInputFromSavedData() {
userBufLen -= compressedDirectBufLen;
}
+ @Override
public synchronized void setDictionary(byte[] b, int off, int len) {
if (stream == 0 || b == null) {
throw new NullPointerException();
@@ -165,6 +167,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) {
needDict = false;
}
+ @Override
public synchronized boolean needsInput() {
// Consume remaining compressed data?
if (uncompressedDirectBuf.remaining() > 0) {
@@ -184,16 +187,19 @@ public synchronized boolean needsInput() {
return false;
}
+ @Override
public synchronized boolean needsDictionary() {
return needDict;
}
+ @Override
public synchronized boolean finished() {
// Check if 'zlib' says it's 'finished' and
// all compressed data has been consumed
return (finished && uncompressedDirectBuf.remaining() == 0);
}
+ @Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
@@ -255,6 +261,7 @@ public synchronized long getBytesRead() {
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
+ @Override
public synchronized int getRemaining() {
checkStream();
return userBufLen + getRemaining(stream); // userBuf + compressedDirectBuf
@@ -263,6 +270,7 @@ public synchronized int getRemaining() {
/**
* Resets everything including the input buffers (user and direct).
*/
+ @Override
public synchronized void reset() {
checkStream();
reset(stream);
@@ -274,6 +282,7 @@ public synchronized void reset() {
userBufOff = userBufLen = 0;
}
+ @Override
public synchronized void end() {
if (stream != 0) {
end(stream);
@@ -281,6 +290,7 @@ public synchronized void end() {
}
}
+ @Override
protected void finalize() {
end();
}
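
ZlibDecompressor wraps a native z_stream, so the finalize() override above is only a backstop; callers are expected to release the native resource deterministically with end(), or to lease instances from CodecPool rather than constructing them. A hedged sketch of that discipline (the helper class is illustrative, and the constructor is only usable where the native library is loaded):

import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;

class ZlibLifecycleSketch {
  static void useOnce() {
    ZlibDecompressor decomp = new ZlibDecompressor();
    try {
      // drive setInput()/decompress() here, as with any Decompressor
    } finally {
      decomp.end();  // frees the native z_stream; finalize() is only a backstop
    }
  }
}
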
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
index 6b4fdd89aa..ce93266574 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
@@ -300,6 +300,7 @@ public Writer(FSDataOutputStream fout, String compressionName,
* Close the BCFile Writer. Attempting to use the Writer after calling
* <code>close</code> is not allowed and may lead to undetermined results.
*/
+ @Override
public void close() throws IOException {
if (closed == true) {
return;
@@ -447,6 +448,7 @@ private class MetaBlockRegister implements BlockRegister {
this.compressAlgo = compressAlgo;
}
+ @Override
public void register(long raw, long begin, long end) {
metaIndex.addEntry(new MetaIndexEntry(name, compressAlgo,
new BlockRegion(begin, end - begin, raw)));
@@ -463,6 +465,7 @@ private class DataBlockRegister implements BlockRegister {
// do nothing
}
+ @Override
public void register(long raw, long begin, long end) {
dataIndex.addBlockRegion(new BlockRegion(begin, end - begin, raw));
}
@@ -671,6 +674,7 @@ public Version getAPIVersion() {
/**
* Finishing reading the BCFile. Release all resources.
*/
+ @Override
public void close() {
// nothing to be done now
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java
index a9cb1ec1c3..0808711f89 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java
@@ -68,6 +68,7 @@ public ScalarLong(long m) {
magnitude = m;
}
+ @Override
public long magnitude() {
return magnitude;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
index 0b9ed9d2b3..9a57581c90 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
@@ -297,6 +297,7 @@ public Writer(FSDataOutputStream fsdos, int minBlockSize,
*
* The underlying FSDataOutputStream is not closed.
*/
+ @Override
public void close() throws IOException {
if ((state == State.CLOSED)) {
return;
@@ -820,6 +821,7 @@ public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
* Close the reader. The state of the Reader object is undefined after
* close. Calling close() for multiple times has no effect.
*/
+ @Override
public void close() throws IOException {
readerBCF.close();
}
@@ -1573,6 +1575,7 @@ private void parkCursorAtEnd() throws IOException {
* scanner after calling close is not defined. The entry returned by the
* previous entry() call will be invalid.
*/
+ @Override
public void close() throws IOException {
parkCursorAtEnd();
}
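
Writer, Reader, and Scanner each override close() with their own contract: the Writer leaves the underlying FSDataOutputStream open, the Reader tolerates repeated close() calls, and the Scanner invalidates its current entry. A sketch of the resulting close ordering on the read path (createScanner() and the stream plumbing are assumptions, not shown in this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

class TFileCloseSketch {
  static void readAll(Configuration conf, Path path) throws Exception {
    FileSystem fs = path.getFileSystem(conf);
    long length = fs.getFileStatus(path).getLen();
    FSDataInputStream in = fs.open(path);
    TFile.Reader reader = new TFile.Reader(in, length, conf);
    TFile.Reader.Scanner scanner = reader.createScanner();  // assumed API
    try {
      // iterate entries via the scanner ...
    } finally {
      scanner.close();  // the previously returned entry becomes invalid
      reader.close();   // idempotent; safe to call again
      in.close();       // the Reader leaves the underlying stream open
    }
  }
}
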
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 2a7f883d95..4cfa0761ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -202,6 +202,7 @@ public static class Stat {
this.mode = mode;
}
+ @Override
public String toString() {
return "Stat(owner='" + owner + "', group='" + group + "'" +
", mode=" + mode + ")";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
index 5064df5d86..db653b23f4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
@@ -38,6 +38,7 @@ public Errno getErrno() {
return errno;
}
+ @Override
public String toString() {
return errno.toString() + ": " + super.getMessage();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 8b8387ce2c..5c29a33312 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -150,6 +150,7 @@ public static final RetryPolicy failoverOnNetworkException(
}
static class TryOnceThenFail implements RetryPolicy {
+ @Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
return RetryAction.FAIL;
@@ -157,6 +158,7 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers,
}
static class RetryForever implements RetryPolicy {
+ @Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
return RetryAction.RETRY;
@@ -430,6 +432,7 @@ public ExceptionDependentRetry(RetryPolicy defaultPolicy,
this.exceptionToPolicyMap = exceptionToPolicyMap;
}
+ @Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
RetryPolicy policy = exceptionToPolicyMap.get(e.getClass());
@@ -457,6 +460,7 @@ public RemoteExceptionDependentRetry(RetryPolicy defaultPolicy,
}
}
+ @Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
RetryPolicy policy = null;
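
With shouldRetry() annotated, a mismatched signature in a policy implementation now fails compilation instead of silently becoming an overload. A hypothetical policy in the same shape as TryOnceThenFail and RetryForever above (the class name and the retry cap are illustrative):

import org.apache.hadoop.io.retry.RetryPolicy;

// Hypothetical: retry idempotent calls a bounded number of times, else fail.
class BoundedIdempotentRetry implements RetryPolicy {
  private final int maxRetries;

  BoundedIdempotentRetry(int maxRetries) {
    this.maxRetries = maxRetries;
  }

  @Override
  public RetryAction shouldRetry(Exception e, int retries, int failovers,
      boolean isMethodIdempotent) throws Exception {
    return (isMethodIdempotent && retries < maxRetries)
        ? RetryAction.RETRY
        : RetryAction.FAIL;
  }
}
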
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java
index 7e74cb7732..05205c5523 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java
@@ -56,6 +56,7 @@ protected DeserializerComparator(Deserializer<T> deserializer)
this.deserializer.open(buffer);
}
+ @Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
index 61d6f171c9..f08d0008c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
@@ -24,11 +24,8 @@
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Serializable;
-import java.util.Map;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.RawComparator;
/**
*
@@ -45,6 +42,7 @@ static class JavaSerializationDeserializer<T extends Serializable>
private ObjectInputStream ois;
+ @Override
public void open(InputStream in) throws IOException {
ois = new ObjectInputStream(in) {
@Override protected void readStreamHeader() {
@@ -53,6 +51,7 @@ public void open(InputStream in) throws IOException {
};
}
+ @Override
@SuppressWarnings("unchecked")
public T deserialize(T object) throws IOException {
try {
@@ -63,6 +62,7 @@ public T deserialize(T object) throws IOException {
}
}
+ @Override
public void close() throws IOException {
ois.close();
}
@@ -74,6 +74,7 @@ static class JavaSerializationSerializer
private ObjectOutputStream oos;
+ @Override
public void open(OutputStream out) throws IOException {
oos = new ObjectOutputStream(out) {
@Override protected void writeStreamHeader() {
@@ -82,27 +83,32 @@ public void open(OutputStream out) throws IOException {
};
}
+ @Override
public void serialize(Serializable object) throws IOException {
oos.reset(); // clear (class) back-references
oos.writeObject(object);
}
+ @Override
public void close() throws IOException {
oos.close();
}
}
+ @Override
@InterfaceAudience.Private
public boolean accept(Class<?> c) {
return Serializable.class.isAssignableFrom(c);
}
+ @Override
@InterfaceAudience.Private
public Deserializer<Serializable> getDeserializer(Class<Serializable> c) {
return new JavaSerializationDeserializer<Serializable>();
}
+ @Override
@InterfaceAudience.Private
public Serializer<Serializable> getSerializer(Class<Serializable> c) {
return new JavaSerializationSerializer();
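
JavaSerialization plugs into the pluggable serialization framework through the accept()/getSerializer()/getDeserializer() triple above. A hedged round-trip sketch (the helper class is illustrative, and JavaSerialization must be registered in io.serializations because the default list omits it):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

class JavaSerializationSketch {
  static String roundTrip(String value) throws Exception {
    Configuration conf = new Configuration();
    conf.set("io.serializations",
        "org.apache.hadoop.io.serializer.JavaSerialization");
    SerializationFactory factory = new SerializationFactory(conf);

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    Serializer<String> ser = factory.getSerializer(String.class);
    ser.open(bytes);
    ser.serialize(value);  // oos.reset() above keeps back-references out
    ser.close();

    Deserializer<String> deser = factory.getDeserializer(String.class);
    deser.open(new ByteArrayInputStream(bytes.toByteArray()));
    String copy = deser.deserialize(null);  // null: build a fresh object
    deser.close();
    return copy;
  }
}
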
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
index 12927bea14..f9bf692f1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
@@ -44,6 +44,7 @@ public JavaSerializationComparator() throws IOException {
super(new JavaSerialization.JavaSerializationDeserializer<T>());
}
+ @Override
@InterfaceAudience.Private
public int compare(T o1, T o2) {
return o1.compareTo(o2);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
index 8511d25bcd..ad965d6b2f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
@@ -23,8 +23,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.util.Map;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
index 1d5c068886..f340cb3a98 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
@@ -47,11 +47,13 @@ public abstract class AvroSerialization<T> extends Configured
@InterfaceAudience.Private
public static final String AVRO_SCHEMA_KEY = "Avro-Schema";
+ @Override
@InterfaceAudience.Private
public Deserializer<T> getDeserializer(Class<T> c) {
return new AvroDeserializer(c);
}
+ @Override
@InterfaceAudience.Private
public Serializer<T> getSerializer(Class<T> c) {
return new AvroSerializer(c);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index b0f5c93f75..de7af1b6b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -364,6 +364,7 @@ private void handleTimeout(SocketTimeoutException e) throws IOException {
* until a byte is read.
* @throws IOException for any IO problem other than socket timeout
*/
+ @Override
public int read() throws IOException {
do {
try {
@@ -380,6 +381,7 @@ public int read() throws IOException {
*
* @return the total number of bytes read; -1 if the connection is closed.
*/
+ @Override
public int read(byte[] buf, int off, int len) throws IOException {
do {
try {
@@ -510,6 +512,7 @@ private synchronized void handleSaslConnectionFailure(
final Random rand, final UserGroupInformation ugi) throws IOException,
InterruptedException {
ugi.doAs(new PrivilegedExceptionAction