From a11042365f93cf235ecc6f8b1a615cf3edd3e75a Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Fri, 24 Aug 2012 22:10:38 +0000
Subject: [PATCH 01/62] HDFS-3731. 2.0 release upgrade must handle blocks being
written from 1.0. Contributed by Colin Patrick McCabe
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377137 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hadoop/hdfs/server/common/Storage.java | 6 +
.../hdfs/server/datanode/DataStorage.java | 35 ++++-
.../hadoop/hdfs/TestDFSUpgradeFromImage.java | 123 +++++++++++++-----
.../src/test/resources/hadoop1-bbw.tgz | Bin 0 -> 40234 bytes
5 files changed, 132 insertions(+), 35 deletions(-)
create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d341d8482..ba2a8b7564 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -673,6 +673,9 @@ Branch-2 ( Unreleased changes )
HDFS-3715. Fix TestFileCreation#testFileCreationNamenodeRestart.
(Andrew Whang via eli)
+ HDFS-3731. 2.0 release upgrade must handle blocks being written from 1.0.
+ (Colin Patrick McCabe via eli)
+
BREAKDOWN OF HDFS-3042 SUBTASKS
HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 909d57d526..ca596a2b0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -86,6 +86,12 @@ public abstract class Storage extends StorageInfo {
public static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp";
public static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint";
+ /**
+ * The blocksBeingWritten directory which was used in some 1.x and earlier
+ * releases.
+ */
+ public static final String STORAGE_1_BBW = "blocksBeingWritten";
+
public enum StorageState {
NON_EXISTENT,
NOT_FORMATTED,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 221d6b2d73..b0675ef09c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -451,6 +451,8 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
File curDir = sd.getCurrentDir();
File prevDir = sd.getPreviousDir();
+ File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
+
assert curDir.exists() : "Data node current directory must exist.";
// Cleanup directory "detach"
cleanupDetachDir(new File(curDir, STORAGE_DIR_DETACHED));
@@ -471,7 +473,7 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(),
nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
bpStorage.format(curDir, nsInfo);
- linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT));
+ linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
// 4. Write version file under /current
layoutVersion = HdfsConstants.LAYOUT_VERSION;
@@ -578,15 +580,21 @@ void doFinalize(StorageDirectory sd) throws IOException {
+ "; cur CTime = " + this.getCTime());
assert sd.getCurrentDir().exists() : "Current directory must exist.";
final File tmpDir = sd.getFinalizedTmp();//finalized.tmp directory
+ final File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
// 1. rename previous to finalized.tmp
rename(prevDir, tmpDir);
// 2. delete finalized.tmp dir in a separate thread
+ // Also delete the blocksBeingWritten directory left over from HDFS 1.x
+ // and earlier, if it exists.
new Daemon(new Runnable() {
@Override
public void run() {
try {
deleteDir(tmpDir);
+ if (bbwDir.exists()) {
+ deleteDir(bbwDir);
+ }
} catch(IOException ex) {
LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
}
@@ -620,11 +628,16 @@ void finalizeUpgrade(String bpID) throws IOException {
/**
* Hardlink all finalized and RBW blocks in fromDir to toDir
- * @param fromDir directory where the snapshot is stored
- * @param toDir the current data directory
- * @throws IOException if error occurs during hardlink
+ *
+ * @param fromDir The directory where the 'from' snapshot is stored
+ * @param fromBbwDir In HDFS 1.x, the directory where blocks
+ * that are under construction are stored.
+ * @param toDir The current data directory
+ *
+ * @throws IOException If error occurs during hardlink
*/
- private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+ private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
+ throws IOException {
HardLink hardLink = new HardLink();
// do the link
int diskLayoutVersion = this.getLayoutVersion();
@@ -632,13 +645,23 @@ private void linkAllBlocks(File fromDir, File toDir) throws IOException {
// hardlink finalized blocks in tmpDir/finalized
linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
- // hardlink rbw blocks in tmpDir/finalized
+ // hardlink rbw blocks in tmpDir/rbw
linkBlocks(new File(fromDir, STORAGE_DIR_RBW),
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
} else { // pre-RBW version
// hardlink finalized blocks in tmpDir
linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
diskLayoutVersion, hardLink);
+ if (fromBbwDir.exists()) {
+ /*
+ * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
+ * directory. It's a little messy, because the blocksBeingWritten was
+ * NOT underneath the 'current' directory in those releases. See
+ * HDFS-3731 for details.
+ */
+ linkBlocks(fromBbwDir,
+ new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
+ }
}
LOG.info( hardLink.linkStats.report() );
}
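For context on the hunks above: in 1.x the blocksBeingWritten directory sits directly under the storage root, next to current/, rather than under the block pool's current/ tree, which is why linkAllBlocks now takes the extra fromBbwDir argument and links it into the new rbw directory. The patch itself reuses linkBlocks and HardLink for this; the snippet below is only a minimal sketch of the idea, assuming a flat directory of block and meta files and using illustrative names.

    // Illustrative sketch only: hard-link every file in a 1.x
    // blocksBeingWritten directory into the new rbw directory, so the
    // original files stay in place for rollback.
    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    public class BbwLinkSketch {
      static void linkBbwIntoRbw(File fromBbwDir, File toRbwDir) throws IOException {
        if (!fromBbwDir.exists()) {
          return; // nothing was being written when the 1.x cluster stopped
        }
        if (!toRbwDir.exists() && !toRbwDir.mkdirs()) {
          throw new IOException("could not create " + toRbwDir);
        }
        File[] blocks = fromBbwDir.listFiles();
        if (blocks == null) {
          throw new IOException("could not list " + fromBbwDir);
        }
        for (File blk : blocks) {
          // a hard link shares the data with the old copy instead of duplicating it
          Files.createLink(new File(toRbwDir, blk.getName()).toPath(), blk.toPath());
        }
      }
    }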
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 8db1741e82..8d71791fd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -39,7 +39,9 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.util.StringUtils;
@@ -49,8 +51,9 @@
* This tests data transfer protocol handling in the Datanode. It sends
* various forms of wrong data and verifies that Datanode handles it well.
*
- * This test uses the following two file from src/test/.../dfs directory :
- * 1) hadoop-version-dfs-dir.tgz : contains DFS directories.
+ * This test uses the following items from src/test/.../dfs directory :
+ * 1) hadoop-22-dfs-dir.tgz and other tarred pre-upgrade NN / DN
+ * directory images
* 2) hadoop-dfs-dir.txt : checksums that are compared in this test.
* Please read hadoop-dfs-dir.txt for more information.
*/
@@ -62,14 +65,23 @@ public class TestDFSUpgradeFromImage {
new File(MiniDFSCluster.getBaseDirectory());
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
-
- public int numDataNodes = 4;
-
+ private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+
private static class ReferenceFileInfo {
String path;
long checksum;
}
+ private static final Configuration upgradeConf;
+
+ static {
+ upgradeConf = new HdfsConfiguration();
+ upgradeConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
+ if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Maven
+ System.setProperty("test.build.data", "build/test/data");
+ }
+ }
+
LinkedList refList = new LinkedList();
Iterator refIter;
@@ -137,11 +149,33 @@ private void verifyChecksum(String path, long checksum) throws IOException {
}
}
- CRC32 overallChecksum = new CRC32();
+ /**
+ * Try to open a file for reading several times.
+ *
+ * If we fail because lease recovery hasn't completed, retry the open.
+ */
+ private static FSInputStream dfsOpenFileWithRetries(DistributedFileSystem dfs,
+ String pathName) throws IOException {
+ IOException exc = null;
+ for (int tries = 0; tries < 10; tries++) {
+ try {
+ return dfs.dfs.open(pathName);
+ } catch (IOException e) {
+ exc = e;
+ }
+ if (!exc.getMessage().contains("Cannot obtain " +
+ "block length for LocatedBlock")) {
+ throw exc;
+ }
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException ignored) {}
+ }
+ throw exc;
+ }
- private void verifyDir(DistributedFileSystem dfs, Path dir)
- throws IOException {
-
+ private void verifyDir(DistributedFileSystem dfs, Path dir,
+ CRC32 overallChecksum) throws IOException {
FileStatus[] fileArr = dfs.listStatus(dir);
TreeMap fileMap = new TreeMap();
@@ -157,11 +191,11 @@ private void verifyDir(DistributedFileSystem dfs, Path dir)
overallChecksum.update(pathName.getBytes());
if ( isDir ) {
- verifyDir(dfs, path);
+ verifyDir(dfs, path, overallChecksum);
} else {
// this is not a directory. Checksum the file data.
CRC32 fileCRC = new CRC32();
- FSInputStream in = dfs.dfs.open(pathName);
+ FSInputStream in = dfsOpenFileWithRetries(dfs, pathName);
byte[] buf = new byte[4096];
int nRead = 0;
while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) {
@@ -175,7 +209,8 @@ private void verifyDir(DistributedFileSystem dfs, Path dir)
private void verifyFileSystem(DistributedFileSystem dfs) throws IOException {
- verifyDir(dfs, new Path("/"));
+ CRC32 overallChecksum = new CRC32();
+ verifyDir(dfs, new Path("/"), overallChecksum);
verifyChecksum("overallCRC", overallChecksum.getValue());
@@ -237,7 +272,8 @@ public void testFailOnPreUpgradeImage() throws IOException {
@Test
public void testUpgradeFromRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE);
- upgradeAndVerify();
+ upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
+ numDataNodes(4));
}
/**
@@ -259,7 +295,8 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
// Upgrade should now fail
try {
- upgradeAndVerify();
+ upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
+ numDataNodes(4));
fail("Upgrade did not fail with bad MD5");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
@@ -268,21 +305,34 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
}
}
}
-
- private void upgradeAndVerify() throws IOException {
+
+ static void recoverAllLeases(DFSClient dfs,
+ Path path) throws IOException {
+ String pathStr = path.toString();
+ HdfsFileStatus status = dfs.getFileInfo(pathStr);
+ if (!status.isDir()) {
+ dfs.recoverLease(pathStr);
+ return;
+ }
+ byte prev[] = HdfsFileStatus.EMPTY_NAME;
+ DirectoryListing dirList;
+ do {
+ dirList = dfs.listPaths(pathStr, prev);
+ HdfsFileStatus files[] = dirList.getPartialListing();
+ for (HdfsFileStatus f : files) {
+ recoverAllLeases(dfs, f.getFullPath(path));
+ }
+ prev = dirList.getLastName();
+ } while (dirList.hasMore());
+ }
+
+ private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+ throws IOException {
MiniDFSCluster cluster = null;
try {
- Configuration conf = new HdfsConfiguration();
- if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
- System.setProperty("test.build.data", "build/test/data");
- }
- conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDataNodes)
- .format(false)
- .startupOption(StartupOption.UPGRADE)
- .clusterId("testClusterId")
- .build();
+ bld.format(false).startupOption(StartupOption.UPGRADE)
+ .clusterId("testClusterId");
+ cluster = bld.build();
cluster.waitActive();
DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
DFSClient dfsClient = dfs.dfs;
@@ -293,12 +343,27 @@ private void upgradeAndVerify() throws IOException {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
}
-
+ recoverAllLeases(dfsClient, new Path("/"));
verifyFileSystem(dfs);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
-
+ /**
+ * Test upgrade from a 1.x image with some blocksBeingWritten
+ */
+ @Test
+ public void testUpgradeFromRel1BBWImage() throws IOException {
+ unpackStorage(HADOOP1_BBW_IMAGE);
+ Configuration conf = new Configuration(upgradeConf);
+ conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+ System.getProperty("test.build.data") + File.separator +
+ "dfs" + File.separator +
+ "data" + File.separator +
+ "data1");
+ upgradeAndVerify(new MiniDFSCluster.Builder(conf).
+ numDataNodes(1).enableManagedDfsDirsRedundancy(false).
+ manageDataDfsDirs(false));
+ }
}
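The retry in dfsOpenFileWithRetries and the recoverAllLeases walk exist because files that were still open for write on the 1.x cluster come back with an under-construction last block; until the NameNode finishes lease recovery, a read fails with "Cannot obtain block length for LocatedBlock". A minimal sketch of the same wait, written against the public DistributedFileSystem.recoverLease API rather than the test's DFSClient internals (the helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryWait {
      // Poll until the NameNode has closed the file and its length is final.
      static void waitForLeaseRecovery(DistributedFileSystem dfs, Path file)
          throws IOException, InterruptedException {
        while (!dfs.recoverLease(file)) {
          Thread.sleep(1000); // recovery is asynchronous; try again shortly
        }
      }
    }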
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..2574f8b7d1307901b1cf95e2fbaa4b3acd88c90e
GIT binary patch
literal 40234
[base85 payload of the 40234-byte hadoop1-bbw.tgz binary literal omitted]
From: Alejandro Abdelnur
Date: Fri, 24 Aug 2012 23:36:55 +0000
Subject: [PATCH 02/62] MAPREDUCE-4408. allow jobs to set a JAR that is in the
distributed cache (rkanter via tucu)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377149 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../apache/hadoop/mapreduce/JobSubmitter.java | 14 ++++-
.../org/apache/hadoop/mapred/YARNRunner.java | 4 +-
.../mapreduce/v2/MiniMRYarnCluster.java | 6 +-
.../hadoop/mapreduce/v2/TestMRJobs.java | 62 ++++++++++++++-----
5 files changed, 68 insertions(+), 21 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5587eb6d3a..37c9591995 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -144,6 +144,9 @@ Branch-2 ( Unreleased changes )
MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
+ MAPREDUCE-4408. allow jobs to set a JAR that is in the distributed cache
+ (rkanter via tucu)
+
BUG FIXES
MAPREDUCE-4422. YARN_APPLICATION_CLASSPATH needs a documented default value in
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 08a09c2a69..31081b332f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -232,9 +232,17 @@ private void copyAndConfigureFiles(Job job, Path submitJobDir,
if ("".equals(job.getJobName())){
job.setJobName(new Path(jobJar).getName());
}
- copyJar(new Path(jobJar), JobSubmissionFiles.getJobJar(submitJobDir),
- replication);
- job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
+ Path jobJarPath = new Path(jobJar);
+ URI jobJarURI = jobJarPath.toUri();
+ // If the job jar is already in fs, we don't need to copy it from local fs
+ if (jobJarURI.getScheme() == null || jobJarURI.getAuthority() == null
+ || !(jobJarURI.getScheme().equals(jtFs.getUri().getScheme())
+ && jobJarURI.getAuthority().equals(
+ jtFs.getUri().getAuthority()))) {
+ copyJar(jobJarPath, JobSubmissionFiles.getJobJar(submitJobDir),
+ replication);
+ job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
+ }
} else {
LOG.warn("No job jar file set. User classes may not be found. "+
"See Job or Job#setJar(String).");
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 4555f86e88..74ae6446cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -345,10 +345,10 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
createApplicationResource(defaultFileContext,
jobConfPath, LocalResourceType.FILE));
if (jobConf.get(MRJobConfig.JAR) != null) {
+ Path jobJarPath = new Path(jobConf.get(MRJobConfig.JAR));
localResources.put(MRJobConfig.JOB_JAR,
createApplicationResource(defaultFileContext,
- new Path(jobSubmitDir, MRJobConfig.JOB_JAR),
- LocalResourceType.ARCHIVE));
+ jobJarPath, LocalResourceType.ARCHIVE));
} else {
// Job jar may be null. E.g., for pipes, the job jar is the hadoop
// mapreduce jar itself which is already on the classpath.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index de0ee249ad..8edf4f15d9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -72,8 +72,10 @@ public MiniMRYarnCluster(String testName, int noOfNMs) {
@Override
public void init(Configuration conf) {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
- conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(),
- "apps_staging_dir/").getAbsolutePath());
+ if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
+ conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(),
+ "apps_staging_dir/").getAbsolutePath());
+ }
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
try {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index 03fdc4e57f..4a30c3cfa6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -41,10 +41,10 @@
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
@@ -80,15 +80,24 @@ public class TestMRJobs {
private static final Log LOG = LogFactory.getLog(TestMRJobs.class);
protected static MiniMRYarnCluster mrCluster;
+ protected static MiniDFSCluster dfsCluster;
private static Configuration conf = new Configuration();
private static FileSystem localFs;
+ private static FileSystem remoteFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
+ try {
+ dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+ .format(true).racks(null).build();
+ remoteFs = dfsCluster.getFileSystem();
+ } catch (IOException io) {
+ throw new RuntimeException("problem starting mini dfs cluster", io);
+ }
}
private static Path TEST_ROOT_DIR = new Path("target",
@@ -107,6 +116,8 @@ public static void setup() throws IOException {
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestMRJobs.class.getName(), 3);
Configuration conf = new Configuration();
+ conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
+ conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir");
mrCluster.init(conf);
mrCluster.start();
}
@@ -123,6 +134,10 @@ public static void tearDown() {
mrCluster.stop();
mrCluster = null;
}
+ if (dfsCluster != null) {
+ dfsCluster.shutdown();
+ dfsCluster = null;
+ }
}
@Test
@@ -403,7 +418,6 @@ public void setup(Context context) throws IOException {
Configuration conf = context.getConfiguration();
Path[] files = context.getLocalCacheFiles();
Path[] archives = context.getLocalCacheArchives();
- FileSystem fs = LocalFileSystem.get(conf);
// Check that 4 (2 + appjar + DistributedCacheChecker jar) files
// and 2 archives are present
@@ -411,13 +425,13 @@ public void setup(Context context) throws IOException {
Assert.assertEquals(2, archives.length);
// Check lengths of the files
- Assert.assertEquals(1, fs.getFileStatus(files[1]).getLen());
- Assert.assertTrue(fs.getFileStatus(files[2]).getLen() > 1);
+ Assert.assertEquals(1, localFs.getFileStatus(files[1]).getLen());
+ Assert.assertTrue(localFs.getFileStatus(files[2]).getLen() > 1);
// Check extraction of the archive
- Assert.assertTrue(fs.exists(new Path(archives[0],
+ Assert.assertTrue(localFs.exists(new Path(archives[0],
"distributed.jar.inside3")));
- Assert.assertTrue(fs.exists(new Path(archives[1],
+ Assert.assertTrue(localFs.exists(new Path(archives[1],
"distributed.jar.inside4")));
// Check the class loaders
@@ -448,8 +462,7 @@ public void setup(Context context) throws IOException {
}
}
- @Test
- public void testDistributedCache() throws Exception {
+ public void _testDistributedCache(String jobJarPath) throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
@@ -470,11 +483,13 @@ public void testDistributedCache() throws Exception {
// Set the job jar to a new "dummy" jar so we can check that its extracted
// properly
- job.setJar(makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString()));
+ job.setJar(jobJarPath);
// Because the job jar is a "dummy" jar, we need to include the jar with
// DistributedCacheChecker or it won't be able to find it
- job.addFileToClassPath(new Path(
- JarFinder.getJar(DistributedCacheChecker.class)));
+ Path distributedCacheCheckerJar = new Path(
+ JarFinder.getJar(DistributedCacheChecker.class));
+ job.addFileToClassPath(distributedCacheCheckerJar.makeQualified(
+ localFs.getUri(), distributedCacheCheckerJar.getParent()));
job.setMapperClass(DistributedCacheChecker.class);
job.setOutputFormatClass(NullOutputFormat.class);
@@ -484,7 +499,9 @@ public void testDistributedCache() throws Exception {
job.addCacheFile(
new URI(first.toUri().toString() + "#distributed.first.symlink"));
job.addFileToClassPath(second);
- job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
+ // The AppMaster jar itself
+ job.addFileToClassPath(
+ APP_JAR.makeQualified(localFs.getUri(), APP_JAR.getParent()));
job.addArchiveToClassPath(third);
job.addCacheArchive(fourth.toUri());
job.setMaxMapAttempts(1); // speed up failures
@@ -497,6 +514,23 @@ public void testDistributedCache() throws Exception {
" but didn't Match Job ID " + jobId ,
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
}
+
+ @Test
+ public void testDistributedCache() throws Exception {
+ // Test with a local (file:///) Job Jar
+ Path localJobJarPath = makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString());
+ _testDistributedCache(localJobJarPath.toUri().toString());
+
+ // Test with a remote (hdfs://) Job Jar
+ Path remoteJobJarPath = new Path(remoteFs.getUri().toString() + "/",
+ localJobJarPath.getName());
+ remoteFs.moveFromLocalFile(localJobJarPath, remoteJobJarPath);
+ File localJobJarFile = new File(localJobJarPath.toUri().toString());
+ if (localJobJarFile.exists()) { // just to make sure
+ localJobJarFile.delete();
+ }
+ _testDistributedCache(remoteJobJarPath.toUri().toString());
+ }
private Path createTempFile(String filename, String contents)
throws IOException {
@@ -522,7 +556,7 @@ private Path makeJar(Path p, int index) throws FileNotFoundException,
return p;
}
- private String makeJobJarWithLib(String testDir) throws FileNotFoundException,
+ private Path makeJobJarWithLib(String testDir) throws FileNotFoundException,
IOException{
Path jobJarPath = new Path(testDir, "thejob.jar");
FileOutputStream fos =
@@ -535,7 +569,7 @@ private String makeJobJarWithLib(String testDir) throws FileNotFoundException,
new Path(testDir, "lib2.jar").toUri().getPath()));
jos.close();
localFs.setPermission(jobJarPath, new FsPermission("700"));
- return jobJarPath.toUri().toString();
+ return jobJarPath;
}
private void createAndAddJarToJar(JarOutputStream jos, File jarFile)
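Because the mini cluster now runs with fs.defaultFS pointing at the MiniDFSCluster, a bare local path passed to addFileToClassPath would be resolved against HDFS instead of the local disk; that is why the test qualifies the checker jar and APP_JAR with the local filesystem's URI first. A small sketch of that qualification step, with a made-up jar path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifyLocalJar {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem localFs = FileSystem.getLocal(conf);
        Path jar = new Path("target/test/lib/checker.jar");   // relative, local
        // pin the scheme/authority to file:// regardless of fs.defaultFS
        Path qualified = jar.makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
        System.out.println(qualified);  // e.g. file:/home/user/target/test/lib/checker.jar
      }
    }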
From deead78e35b0cb81af875b5a8032cbd06c9a2dae Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Sat, 25 Aug 2012 01:03:22 +0000
Subject: [PATCH 03/62] HDFS-3844. Add @Override and remove {@inheritdoc} and
unnecessary imports. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377168 13f79535-47bb-0310-9956-ffa450edef68
---
.../tools/RootDocProcessor.java | 1 +
.../org/apache/hadoop/conf/Configuration.java | 2 +
.../org/apache/hadoop/conf/Configured.java | 2 +
.../hadoop/conf/ReconfigurationServlet.java | 11 ------
.../apache/hadoop/fs/AbstractFileSystem.java | 1 -
.../org/apache/hadoop/fs/AvroFSInput.java | 5 +++
.../org/apache/hadoop/fs/BlockLocation.java | 1 +
.../hadoop/fs/BufferedFSInputStream.java | 8 +++-
.../apache/hadoop/fs/ChecksumFileSystem.java | 18 +++++++--
.../java/org/apache/hadoop/fs/ChecksumFs.java | 10 ++++-
.../org/apache/hadoop/fs/ContentSummary.java | 6 +--
.../main/java/org/apache/hadoop/fs/DF.java | 1 +
.../main/java/org/apache/hadoop/fs/DU.java | 4 ++
.../apache/hadoop/fs/FSDataInputStream.java | 7 ++++
.../org/apache/hadoop/fs/FSInputChecker.java | 7 ++++
.../org/apache/hadoop/fs/FSInputStream.java | 6 +++
.../org/apache/hadoop/fs/FSOutputSummer.java | 2 +
.../org/apache/hadoop/fs/FileChecksum.java | 3 +-
.../org/apache/hadoop/fs/FileContext.java | 26 +++++++++++++
.../java/org/apache/hadoop/fs/FileStatus.java | 5 +++
.../java/org/apache/hadoop/fs/FileSystem.java | 12 ++++--
.../java/org/apache/hadoop/fs/FileUtil.java | 2 +
.../apache/hadoop/fs/FilterFileSystem.java | 38 +++++++++++++------
.../java/org/apache/hadoop/fs/FilterFs.java | 3 --
.../apache/hadoop/fs/FsServerDefaults.java | 3 ++
.../java/org/apache/hadoop/fs/FsShell.java | 1 +
.../java/org/apache/hadoop/fs/FsStatus.java | 2 +
.../org/apache/hadoop/fs/FsUrlConnection.java | 1 -
.../hadoop/fs/FsUrlStreamHandlerFactory.java | 1 +
.../java/org/apache/hadoop/fs/GlobFilter.java | 2 +
.../org/apache/hadoop/fs/HarFileSystem.java | 26 +++++++++++++
.../org/apache/hadoop/fs/LocalFileSystem.java | 1 +
.../apache/hadoop/fs/LocatedFileStatus.java | 3 ++
.../hadoop/fs/MD5MD5CRC32FileChecksum.java | 20 +++++-----
.../java/org/apache/hadoop/fs/Options.java | 1 -
.../main/java/org/apache/hadoop/fs/Path.java | 4 ++
.../apache/hadoop/fs/RawLocalFileSystem.java | 36 +++++++++++++++---
.../apache/hadoop/fs/TrashPolicyDefault.java | 1 +
.../apache/hadoop/fs/ftp/FTPFileSystem.java | 1 +
.../apache/hadoop/fs/ftp/FTPInputStream.java | 9 +++++
.../org/apache/hadoop/fs/kfs/KFSImpl.java | 17 +++++++++
.../apache/hadoop/fs/kfs/KFSInputStream.java | 10 +++++
.../apache/hadoop/fs/kfs/KFSOutputStream.java | 9 ++---
.../hadoop/fs/permission/FsPermission.java | 13 ++++---
.../fs/permission/PermissionStatus.java | 9 +++--
.../hadoop/fs/s3/Jets3tFileSystemStore.java | 14 +++++++
.../apache/hadoop/fs/s3/MigrationTool.java | 4 ++
.../org/apache/hadoop/fs/s3/S3FileSystem.java | 2 +
.../s3native/Jets3tNativeFileSystemStore.java | 12 ++++++
.../apache/hadoop/fs/shell/CommandFormat.java | 3 ++
.../org/apache/hadoop/fs/shell/Delete.java | 2 +
.../org/apache/hadoop/fs/shell/Display.java | 2 +
.../org/apache/hadoop/fs/shell/FsCommand.java | 1 +
.../org/apache/hadoop/fs/shell/PathData.java | 1 +
.../hadoop/fs/viewfs/ChRootedFileSystem.java | 1 +
.../fs/viewfs/NotInMountpointException.java | 4 --
.../hadoop/fs/viewfs/ViewFileSystem.java | 1 +
.../hadoop/fs/viewfs/ViewFsFileStatus.java | 3 +-
.../hadoop/ha/ActiveStandbyElector.java | 4 ++
.../apache/hadoop/ha/HAServiceProtocol.java | 1 +
.../java/org/apache/hadoop/ha/NodeFencer.java | 1 +
.../apache/hadoop/ha/SshFenceByTcpPort.java | 2 +
.../org/apache/hadoop/http/HttpServer.java | 4 +-
.../apache/hadoop/io/AbstractMapWritable.java | 6 ++-
.../org/apache/hadoop/io/ArrayWritable.java | 2 +
.../org/apache/hadoop/io/BooleanWritable.java | 2 +
.../org/apache/hadoop/io/ByteWritable.java | 2 +
.../org/apache/hadoop/io/BytesWritable.java | 4 ++
.../apache/hadoop/io/CompressedWritable.java | 2 +
.../apache/hadoop/io/DataInputByteBuffer.java | 2 -
.../apache/hadoop/io/DefaultStringifier.java | 3 ++
.../org/apache/hadoop/io/DoubleWritable.java | 2 +
.../org/apache/hadoop/io/EnumSetWritable.java | 15 ++++----
.../org/apache/hadoop/io/FloatWritable.java | 2 +
.../org/apache/hadoop/io/GenericWritable.java | 5 +++
.../java/org/apache/hadoop/io/IOUtils.java | 2 +
.../org/apache/hadoop/io/IntWritable.java | 2 +
.../org/apache/hadoop/io/LongWritable.java | 8 ++++
.../java/org/apache/hadoop/io/MD5Hash.java | 8 ++++
.../java/org/apache/hadoop/io/MapFile.java | 2 +
.../org/apache/hadoop/io/MapWritable.java | 30 +++++++--------
.../org/apache/hadoop/io/NullWritable.java | 4 ++
.../org/apache/hadoop/io/ObjectWritable.java | 7 ++++
.../org/apache/hadoop/io/OutputBuffer.java | 1 +
.../org/apache/hadoop/io/ReadaheadPool.java | 1 +
.../org/apache/hadoop/io/SecureIOUtils.java | 1 -
.../org/apache/hadoop/io/SequenceFile.java | 32 ++++++++++++++++
.../java/org/apache/hadoop/io/SetFile.java | 1 +
.../apache/hadoop/io/SortedMapWritable.java | 38 +++++++++----------
.../org/apache/hadoop/io/Stringifier.java | 1 +
.../main/java/org/apache/hadoop/io/Text.java | 8 ++++
.../apache/hadoop/io/TwoDArrayWritable.java | 2 +
.../main/java/org/apache/hadoop/io/UTF8.java | 2 +
.../org/apache/hadoop/io/VIntWritable.java | 2 +
.../org/apache/hadoop/io/VLongWritable.java | 2 +
.../hadoop/io/VersionMismatchException.java | 1 +
.../apache/hadoop/io/VersionedWritable.java | 2 +
.../apache/hadoop/io/WritableComparator.java | 2 +
.../apache/hadoop/io/compress/BZip2Codec.java | 20 ++++++++++
.../io/compress/BlockCompressorStream.java | 3 ++
.../io/compress/BlockDecompressorStream.java | 3 ++
.../io/compress/CompressionCodecFactory.java | 1 +
.../io/compress/CompressionInputStream.java | 5 +++
.../io/compress/CompressionOutputStream.java | 3 ++
.../hadoop/io/compress/CompressorStream.java | 5 +++
.../io/compress/DecompressorStream.java | 9 +++++
.../hadoop/io/compress/DefaultCodec.java | 11 ++++++
.../apache/hadoop/io/compress/GzipCodec.java | 19 ++++++++--
.../io/compress/bzip2/CBZip2InputStream.java | 3 ++
.../io/compress/bzip2/CBZip2OutputStream.java | 5 +++
.../io/compress/lz4/Lz4Decompressor.java | 1 +
.../compress/snappy/SnappyDecompressor.java | 1 +
.../zlib/BuiltInGzipDecompressor.java | 13 +++++--
.../io/compress/zlib/BuiltInZlibDeflater.java | 1 +
.../io/compress/zlib/BuiltInZlibInflater.java | 1 +
.../io/compress/zlib/ZlibCompressor.java | 10 +++++
.../io/compress/zlib/ZlibDecompressor.java | 10 +++++
.../apache/hadoop/io/file/tfile/BCFile.java | 4 ++
.../hadoop/io/file/tfile/CompareUtils.java | 1 +
.../apache/hadoop/io/file/tfile/TFile.java | 3 ++
.../apache/hadoop/io/nativeio/NativeIO.java | 1 +
.../hadoop/io/nativeio/NativeIOException.java | 1 +
.../apache/hadoop/io/retry/RetryPolicies.java | 4 ++
.../io/serializer/DeserializerComparator.java | 1 +
.../io/serializer/JavaSerialization.java | 12 ++++--
.../JavaSerializationComparator.java | 1 +
.../io/serializer/WritableSerialization.java | 2 -
.../io/serializer/avro/AvroSerialization.java | 2 +
.../java/org/apache/hadoop/ipc/Client.java | 4 ++
.../apache/hadoop/ipc/ProtobufRpcEngine.java | 1 +
.../org/apache/hadoop/ipc/ProtocolProxy.java | 1 -
.../apache/hadoop/ipc/ProtocolSignature.java | 3 +-
.../java/org/apache/hadoop/ipc/Server.java | 1 +
.../apache/hadoop/ipc/WritableRpcEngine.java | 7 ++++
.../java/org/apache/hadoop/log/LogLevel.java | 1 +
.../metrics/ganglia/GangliaContext.java | 1 -
.../hadoop/metrics/spi/CompositeContext.java | 1 -
.../spi/NullContextWithUpdateThread.java | 1 -
.../hadoop/metrics/spi/OutputRecord.java | 4 --
.../org/apache/hadoop/metrics/spi/Util.java | 1 -
.../hadoop/metrics/util/MetricsIntValue.java | 2 -
.../metrics/util/MetricsTimeVaryingInt.java | 2 -
.../metrics/util/MetricsTimeVaryingLong.java | 2 -
.../metrics/util/MetricsTimeVaryingRate.java | 2 -
.../hadoop/metrics2/impl/MetricsConfig.java | 1 -
.../apache/hadoop/metrics2/sink/FileSink.java | 1 -
.../hadoop/metrics2/source/JvmMetrics.java | 1 -
.../net/AbstractDNSToSwitchMapping.java | 1 -
.../main/java/org/apache/hadoop/net/DNS.java | 1 -
.../apache/hadoop/net/NetworkTopology.java | 1 +
.../apache/hadoop/net/ScriptBasedMapping.java | 1 -
.../hadoop/net/SocketIOWithTimeout.java | 1 -
.../apache/hadoop/net/SocketInputStream.java | 5 +++
.../apache/hadoop/net/SocketOutputStream.java | 6 +++
.../apache/hadoop/net/SocksSocketFactory.java | 11 +-----
.../hadoop/net/StandardSocketFactory.java | 7 ----
.../org/apache/hadoop/net/TableMapping.java | 1 +
.../hadoop/record/BinaryRecordInput.java | 17 +++++++++
.../hadoop/record/BinaryRecordOutput.java | 15 ++++++++
.../java/org/apache/hadoop/record/Buffer.java | 5 +++
.../apache/hadoop/record/CsvRecordInput.java | 16 ++++++++
.../apache/hadoop/record/CsvRecordOutput.java | 14 +++++++
.../java/org/apache/hadoop/record/Record.java | 4 ++
.../hadoop/record/RecordComparator.java | 1 +
.../apache/hadoop/record/XmlRecordInput.java | 21 ++++++++++
.../apache/hadoop/record/XmlRecordOutput.java | 14 +++++++
.../hadoop/record/compiler/CGenerator.java | 1 +
.../hadoop/record/compiler/CodeBuffer.java | 1 +
.../apache/hadoop/record/compiler/Consts.java | 4 --
.../hadoop/record/compiler/CppGenerator.java | 1 +
.../hadoop/record/compiler/JBoolean.java | 7 ++++
.../hadoop/record/compiler/JBuffer.java | 9 +++++
.../apache/hadoop/record/compiler/JByte.java | 5 +++
.../hadoop/record/compiler/JCompType.java | 5 +++
.../hadoop/record/compiler/JDouble.java | 6 +++
.../apache/hadoop/record/compiler/JFloat.java | 6 +++
.../apache/hadoop/record/compiler/JInt.java | 5 +++
.../apache/hadoop/record/compiler/JLong.java | 6 +++
.../apache/hadoop/record/compiler/JMap.java | 10 +++++
.../hadoop/record/compiler/JRecord.java | 9 +++++
.../hadoop/record/compiler/JString.java | 6 +++
.../hadoop/record/compiler/JVector.java | 10 +++++
.../hadoop/record/compiler/JavaGenerator.java | 1 +
.../hadoop/record/compiler/ant/RccTask.java | 1 +
.../compiler/generated/ParseException.java | 1 +
.../hadoop/record/compiler/generated/Rcc.java | 1 -
.../compiler/generated/RccTokenManager.java | 8 ----
.../record/compiler/generated/Token.java | 1 +
.../compiler/generated/TokenMgrError.java | 1 +
.../hadoop/record/meta/FieldTypeInfo.java | 2 +
.../apache/hadoop/record/meta/MapTypeID.java | 5 ++-
.../hadoop/record/meta/RecordTypeInfo.java | 3 ++
.../hadoop/record/meta/StructTypeID.java | 3 ++
.../org/apache/hadoop/record/meta/TypeID.java | 2 +
.../hadoop/record/meta/VectorTypeID.java | 3 ++
.../security/RefreshUserMappingsProtocol.java | 1 -
.../hadoop/security/SaslInputStream.java | 7 ++++
.../hadoop/security/SaslOutputStream.java | 7 +++-
.../apache/hadoop/security/SaslRpcClient.java | 1 +
.../apache/hadoop/security/SaslRpcServer.java | 2 -
.../apache/hadoop/security/SecurityUtil.java | 2 +
.../security/ShellBasedUnixGroupsMapping.java | 3 --
.../ShellBasedUnixGroupsNetgroupMapping.java | 6 ---
.../hadoop/security/UserGroupInformation.java | 4 +-
.../security/authorize/AccessControlList.java | 3 ++
.../security/authorize/PolicyProvider.java | 1 +
.../RefreshAuthorizationPolicyProtocol.java | 1 -
.../ssl/FileBasedKeyStoresFactory.java | 1 +
.../security/ssl/SSLHostnameVerifier.java | 17 +++++++++
.../apache/hadoop/security/token/Token.java | 4 +-
.../AbstractDelegationTokenIdentifier.java | 8 +++-
.../AbstractDelegationTokenSecretManager.java | 1 +
.../token/delegation/DelegationKey.java | 2 +
.../hadoop/tools/GetUserMappingsProtocol.java | 1 -
.../apache/hadoop/util/AsyncDiskService.java | 1 +
.../org/apache/hadoop/util/DataChecksum.java | 8 ++++
.../java/org/apache/hadoop/util/HeapSort.java | 5 +--
.../java/org/apache/hadoop/util/Progress.java | 1 +
.../org/apache/hadoop/util/PureJavaCrc32.java | 8 ++--
.../apache/hadoop/util/PureJavaCrc32C.java | 8 ++--
.../org/apache/hadoop/util/QuickSort.java | 5 +--
.../apache/hadoop/util/ReflectionUtils.java | 1 +
.../java/org/apache/hadoop/util/Shell.java | 4 +-
.../org/apache/hadoop/util/bloom/Filter.java | 2 +
.../org/apache/hadoop/util/bloom/Key.java | 2 +
.../apache/hadoop/util/hash/JenkinsHash.java | 1 +
.../apache/hadoop/util/hash/MurmurHash.java | 1 +
.../apache/hadoop/cli/util/CLICommand.java | 1 +
.../apache/hadoop/cli/util/CLITestCmd.java | 6 +++
.../apache/hadoop/cli/util/FSCmdExecutor.java | 1 +
.../apache/hadoop/conf/TestConfServlet.java | 1 -
.../apache/hadoop/conf/TestConfiguration.java | 2 -
.../conf/TestConfigurationDeprecation.java | 2 -
.../hadoop/conf/TestDeprecatedKeys.java | 3 --
.../hadoop/conf/TestReconfiguration.java | 6 ---
.../hadoop/fs/FSMainOperationsBaseTest.java | 2 +
.../fs/FileContextMainOperationsBaseTest.java | 2 +
.../hadoop/fs/FileContextPermissionBase.java | 1 +
.../apache/hadoop/fs/FileContextURIBase.java | 2 -
.../org/apache/hadoop/fs/TestAvroFSInput.java | 1 -
.../java/org/apache/hadoop/fs/TestDU.java | 2 +
.../TestFSMainOperationsLocalFileSystem.java | 3 ++
.../hadoop/fs/TestFcLocalFsPermission.java | 2 +
.../apache/hadoop/fs/TestFcLocalFsUtil.java | 1 +
.../hadoop/fs/TestFileSystemCaching.java | 10 +++++
.../org/apache/hadoop/fs/TestFsOptions.java | 2 -
.../hadoop/fs/TestFsShellReturnCode.java | 1 +
.../org/apache/hadoop/fs/TestListFiles.java | 1 -
.../fs/TestLocalFSFileContextCreateMkdir.java | 1 +
.../TestLocalFSFileContextMainOperations.java | 2 +
.../fs/TestLocalFSFileContextSymlink.java | 5 +++
.../hadoop/fs/TestLocalFsFCStatistics.java | 3 ++
.../hadoop/fs/TestLocal_S3FileContextURI.java | 1 +
.../hadoop/fs/TestS3_LocalFileContextURI.java | 1 +
.../java/org/apache/hadoop/fs/TestTrash.java | 2 +
.../hadoop/fs/kfs/KFSEmulationImpl.java | 17 +++++++++
.../hadoop/fs/kfs/TestKosmosFileSystem.java | 8 +---
.../fs/loadGenerator/DataGenerator.java | 1 +
.../fs/loadGenerator/LoadGenerator.java | 2 +
.../fs/loadGenerator/StructureGenerator.java | 1 +
.../hadoop/fs/s3/InMemoryFileSystemStore.java | 14 +++++++
.../InMemoryNativeFileSystemStore.java | 12 ++++++
.../fs/viewfs/TestChRootedFileSystem.java | 1 +
.../TestFSMainOperationsLocalFileSystem.java | 2 +
.../fs/viewfs/TestFcCreateMkdirLocalFs.java | 2 +
.../viewfs/TestFcMainOperationsLocalFs.java | 2 +
.../fs/viewfs/TestFcPermissionsLocalFs.java | 2 +
...tViewFileSystemDelegationTokenSupport.java | 2 +
.../TestViewFileSystemLocalFileSystem.java | 2 +
...ileSystemWithAuthorityLocalFileSystem.java | 3 ++
.../hadoop/fs/viewfs/TestViewFsLocalFs.java | 2 +
.../hadoop/fs/viewfs/TestViewFsTrash.java | 2 +-
.../TestViewFsWithAuthorityLocalFs.java | 3 ++
.../fs/viewfs/TestViewfsFileStatus.java | 1 -
.../apache/hadoop/ha/ClientBaseWithFixes.java | 3 +-
.../org/apache/hadoop/ha/DummyHAService.java | 1 +
.../apache/hadoop/http/TestGlobalFilter.java | 4 ++
.../apache/hadoop/http/TestPathFilter.java | 4 ++
.../apache/hadoop/http/TestServletFilter.java | 5 +++
.../org/apache/hadoop/io/AvroTestUtil.java | 2 -
.../org/apache/hadoop/io/RandomDatum.java | 3 ++
.../apache/hadoop/io/TestEnumSetWritable.java | 8 ----
.../apache/hadoop/io/TestGenericWritable.java | 6 +++
.../org/apache/hadoop/io/TestMD5Hash.java | 2 +
.../apache/hadoop/io/TestSecureIOUtils.java | 3 --
.../apache/hadoop/io/TestSequenceFile.java | 2 +
.../java/org/apache/hadoop/io/TestText.java | 1 +
.../hadoop/io/TestVersionedWritable.java | 8 ++++
.../org/apache/hadoop/io/TestWritable.java | 3 ++
.../apache/hadoop/io/TestWritableName.java | 3 ++
.../hadoop/io/compress/TestCodecFactory.java | 13 +++++++
.../hadoop/io/file/tfile/NanoTimer.java | 1 +
.../io/file/tfile/TestTFileByteArrays.java | 1 -
...eNoneCodecsJClassComparatorByteArrays.java | 3 --
.../tfile/TestTFileSeqFileComparison.java | 16 ++++++++
.../hadoop/io/nativeio/TestNativeIO.java | 1 +
.../hadoop/io/retry/TestFailoverProxy.java | 1 +
.../io/retry/UnreliableImplementation.java | 7 +++-
.../hadoop/io/serializer/avro/Record.java | 2 +
.../avro/TestAvroSerialization.java | 4 ++
.../apache/hadoop/ipc/MiniRPCBenchmark.java | 2 +
.../java/org/apache/hadoop/ipc/TestIPC.java | 3 ++
.../java/org/apache/hadoop/ipc/TestRPC.java | 14 +++++++
.../hadoop/ipc/TestRPCCompatibility.java | 1 +
.../org/apache/hadoop/ipc/TestSaslRPC.java | 6 +++
.../hadoop/metrics/TestMetricsServlet.java | 1 +
.../metrics2/lib/TestMetricsAnnotations.java | 4 +-
.../metrics2/lib/TestMetricsRegistry.java | 3 ++
.../org/apache/hadoop/net/StaticMapping.java | 2 -
.../org/apache/hadoop/record/FromCpp.java | 2 +
.../org/apache/hadoop/record/RecordBench.java | 2 -
.../apache/hadoop/record/TestRecordIO.java | 2 +
.../hadoop/record/TestRecordVersioning.java | 2 +
.../java/org/apache/hadoop/record/ToCpp.java | 2 +
.../security/TestAuthenticationFilter.java | 2 +-
.../hadoop/security/TestCredentials.java | 3 --
.../security/TestDoAsEffectiveUser.java | 10 ++++-
.../hadoop/security/TestGroupsCaching.java | 3 ++
.../hadoop/security/TestJNIGroupsMapping.java | 5 ---
.../security/TestUserGroupInformation.java | 6 +++
.../authorize/TestAccessControlList.java | 3 --
.../hadoop/security/token/TestToken.java | 2 -
.../token/delegation/TestDelegationToken.java | 9 ++++-
.../apache/hadoop/test/GenericTestUtils.java | 1 +
.../apache/hadoop/test/MetricsAsserts.java | 1 -
.../hadoop/test/MultithreadedTestUtil.java | 2 +
.../org/apache/hadoop/util/JarFinder.java | 1 -
.../apache/hadoop/util/TestIndexedSort.java | 7 +++-
.../org/apache/hadoop/util/TestOptions.java | 1 -
.../apache/hadoop/util/TestPureJavaCrc32.java | 2 +-
.../hadoop/util/TestReflectionUtils.java | 3 +-
.../org/apache/hadoop/util/TestRunJar.java | 2 +
.../org/apache/hadoop/util/TestShell.java | 3 +-
.../fs/http/client/HttpFSFileSystem.java | 1 -
.../client/HttpFSKerberosAuthenticator.java | 3 --
.../HttpFSKerberosAuthenticationHandler.java | 3 --
.../http/server/HttpFSParametersProvider.java | 1 -
.../hadoop/lib/lang/RunnableCallable.java | 1 +
.../hadoop/FileSystemAccessService.java | 2 +
.../InstrumentationService.java | 5 +++
.../service/scheduler/SchedulerService.java | 1 +
.../hadoop/lib/servlet/ServerWebApp.java | 2 +
.../apache/hadoop/lib/wsrs/BooleanParam.java | 1 +
.../org/apache/hadoop/lib/wsrs/ByteParam.java | 1 +
.../org/apache/hadoop/lib/wsrs/EnumParam.java | 1 +
.../apache/hadoop/lib/wsrs/IntegerParam.java | 1 +
.../org/apache/hadoop/lib/wsrs/LongParam.java | 1 +
.../org/apache/hadoop/lib/wsrs/Param.java | 1 +
.../apache/hadoop/lib/wsrs/ShortParam.java | 1 +
.../apache/hadoop/lib/wsrs/StringParam.java | 2 +
.../TestHttpFSFileSystemLocalFileSystem.java | 3 ++
.../TestHttpFSWithHttpFSFileSystem.java | 4 ++
...stHttpFSKerberosAuthenticationHandler.java | 1 -
.../apache/hadoop/test/TestHFSTestCase.java | 3 ++
.../org/apache/hadoop/test/TestHTestCase.java | 3 ++
.../apache/hadoop/test/TestHdfsHelper.java | 1 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../main/java/org/apache/hadoop/fs/Hdfs.java | 3 --
.../apache/hadoop/hdfs/HsftpFileSystem.java | 1 -
.../hadoop/hdfs/RemoteBlockReader2.java | 4 --
.../token/block/BlockTokenIdentifier.java | 2 -
.../server/blockmanagement/BlockManager.java | 1 -
.../BlockPlacementPolicyWithNodeGroup.java | 9 -----
.../blockmanagement/DatanodeManager.java | 1 -
.../blockmanagement/HeartbeatManager.java | 1 -
.../PendingDataNodeMessages.java | 4 --
.../hadoop/hdfs/server/common/JspHelper.java | 3 --
.../hadoop/hdfs/server/common/Storage.java | 1 +
.../datanode/SecureDataNodeStarter.java | 2 -
.../server/namenode/EditLogInputStream.java | 3 --
.../hdfs/server/namenode/FSEditLogLoader.java | 2 -
.../hdfs/server/namenode/FSEditLogOp.java | 1 -
.../hdfs/server/namenode/FSImageFormat.java | 1 -
.../server/namenode/FSImageSerialization.java | 1 -
.../server/namenode/NameNodeHttpServer.java | 6 ---
.../namenode/RedundantEditLogInputStream.java | 6 ---
.../server/namenode/SerialNumberManager.java | 2 +-
.../StatisticsEditsVisitor.java | 1 -
.../ImageLoaderCurrent.java | 2 -
.../hadoop/hdfs/util/CyclicIteration.java | 3 --
.../hadoop/fi/DataTransferTestUtil.java | 1 -
.../apache/hadoop/hdfs/PipelinesTestUtil.java | 3 --
.../org/apache/hadoop/hdfs/TestDFSMkdirs.java | 3 --
.../namenode/TestGenericJournalConf.java | 1 -
.../TestSecondaryNameNodeUpgrade.java | 6 ---
385 files changed, 1291 insertions(+), 390 deletions(-)
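Most of this patch is mechanical: implementations of interface and superclass methods gain @Override (legal on interface methods since Java 6), and stale {@inheritDoc} javadoc and unused imports are dropped. The annotation's value is that the compiler rejects a method that only looks like an override; a tiny standalone illustration:

    public class OverrideCheck {
      interface Describable {
        String describe(long id);
      }

      static class Item implements Describable {
        @Override                 // compiles: the signature matches the interface
        public String describe(long id) { return "item:" + id; }

        // @Override
        // public String describe(int id) { ... }   // would be rejected by javac:
        // int != long, so this would be an overload, not an override
      }

      public static void main(String[] args) {
        Describable d = new Item();
        System.out.println(d.describe(42L)); // prints item:42
      }
    }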
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index 2783bf3b30..a6ce035fa9 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -97,6 +97,7 @@ public ExcludeHandler(Object target) {
this.target = target;
}
+ @Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
String methodName = method.getName();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index cf7aafafb7..f1cb41dd6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1847,6 +1847,7 @@ public void clear() {
*
* @return an iterator over the entries.
*/
+ @Override
+ public Iterator<Map.Entry<String, String>> iterator() {
// Get a copy of just the string to string pairs. After the old object
// methods that allow non-strings to be put into configurations are removed,
@@ -2272,6 +2273,7 @@ public void readFields(DataInput in) throws IOException {
}
//@Override
+ @Override
public void write(DataOutput out) throws IOException {
Properties props = getProps();
WritableUtils.writeVInt(out, props.size());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
index 2bc7e537e4..f06af2b98d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
@@ -39,11 +39,13 @@ public Configured(Configuration conf) {
}
// inherit javadoc
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
// inherit javadoc
+ @Override
public Configuration getConf() {
return conf;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index 041b263edd..452d29f7b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -23,12 +23,10 @@
import org.apache.commons.lang.StringEscapeUtils;
import java.util.Collection;
-import java.util.Map;
import java.util.Enumeration;
import java.io.IOException;
import java.io.PrintWriter;
-import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
@@ -57,9 +55,6 @@ public class ReconfigurationServlet extends HttpServlet {
public static final String CONF_SERVLET_RECONFIGURABLE_PREFIX =
"conf.servlet.reconfigurable.";
- /**
- * {@inheritDoc}
- */
@Override
public void init() throws ServletException {
super.init();
@@ -202,9 +197,6 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
}
}
- /**
- * {@inheritDoc}
- */
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
@@ -219,9 +211,6 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
printFooter(out);
}
- /**
- * {@inheritDoc}
- */
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index d9eda44580..6adbeab60a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -47,7 +47,6 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
index a319fb7b36..b4a4a85674 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
@@ -45,22 +45,27 @@ public AvroFSInput(final FileContext fc, final Path p) throws IOException {
this.stream = fc.open(p);
}
+ @Override
public long length() {
return len;
}
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
return stream.read(b, off, len);
}
+ @Override
public void seek(long p) throws IOException {
stream.seek(p);
}
+ @Override
public long tell() throws IOException {
return stream.getPos();
}
+ @Override
public void close() throws IOException {
stream.close();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index cfe9ee8c66..fa095343c5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -204,6 +204,7 @@ public void setTopologyPaths(String[] topologyPaths) throws IOException {
}
}
+ @Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(offset);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
index f322924012..745148281d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
@@ -19,7 +19,6 @@
import java.io.BufferedInputStream;
import java.io.FileDescriptor;
-import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -50,10 +49,12 @@ public BufferedFSInputStream(FSInputStream in, int size) {
super(in, size);
}
+ @Override
public long getPos() throws IOException {
return ((FSInputStream)in).getPos()-(count-pos);
}
+ @Override
public long skip(long n) throws IOException {
if (n <= 0) {
return 0;
@@ -63,6 +64,7 @@ public long skip(long n) throws IOException {
return n;
}
+ @Override
public void seek(long pos) throws IOException {
if( pos<0 ) {
return;
@@ -82,20 +84,24 @@ public void seek(long pos) throws IOException {
((FSInputStream)in).seek(pos);
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
pos = 0;
count = 0;
return ((FSInputStream)in).seekToNewSource(targetPos);
}
+ @Override
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
return ((FSInputStream)in).read(position, buffer, offset, length) ;
}
+ @Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
((FSInputStream)in).readFully(position, buffer, offset, length);
}
+ @Override
public void readFully(long position, byte[] buffer) throws IOException {
((FSInputStream)in).readFully(position, buffer);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 17707718b8..42ee870268 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -53,6 +53,7 @@ public ChecksumFileSystem(FileSystem fs) {
super(fs);
}
+ @Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
@@ -64,6 +65,7 @@ public void setConf(Configuration conf) {
/**
* Set whether to verify checksum.
*/
+ @Override
public void setVerifyChecksum(boolean verifyChecksum) {
this.verifyChecksum = verifyChecksum;
}
@@ -74,6 +76,7 @@ public void setWriteChecksum(boolean writeChecksum) {
}
/** get the raw file system */
+ @Override
public FileSystem getRawFileSystem() {
return fs;
}
@@ -162,14 +165,17 @@ private long getChecksumFilePos( long dataPos ) {
return HEADER_LENGTH + 4*(dataPos/bytesPerSum);
}
+ @Override
protected long getChunkPosition( long dataPos ) {
return dataPos/bytesPerSum*bytesPerSum;
}
+ @Override
public int available() throws IOException {
return datas.available() + super.available();
}
+ @Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
// parameter check
@@ -190,6 +196,7 @@ public int read(long position, byte[] b, int off, int len)
return nread;
}
+ @Override
public void close() throws IOException {
datas.close();
if( sums != null ) {
@@ -290,6 +297,7 @@ private long getFileLength() throws IOException {
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
+ @Override
public synchronized long skip(long n) throws IOException {
long curPos = getPos();
long fileLength = getFileLength();
@@ -311,6 +319,7 @@ public synchronized long skip(long n) throws IOException {
* ChecksumException if the chunk to seek to is corrupted
*/
+ @Override
public synchronized void seek(long pos) throws IOException {
if(pos>getFileLength()) {
throw new IOException("Cannot seek after EOF");
@@ -339,7 +348,7 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return new FSDataBoundedInputStream(fs, f, in);
}
- /** {@inheritDoc} */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
@@ -398,6 +407,7 @@ public ChecksumFSOutputSummer(ChecksumFileSystem fs,
sums.writeInt(bytesPerSum);
}
+ @Override
public void close() throws IOException {
flushBuffer();
sums.close();
@@ -412,7 +422,6 @@ protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
}
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -454,7 +463,6 @@ private FSDataOutputStream create(Path f, FsPermission permission,
return out;
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -472,6 +480,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
* @return true if successful;
* false if file does not exist or is a directory
*/
+ @Override
public boolean setReplication(Path src, short replication) throws IOException {
boolean value = fs.setReplication(src, replication);
if (!value)
@@ -487,6 +496,7 @@ public boolean setReplication(Path src, short replication) throws IOException {
/**
* Rename files/dirs
*/
+ @Override
public boolean rename(Path src, Path dst) throws IOException {
if (fs.isDirectory(src)) {
return fs.rename(src, dst);
@@ -516,6 +526,7 @@ public boolean rename(Path src, Path dst) throws IOException {
* Implement the delete(Path, boolean) in checksum
* file system.
*/
+ @Override
public boolean delete(Path f, boolean recursive) throws IOException{
FileStatus fstatus = null;
try {
@@ -538,6 +549,7 @@ public boolean delete(Path f, boolean recursive) throws IOException{
}
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(Path file) {
return !isChecksumFile(file);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 4784991982..12805d86a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -32,7 +32,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.StringUtils;
/**
* Abstract Checksumed Fs.
@@ -61,6 +60,7 @@ public ChecksumFs(AbstractFileSystem theFs)
/**
* Set whether to verify checksum.
*/
+ @Override
public void setVerifyChecksum(boolean inVerifyChecksum) {
this.verifyChecksum = inVerifyChecksum;
}
@@ -152,14 +152,17 @@ private long getChecksumFilePos(long dataPos) {
return HEADER_LENGTH + 4*(dataPos/bytesPerSum);
}
+ @Override
protected long getChunkPosition(long dataPos) {
return dataPos/bytesPerSum*bytesPerSum;
}
+ @Override
public int available() throws IOException {
return datas.available() + super.available();
}
+ @Override
public int read(long position, byte[] b, int off, int len)
throws IOException, UnresolvedLinkException {
// parameter check
@@ -180,6 +183,7 @@ public int read(long position, byte[] b, int off, int len)
return nread;
}
+ @Override
public void close() throws IOException {
datas.close();
if (sums != null) {
@@ -258,6 +262,7 @@ private long getFileLength() throws IOException, UnresolvedLinkException {
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
+ @Override
public synchronized long skip(long n) throws IOException {
final long curPos = getPos();
final long fileLength = getFileLength();
@@ -279,6 +284,7 @@ public synchronized long skip(long n) throws IOException {
* ChecksumException if the chunk to seek to is corrupted
*/
+ @Override
public synchronized void seek(long pos) throws IOException {
if (pos>getFileLength()) {
throw new IOException("Cannot seek after EOF");
@@ -348,6 +354,7 @@ public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file,
sums.writeInt(bytesPerSum);
}
+ @Override
public void close() throws IOException {
flushBuffer();
sums.close();
@@ -447,6 +454,7 @@ public void renameInternal(Path src, Path dst)
* Implement the delete(Path, boolean) in checksum
* file system.
*/
+ @Override
public boolean delete(Path f, boolean recursive)
throws IOException, UnresolvedLinkException {
FileStatus fstatus = null;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index c0ab82de1d..0d685b43e1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -75,7 +75,7 @@ public ContentSummary(
/** Returns (disk) space quota */
public long getSpaceQuota() {return spaceQuota;}
- /** {@inheritDoc} */
+ @Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(length);
@@ -86,7 +86,7 @@ public void write(DataOutput out) throws IOException {
out.writeLong(spaceQuota);
}
- /** {@inheritDoc} */
+ @Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
this.length = in.readLong();
@@ -131,7 +131,7 @@ public static String getHeader(boolean qOption) {
return qOption ? QUOTA_HEADER : HEADER;
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
return toString(true);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
index 9949834222..c552f331f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
@@ -131,6 +131,7 @@ public String getMount() throws IOException {
return mount;
}
+ @Override
public String toString() {
return
"df -k " + mount +"\n" +
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
index 5caec7204d..2c96b0abaf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
@@ -76,6 +76,7 @@ public DU(File path, Configuration conf) throws IOException {
**/
class DURefreshThread implements Runnable {
+ @Override
public void run() {
while(shouldRun) {
@@ -169,16 +170,19 @@ public void shutdown() {
}
}
+ @Override
public String toString() {
return
"du -sk " + dirPath +"\n" +
used + "\t" + dirPath;
}
+ @Override
protected String[] getExecString() {
return new String[] {"du", "-sk", dirPath};
}
+ @Override
protected void parseExecResult(BufferedReader lines) throws IOException {
String line = lines.readLine();
if (line == null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index e47dffb082..eef53140c3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -44,6 +44,7 @@ public FSDataInputStream(InputStream in)
*
* @param desired offset to seek to
*/
+ @Override
public synchronized void seek(long desired) throws IOException {
((Seekable)in).seek(desired);
}
@@ -53,6 +54,7 @@ public synchronized void seek(long desired) throws IOException {
*
* @return current position in the input stream
*/
+ @Override
public long getPos() throws IOException {
return ((Seekable)in).getPos();
}
@@ -68,6 +70,7 @@ public long getPos() throws IOException {
* if there is no more data because the end of the stream has been
* reached
*/
+ @Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
return ((PositionedReadable)in).read(position, buffer, offset, length);
@@ -85,6 +88,7 @@ public int read(long position, byte[] buffer, int offset, int length)
* If an exception is thrown an undetermined number
* of bytes in the buffer may have been written.
*/
+ @Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
((PositionedReadable)in).readFully(position, buffer, offset, length);
@@ -93,6 +97,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
/**
* See {@link #readFully(long, byte[], int, int)}.
*/
+ @Override
public void readFully(long position, byte[] buffer)
throws IOException {
((PositionedReadable)in).readFully(position, buffer, 0, buffer.length);
@@ -104,6 +109,7 @@ public void readFully(long position, byte[] buffer)
* @param targetPos position to seek to
* @return true if a new source is found, false otherwise
*/
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
return ((Seekable)in).seekToNewSource(targetPos);
}
@@ -118,6 +124,7 @@ public InputStream getWrappedStream() {
return in;
}
+ @Override
public int read(ByteBuffer buf) throws IOException {
if (in instanceof ByteBufferReadable) {
return ((ByteBufferReadable)in).read(buf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
index 9974f27e24..cc992e7c94 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
@@ -140,6 +140,7 @@ protected synchronized boolean needChecksum() {
* @exception IOException if an I/O error occurs.
*/
+ @Override
public synchronized int read() throws IOException {
if (pos >= count) {
fill();
@@ -180,6 +181,7 @@ public synchronized int read() throws IOException {
* @exception IOException if an I/O error occurs.
* ChecksumException if any checksum error occurs
*/
+ @Override
public synchronized int read(byte[] b, int off, int len) throws IOException {
// parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
@@ -367,6 +369,7 @@ public synchronized int available() throws IOException {
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
+ @Override
public synchronized long skip(long n) throws IOException {
if (n <= 0) {
return 0;
@@ -389,6 +392,7 @@ public synchronized long skip(long n) throws IOException {
* ChecksumException if the chunk to seek to is corrupted
*/
+ @Override
public synchronized void seek(long pos) throws IOException {
if( pos<0 ) {
return;
@@ -462,13 +466,16 @@ final protected synchronized void set(boolean verifyChecksum,
this.pos = 0;
}
+ @Override
final public boolean markSupported() {
return false;
}
+ @Override
final public void mark(int readlimit) {
}
+ @Override
final public void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
index f7bc22159d..8d668feeab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
@@ -36,19 +36,23 @@ public abstract class FSInputStream extends InputStream
* The next read() will be from that location. Can't
* seek past the end of the file.
*/
+ @Override
public abstract void seek(long pos) throws IOException;
/**
* Return the current offset from the start of the file
*/
+ @Override
public abstract long getPos() throws IOException;
/**
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*/
+ @Override
public abstract boolean seekToNewSource(long targetPos) throws IOException;
+ @Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
synchronized (this) {
@@ -64,6 +68,7 @@ public int read(long position, byte[] buffer, int offset, int length)
}
}
+ @Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
int nread = 0;
@@ -76,6 +81,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
}
}
+ @Override
public void readFully(long position, byte[] buffer)
throws IOException {
readFully(position, buffer, 0, buffer.length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 66b6a74916..d494f30de7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -55,6 +55,7 @@ protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksu
throws IOException;
/** Write one byte */
+ @Override
public synchronized void write(int b) throws IOException {
sum.update(b);
buf[count++] = (byte)b;
@@ -81,6 +82,7 @@ public synchronized void write(int b) throws IOException {
* @param len the number of bytes to write.
* @exception IOException if an I/O error occurs.
*/
+ @Override
public synchronized void write(byte b[], int off, int len)
throws IOException {
if (off < 0 || len < 0 || off > b.length - len) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
index 2b248bdcf2..149a3e3a4a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
@@ -37,6 +37,7 @@ public abstract class FileChecksum implements Writable {
public abstract byte[] getBytes();
/** Return true if both the algorithms and the values are the same. */
+ @Override
public boolean equals(Object other) {
if (other == this) {
return true;
@@ -50,7 +51,7 @@ public boolean equals(Object other) {
&& Arrays.equals(this.getBytes(), that.getBytes());
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
return getAlgorithmName().hashCode() ^ Arrays.hashCode(getBytes());
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 4e5057a4e9..5cfce9b019 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -190,6 +190,7 @@ public final class FileContext {
new FileContextFinalizer();
private static final PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(final Path file) {
return true;
}
@@ -318,6 +319,7 @@ private static AbstractFileSystem getAbstractFileSystem(
throws UnsupportedFileSystemException, IOException {
try {
return user.doAs(new PrivilegedExceptionAction<AbstractFileSystem>() {
+ @Override
public AbstractFileSystem run() throws UnsupportedFileSystemException {
return AbstractFileSystem.get(uri, conf);
}
@@ -660,6 +662,7 @@ public FSDataOutputStream create(final Path f,
final CreateOpts[] updatedOpts =
CreateOpts.setOpt(CreateOpts.perms(permission), opts);
return new FSLinkResolver<FSDataOutputStream>() {
+ @Override
public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.create(p, createFlag, updatedOpts);
@@ -703,6 +706,7 @@ public void mkdir(final Path dir, final FsPermission permission,
final FsPermission absFerms = (permission == null ?
FsPermission.getDefault() : permission).applyUMask(umask);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.mkdir(p, absFerms, createParent);
@@ -738,6 +742,7 @@ public boolean delete(final Path f, final boolean recursive)
UnsupportedFileSystemException, IOException {
Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
+ @Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return Boolean.valueOf(fs.delete(p, recursive));
@@ -766,6 +771,7 @@ public FSDataInputStream open(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FSDataInputStream>() {
+ @Override
public FSDataInputStream next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.open(p);
@@ -796,6 +802,7 @@ public FSDataInputStream open(final Path f, final int bufferSize)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FSDataInputStream>() {
+ @Override
public FSDataInputStream next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.open(p, bufferSize);
@@ -826,6 +833,7 @@ public boolean setReplication(final Path f, final short replication)
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
+ @Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return Boolean.valueOf(fs.setReplication(p, replication));
@@ -894,6 +902,7 @@ public void rename(final Path src, final Path dst,
*/
final Path source = resolveIntermediate(absSrc);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.rename(source, p, options);
@@ -925,6 +934,7 @@ public void setPermission(final Path f, final FsPermission permission)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setPermission(p, permission);
@@ -967,6 +977,7 @@ public void setOwner(final Path f, final String username,
}
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setOwner(p, username, groupname);
@@ -1002,6 +1013,7 @@ public void setTimes(final Path f, final long mtime, final long atime)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setTimes(p, mtime, atime);
@@ -1034,6 +1046,7 @@ public FileChecksum getFileChecksum(final Path f)
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileChecksum>() {
+ @Override
public FileChecksum next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileChecksum(p);
@@ -1089,6 +1102,7 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus>() {
+ @Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileStatus(p);
@@ -1135,6 +1149,7 @@ public FileStatus getFileLinkStatus(final Path f)
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus>() {
+ @Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
FileStatus fi = fs.getFileLinkStatus(p);
@@ -1165,6 +1180,7 @@ public Path getLinkTarget(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Path>() {
+ @Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
FileStatus fi = fs.getFileLinkStatus(p);
@@ -1208,6 +1224,7 @@ public BlockLocation[] getFileBlockLocations(final Path f, final long start,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<BlockLocation[]>() {
+ @Override
public BlockLocation[] next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileBlockLocations(p, start, len);
@@ -1246,6 +1263,7 @@ public FsStatus getFsStatus(final Path f) throws AccessControlException,
}
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FsStatus>() {
+ @Override
public FsStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFsStatus(p);
@@ -1339,6 +1357,7 @@ public void createSymlink(final Path target, final Path link,
IOException {
final Path nonRelLink = fixRelativePart(link);
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.createSymlink(target, p, createParent);
@@ -1373,6 +1392,7 @@ public RemoteIterator<FileStatus> listStatus(final Path f) throws
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<RemoteIterator<FileStatus>>() {
+ @Override
public RemoteIterator<FileStatus> next(
final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
@@ -1432,6 +1452,7 @@ public RemoteIterator<LocatedFileStatus> listLocatedStatus(
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<RemoteIterator<LocatedFileStatus>>() {
+ @Override
public RemoteIterator<LocatedFileStatus> next(
final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
@@ -1703,6 +1724,7 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException,
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus[]>() {
+ @Override
public FileStatus[] next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.listStatus(p);
@@ -2232,6 +2254,7 @@ private static boolean isSameFS(Path qualPath1, Path qualPath2) {
* Deletes all the paths in deleteOnExit on JVM shutdown.
*/
static class FileContextFinalizer implements Runnable {
+ @Override
public synchronized void run() {
processDeleteOnExit();
}
@@ -2244,6 +2267,7 @@ public synchronized void run() {
protected Path resolve(final Path f) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
return new FSLinkResolver<Path>() {
+ @Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.resolvePath(p);
@@ -2259,6 +2283,7 @@ public Path next(final AbstractFileSystem fs, final Path p)
*/
protected Path resolveIntermediate(final Path f) throws IOException {
return new FSLinkResolver<FileStatus>() {
+ @Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileLinkStatus(p);
@@ -2281,6 +2306,7 @@ Set<AbstractFileSystem> resolveAbstractFileSystems(final Path f)
final HashSet<AbstractFileSystem> result
= new HashSet<AbstractFileSystem>();
new FSLinkResolver<Void>() {
+ @Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
result.add(fs);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 2757475faf..5445f6eb15 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -253,6 +253,7 @@ public void setSymlink(final Path p) {
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
+ @Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
out.writeLong(getLen());
@@ -270,6 +271,7 @@ public void write(DataOutput out) throws IOException {
}
}
+ @Override
public void readFields(DataInput in) throws IOException {
String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
this.path = new Path(strPath);
@@ -299,6 +301,7 @@ public void readFields(DataInput in) throws IOException {
* @throws ClassCastException if the specified object's is not of
* type FileStatus
*/
+ @Override
public int compareTo(Object o) {
FileStatus other = (FileStatus)o;
return this.getPath().compareTo(other.getPath());
@@ -308,6 +311,7 @@ public int compareTo(Object o) {
* @param o the object to be compared.
* @return true if two file status has the same path name; false if not.
*/
+ @Override
public boolean equals(Object o) {
if (o == null) {
return false;
@@ -328,6 +332,7 @@ public boolean equals(Object o) {
*
* @return a hash code value for the path name.
*/
+ @Override
public int hashCode() {
return getPath().hashCode();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 31b59439a9..ff9f2db1ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -147,6 +147,7 @@ public static FileSystem get(final URI uri, final Configuration conf,
UserGroupInformation ugi =
UserGroupInformation.getBestUGI(ticketCachePath, user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
public FileSystem run() throws IOException {
return get(uri, conf);
}
@@ -332,6 +333,7 @@ public static FileSystem newInstance(final URI uri, final Configuration conf,
UserGroupInformation ugi =
UserGroupInformation.getBestUGI(ticketCachePath, user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
public FileSystem run() throws IOException {
return newInstance(uri,conf);
}
@@ -1389,6 +1391,7 @@ public ContentSummary getContentSummary(Path f) throws IOException {
}
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(Path file) {
return true;
}
@@ -2056,6 +2059,7 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
* No more filesystem operations are needed. Will
* release any held locks.
*/
+ @Override
public void close() throws IOException {
// delete all files that were marked as delete-on-exit.
processDeleteOnExit();
@@ -2393,6 +2397,7 @@ synchronized void closeAll(boolean onlyAutomatic) throws IOException {
}
private class ClientFinalizer implements Runnable {
+ @Override
public synchronized void run() {
try {
closeAll(true);
@@ -2447,7 +2452,7 @@ static class Key {
this.ugi = UserGroupInformation.getCurrentUser();
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
return (scheme + authority).hashCode() + ugi.hashCode() + (int)unique;
}
@@ -2456,7 +2461,7 @@ static boolean isEqual(Object a, Object b) {
return a == b || (a != null && a.equals(b));
}
- /** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
@@ -2471,7 +2476,7 @@ && isEqual(this.ugi, that.ugi)
return false;
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
return "("+ugi.toString() + ")@" + scheme + "://" + authority;
}
@@ -2584,6 +2589,7 @@ public int getWriteOps() {
return writeOps.get();
}
+ @Override
public String toString() {
return bytesRead + " bytes read, " + bytesWritten + " bytes written, "
+ readOps + " read ops, " + largeReadOps + " large read ops, "
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index ba9bb4eafe..b6a2acae49 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -414,9 +414,11 @@ private static class CygPathCommand extends Shell {
String getResult() throws IOException {
return result;
}
+ @Override
protected String[] getExecString() {
return command;
}
+ @Override
protected void parseExecResult(BufferedReader lines) throws IOException {
String line = lines.readLine();
if (line == null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index c2ecd20b5a..6e1e099cb0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -76,6 +76,7 @@ public FileSystem getRawFileSystem() {
* for this FileSystem
* @param conf the configuration
*/
+ @Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
// this is less than ideal, but existing filesystems sometimes neglect
@@ -90,6 +91,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
+ @Override
public URI getUri() {
return fs.getUri();
}
@@ -104,6 +106,7 @@ protected URI getCanonicalUri() {
}
/** Make sure that a path specifies a FileSystem. */
+ @Override
public Path makeQualified(Path path) {
Path fqPath = fs.makeQualified(path);
// swap in our scheme if the filtered fs is using a different scheme
@@ -125,10 +128,12 @@ public Path makeQualified(Path path) {
///////////////////////////////////////////////////////////////
/** Check that a Path belongs to this FileSystem. */
+ @Override
protected void checkPath(Path path) {
fs.checkPath(path);
}
+ @Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
return fs.getFileBlockLocations(file, start, len);
@@ -143,17 +148,17 @@ public Path resolvePath(final Path p) throws IOException {
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
+ @Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return fs.open(f, bufferSize);
}
- /** {@inheritDoc} */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return fs.append(f, bufferSize, progress);
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -171,6 +176,7 @@ public FSDataOutputStream create(Path f, FsPermission permission,
* @return true if successful;
* false if file does not exist or is a directory
*/
+ @Override
public boolean setReplication(Path src, short replication) throws IOException {
return fs.setReplication(src, replication);
}
@@ -179,23 +185,23 @@ public boolean setReplication(Path src, short replication) throws IOException {
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
+ @Override
public boolean rename(Path src, Path dst) throws IOException {
return fs.rename(src, dst);
}
/** Delete a file */
+ @Override
public boolean delete(Path f, boolean recursive) throws IOException {
return fs.delete(f, recursive);
}
/** List files in a directory. */
+ @Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f);
}
- /**
- * {@inheritDoc}
- */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
@@ -203,11 +209,13 @@ public RemoteIterator listCorruptFileBlocks(Path path)
}
/** List files and its block locations in a directory. */
+ @Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f);
}
+ @Override
public Path getHomeDirectory() {
return fs.getHomeDirectory();
}
@@ -219,6 +227,7 @@ public Path getHomeDirectory() {
*
* @param newDir
*/
+ @Override
public void setWorkingDirectory(Path newDir) {
fs.setWorkingDirectory(newDir);
}
@@ -228,21 +237,21 @@ public void setWorkingDirectory(Path newDir) {
*
* @return the directory pathname
*/
+ @Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
}
+ @Override
protected Path getInitialWorkingDirectory() {
return fs.getInitialWorkingDirectory();
}
- /** {@inheritDoc} */
@Override
public FsStatus getStatus(Path p) throws IOException {
return fs.getStatus(p);
}
- /** {@inheritDoc} */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return fs.mkdirs(f, permission);
@@ -254,6 +263,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
* the given dst name.
* delSrc indicates if the source should be removed
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, src, dst);
@@ -264,6 +274,7 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
* the given dst name.
* delSrc indicates if the source should be removed
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst)
throws IOException {
@@ -275,6 +286,7 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite,
* the given dst name.
* delSrc indicates if the source should be removed
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst)
throws IOException {
@@ -286,6 +298,7 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite,
* Copy it from FS control to the local dst name.
* delSrc indicates if the src will be removed or not.
*/
+ @Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyToLocalFile(delSrc, src, dst);
@@ -297,6 +310,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
* file. If the FS is local, we write directly into the target. If
* the FS is remote, we write into the tmp local area.
*/
+ @Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
@@ -308,12 +322,14 @@ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
*/
+ @Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
}
/** Return the total size of all files in the filesystem.*/
+ @Override
public long getUsed() throws IOException{
return fs.getUsed();
}
@@ -357,16 +373,17 @@ public FsServerDefaults getServerDefaults(Path f) throws IOException {
/**
* Get file status.
*/
+ @Override
public FileStatus getFileStatus(Path f) throws IOException {
return fs.getFileStatus(f);
}
- /** {@inheritDoc} */
+ @Override
public FileChecksum getFileChecksum(Path f) throws IOException {
return fs.getFileChecksum(f);
}
- /** {@inheritDoc} */
+ @Override
public void setVerifyChecksum(boolean verifyChecksum) {
fs.setVerifyChecksum(verifyChecksum);
}
@@ -387,21 +404,18 @@ public void close() throws IOException {
fs.close();
}
- /** {@inheritDoc} */
@Override
public void setOwner(Path p, String username, String groupname
) throws IOException {
fs.setOwner(p, username, groupname);
}
- /** {@inheritDoc} */
@Override
public void setTimes(Path p, long mtime, long atime
) throws IOException {
fs.setTimes(p, mtime, atime);
}
- /** {@inheritDoc} */
@Override
public void setPermission(Path p, FsPermission permission
) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 6cfc11b1fa..9637b6b913 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -174,9 +174,6 @@ public FileStatus[] listStatus(Path f)
return myFs.listStatus(f);
}
- /**
- * {@inheritDoc}
- */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 637697b83d..c1b9071bbc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -39,6 +39,7 @@ public class FsServerDefaults implements Writable {
static { // register a ctor
WritableFactories.setFactory(FsServerDefaults.class, new WritableFactory() {
+ @Override
public Writable newInstance() {
return new FsServerDefaults();
}
@@ -106,6 +107,7 @@ public DataChecksum.Type getChecksumType() {
// /////////////////////////////////////////
// Writable
// /////////////////////////////////////////
+ @Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(blockSize);
@@ -116,6 +118,7 @@ public void write(DataOutput out) throws IOException {
WritableUtils.writeEnum(out, checksumType);
}
+ @Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
blockSize = in.readLong();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 4da32789e5..0db1f9e431 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -236,6 +236,7 @@ private void printInstanceHelp(PrintStream out, Command instance) {
/**
* run
*/
+ @Override
public int run(String argv[]) throws Exception {
// initialize FsShell
init();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
index 8b9de78fe0..d392c7d765 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
@@ -60,12 +60,14 @@ public long getRemaining() {
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
+ @Override
public void write(DataOutput out) throws IOException {
out.writeLong(capacity);
out.writeLong(used);
out.writeLong(remaining);
}
+ @Override
public void readFields(DataInput in) throws IOException {
capacity = in.readLong();
used = in.readLong();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
index 65c608ddec..90e75b0ccb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
@@ -53,7 +53,6 @@ public void connect() throws IOException {
}
}
- /* @inheritDoc */
@Override
public InputStream getInputStream() throws IOException {
if (is == null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
index b9a5f1a2cc..2a9208ea5b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
@@ -59,6 +59,7 @@ public FsUrlStreamHandlerFactory(Configuration conf) {
this.handler = new FsUrlStreamHandler(this.conf);
}
+ @Override
public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
if (!protocols.containsKey(protocol)) {
boolean known = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
index 5afa9e911d..24bff5f9cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
@@ -31,6 +31,7 @@
@InterfaceStability.Evolving
public class GlobFilter implements PathFilter {
private final static PathFilter DEFAULT_FILTER = new PathFilter() {
+ @Override
public boolean accept(Path file) {
return true;
}
@@ -75,6 +76,7 @@ boolean hasPattern() {
return pattern.hasWildcard();
}
+ @Override
public boolean accept(Path path) {
return pattern.matches(path.getName()) && userFilter.accept(path);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index 8e03fc35a9..9504e1fda6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -106,6 +106,7 @@ public HarFileSystem(FileSystem fs) {
* har:///archivepath. This assumes the underlying filesystem
* to be used in case not specified.
*/
+ @Override
public void initialize(URI name, Configuration conf) throws IOException {
// decode the name
URI underLyingURI = decodeHarURI(name, conf);
@@ -247,6 +248,7 @@ private String decodeFileName(String fname)
/**
* return the top level archive.
*/
+ @Override
public Path getWorkingDirectory() {
return new Path(uri.toString());
}
@@ -636,6 +638,7 @@ private HarStatus getFileHarStatus(Path f) throws IOException {
/**
* @return null since no checksum algorithm is implemented.
*/
+ @Override
public FileChecksum getFileChecksum(Path f) {
return null;
}
@@ -668,6 +671,7 @@ public FSDataOutputStream create(Path f, int bufferSize)
throw new IOException("Har: Create not allowed");
}
+ @Override
public FSDataOutputStream create(Path f,
FsPermission permission,
boolean overwrite,
@@ -735,10 +739,12 @@ public FileStatus[] listStatus(Path f) throws IOException {
/**
* return the top level archive path.
*/
+ @Override
public Path getHomeDirectory() {
return new Path(uri.toString());
}
+ @Override
public void setWorkingDirectory(Path newDir) {
//does nothing.
}
@@ -746,6 +752,7 @@ public void setWorkingDirectory(Path newDir) {
/**
* not implemented.
*/
+ @Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new IOException("Har: mkdirs not allowed");
}
@@ -753,6 +760,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
/**
* not implemented.
*/
+ @Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws
IOException {
throw new IOException("Har: copyfromlocalfile not allowed");
@@ -761,6 +769,7 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws
/**
* copies the file in the har filesystem to a local file.
*/
+ @Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
FileUtil.copy(this, src, getLocal(getConf()), dst, false, getConf());
@@ -769,6 +778,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
/**
* not implemented.
*/
+ @Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
throw new IOException("Har: startLocalOutput not allowed");
@@ -777,6 +787,7 @@ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
/**
* not implemented.
*/
+ @Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
throw new IOException("Har: completeLocalOutput not allowed");
@@ -785,6 +796,7 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
/**
* not implemented.
*/
+ @Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
throw new IOException("Har: setowner not allowed");
@@ -793,6 +805,7 @@ public void setOwner(Path p, String username, String groupname)
/**
* Not implemented.
*/
+ @Override
public void setPermission(Path p, FsPermission permisssion)
throws IOException {
throw new IOException("Har: setPermission not allowed");
@@ -825,6 +838,7 @@ private static class HarFsInputStream extends FSInputStream {
this.end = start + length;
}
+ @Override
public synchronized int available() throws IOException {
long remaining = end - underLyingStream.getPos();
if (remaining > (long)Integer.MAX_VALUE) {
@@ -833,6 +847,7 @@ public synchronized int available() throws IOException {
return (int) remaining;
}
+ @Override
public synchronized void close() throws IOException {
underLyingStream.close();
super.close();
@@ -847,15 +862,18 @@ public void mark(int readLimit) {
/**
* reset is not implemented
*/
+ @Override
public void reset() throws IOException {
throw new IOException("reset not implemented.");
}
+ @Override
public synchronized int read() throws IOException {
int ret = read(oneBytebuff, 0, 1);
return (ret <= 0) ? -1: (oneBytebuff[0] & 0xff);
}
+ @Override
public synchronized int read(byte[] b) throws IOException {
int ret = read(b, 0, b.length);
if (ret != -1) {
@@ -867,6 +885,7 @@ public synchronized int read(byte[] b) throws IOException {
/**
*
*/
+ @Override
public synchronized int read(byte[] b, int offset, int len)
throws IOException {
int newlen = len;
@@ -882,6 +901,7 @@ public synchronized int read(byte[] b, int offset, int len)
return ret;
}
+ @Override
public synchronized long skip(long n) throws IOException {
long tmpN = n;
if (tmpN > 0) {
@@ -895,10 +915,12 @@ public synchronized long skip(long n) throws IOException {
return (tmpN < 0)? -1 : 0;
}
+ @Override
public synchronized long getPos() throws IOException {
return (position - start);
}
+ @Override
public synchronized void seek(long pos) throws IOException {
if (pos < 0 || (start + pos > end)) {
throw new IOException("Failed to seek: EOF");
@@ -907,6 +929,7 @@ public synchronized void seek(long pos) throws IOException {
underLyingStream.seek(position);
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
//do not need to implement this
// hdfs in itself does seektonewsource
@@ -917,6 +940,7 @@ public boolean seekToNewSource(long targetPos) throws IOException {
/**
* implementing position readable.
*/
+ @Override
public int read(long pos, byte[] b, int offset, int length)
throws IOException {
int nlength = length;
@@ -929,6 +953,7 @@ public int read(long pos, byte[] b, int offset, int length)
/**
* position readable again.
*/
+ @Override
public void readFully(long pos, byte[] b, int offset, int length)
throws IOException {
if (start + length + pos > end) {
@@ -937,6 +962,7 @@ public void readFully(long pos, byte[] b, int offset, int length)
underLyingStream.readFully(pos + start, b, offset, length);
}
+ @Override
public void readFully(long pos, byte[] b) throws IOException {
readFully(pos, b, 0, b.length);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index 394c01f705..7db348c557 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -91,6 +91,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
* Moves files to a bad file directory on the same device, so that their
* storage will not be reused.
*/
+ @Override
public boolean reportChecksumFailure(Path p, FSDataInputStream in,
long inPos,
FSDataInputStream sums, long sumsPos) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index b0779ed82f..01368944a4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -94,6 +94,7 @@ public BlockLocation[] getBlockLocations() {
* @throws ClassCastException if the specified object's is not of
* type FileStatus
*/
+ @Override
public int compareTo(Object o) {
return super.compareTo(o);
}
@@ -102,6 +103,7 @@ public int compareTo(Object o) {
* @param o the object to be compared.
* @return true if two file status has the same path name; false if not.
*/
+ @Override
public boolean equals(Object o) {
return super.equals(o);
}
@@ -112,6 +114,7 @@ public boolean equals(Object o) {
*
* @return a hash code value for the path name.
*/
+ @Override
public int hashCode() {
return super.hashCode();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
index 1c697b7f52..5bddb96f0c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
@@ -57,7 +57,7 @@ public MD5MD5CRC32FileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
this.md5 = md5;
}
- /** {@inheritDoc} */
+ @Override
public String getAlgorithmName() {
return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC +
getCrcType().name();
@@ -73,11 +73,11 @@ public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
throw new IOException("Unknown checksum type in " + algorithm);
}
-
- /** {@inheritDoc} */
+
+ @Override
public int getLength() {return LENGTH;}
-
- /** {@inheritDoc} */
+
+ @Override
public byte[] getBytes() {
return WritableUtils.toByteArray(this);
}
@@ -92,14 +92,14 @@ public ChecksumOpt getChecksumOpt() {
return new ChecksumOpt(getCrcType(), bytesPerCRC);
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
bytesPerCRC = in.readInt();
crcPerBlock = in.readLong();
md5 = MD5Hash.read(in);
}
-
- /** {@inheritDoc} */
+
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(bytesPerCRC);
out.writeLong(crcPerBlock);
@@ -161,8 +161,8 @@ public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
+ ", md5=" + md5, e);
}
}
-
- /** {@inheritDoc} */
+
+ @Override
public String toString() {
return getAlgorithmName() + ":" + md5;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index 173e16ea41..8464e51270 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -22,7 +22,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.HadoopIllegalArgumentException;
/**
* This class contains options related to file system operations.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 74c85af48b..c0ebebfe67 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -261,6 +261,7 @@ public Path suffix(String suffix) {
return new Path(getParent(), getName()+suffix);
}
+ @Override
public String toString() {
// we can't use uri.toString(), which escapes everything, because we want
// illegal characters unescaped in the string, for glob processing, etc.
@@ -289,6 +290,7 @@ public String toString() {
return buffer.toString();
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof Path)) {
return false;
@@ -297,10 +299,12 @@ public boolean equals(Object o) {
return this.uri.equals(that.uri);
}
+ @Override
public int hashCode() {
return uri.hashCode();
}
+ @Override
public int compareTo(Object o) {
Path that = (Path)o;
return this.uri.compareTo(that.uri);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 38e991480a..b33b1a778f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -72,8 +72,10 @@ public File pathToFile(Path path) {
return new File(path.toUri().getPath());
}
+ @Override
public URI getUri() { return NAME; }
+ @Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
setConf(conf);
@@ -84,6 +86,7 @@ public TrackingFileInputStream(File f) throws IOException {
super(f);
}
+ @Override
public int read() throws IOException {
int result = super.read();
if (result != -1) {
@@ -92,6 +95,7 @@ public int read() throws IOException {
return result;
}
+ @Override
public int read(byte[] data) throws IOException {
int result = super.read(data);
if (result != -1) {
@@ -100,6 +104,7 @@ public int read(byte[] data) throws IOException {
return result;
}
+ @Override
public int read(byte[] data, int offset, int length) throws IOException {
int result = super.read(data, offset, length);
if (result != -1) {
@@ -120,15 +125,18 @@ public LocalFSFileInputStream(Path f) throws IOException {
this.fis = new TrackingFileInputStream(pathToFile(f));
}
+ @Override
public void seek(long pos) throws IOException {
fis.getChannel().position(pos);
this.position = pos;
}
+ @Override
public long getPos() throws IOException {
return this.position;
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@@ -136,11 +144,14 @@ public boolean seekToNewSource(long targetPos) throws IOException {
/*
* Just forward to the fis
*/
+ @Override
public int available() throws IOException { return fis.available(); }
+ @Override
public void close() throws IOException { fis.close(); }
@Override
public boolean markSupported() { return false; }
+ @Override
public int read() throws IOException {
try {
int value = fis.read();
@@ -153,6 +164,7 @@ public int read() throws IOException {
}
}
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
try {
int value = fis.read(b, off, len);
@@ -165,6 +177,7 @@ public int read(byte[] b, int off, int len) throws IOException {
}
}
+ @Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
ByteBuffer bb = ByteBuffer.wrap(b, off, len);
@@ -175,6 +188,7 @@ public int read(long position, byte[] b, int off, int len)
}
}
+ @Override
public long skip(long n) throws IOException {
long value = fis.skip(n);
if (value > 0) {
@@ -189,6 +203,7 @@ public FileDescriptor getFileDescriptor() throws IOException {
}
}
+ @Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException(f.toString());
@@ -210,8 +225,11 @@ private LocalFSFileOutputStream(Path f, boolean append) throws IOException {
/*
* Just forward to the fos
*/
+ @Override
public void close() throws IOException { fos.close(); }
+ @Override
public void flush() throws IOException { fos.flush(); }
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
try {
fos.write(b, off, len);
@@ -220,6 +238,7 @@ public void write(byte[] b, int off, int len) throws IOException {
}
}
+ @Override
public void write(int b) throws IOException {
try {
fos.write(b);
@@ -229,7 +248,7 @@ public void write(int b) throws IOException {
}
}
- /** {@inheritDoc} */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
if (!exists(f)) {
@@ -242,7 +261,6 @@ public FSDataOutputStream append(Path f, int bufferSize,
new LocalFSFileOutputStream(f, true), bufferSize), statistics);
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress)
@@ -264,7 +282,6 @@ private FSDataOutputStream create(Path f, boolean overwrite,
new LocalFSFileOutputStream(f, false), bufferSize), statistics);
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
@@ -276,7 +293,6 @@ public FSDataOutputStream create(Path f, FsPermission permission,
return out;
}
- /** {@inheritDoc} */
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite,
@@ -288,6 +304,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
return out;
}
+ @Override
public boolean rename(Path src, Path dst) throws IOException {
if (pathToFile(src).renameTo(pathToFile(dst))) {
return true;
@@ -302,6 +319,7 @@ public boolean rename(Path src, Path dst) throws IOException {
* @return true if the file or directory and all its contents were deleted
* @throws IOException if p is non-empty and recursive is false
*/
+ @Override
public boolean delete(Path p, boolean recursive) throws IOException {
File f = pathToFile(p);
if (f.isFile()) {
@@ -319,6 +337,7 @@ public boolean delete(Path p, boolean recursive) throws IOException {
* (Note: Returned list is not sorted in any given order,
* due to reliance on Java's {@link File#list()} API.)
*/
+ @Override
public FileStatus[] listStatus(Path f) throws IOException {
File localf = pathToFile(f);
FileStatus[] results;
@@ -356,6 +375,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
* Creates the specified directory hierarchy. Does not
* treat existence as an error.
*/
+ @Override
public boolean mkdirs(Path f) throws IOException {
if(f == null) {
throw new IllegalArgumentException("mkdirs path arg is null");
@@ -373,7 +393,6 @@ public boolean mkdirs(Path f) throws IOException {
(p2f.mkdir() || p2f.isDirectory());
}
- /** {@inheritDoc} */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
boolean b = mkdirs(f);
@@ -418,7 +437,6 @@ protected Path getInitialWorkingDirectory() {
return this.makeQualified(new Path(System.getProperty("user.dir")));
}
- /** {@inheritDoc} */
@Override
public FsStatus getStatus(Path p) throws IOException {
File partition = pathToFile(p == null ? new Path("/") : p);
@@ -430,29 +448,35 @@ public FsStatus getStatus(Path p) throws IOException {
}
// In the case of the local filesystem, we can just rename the file.
+ @Override
public void moveFromLocalFile(Path src, Path dst) throws IOException {
rename(src, dst);
}
// We can write output directly to the final location
+ @Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fsOutputFile;
}
// It's in the right place - nothing to do.
+ @Override
public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile)
throws IOException {
}
+ @Override
public void close() throws IOException {
super.close();
}
+ @Override
public String toString() {
return "LocalFS";
}
+ @Override
public FileStatus getFileStatus(Path f) throws IOException {
File path = pathToFile(f);
if (path.exists()) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 07870df1a6..1820c6619e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -263,6 +263,7 @@ private class Emptier implements Runnable {
}
}
+ @Override
public void run() {
if (emptierInterval == 0)
return; // trash disabled
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 1c19ce27fb..99ca4fbb80 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -262,6 +262,7 @@ public void close() throws IOException {
}
/** This optional operation is not yet supported. */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
index d3ac019a94..beea508d5d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
@@ -51,19 +51,23 @@ public FTPInputStream(InputStream stream, FTPClient client,
this.closed = false;
}
+ @Override
public long getPos() throws IOException {
return pos;
}
// We don't support seek.
+ @Override
public void seek(long pos) throws IOException {
throw new IOException("Seek not supported");
}
+ @Override
public boolean seekToNewSource(long targetPos) throws IOException {
throw new IOException("Seek not supported");
}
+ @Override
public synchronized int read() throws IOException {
if (closed) {
throw new IOException("Stream closed");
@@ -79,6 +83,7 @@ public synchronized int read() throws IOException {
return byteRead;
}
+ @Override
public synchronized int read(byte buf[], int off, int len) throws IOException {
if (closed) {
throw new IOException("Stream closed");
@@ -95,6 +100,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException {
return result;
}
+ @Override
public synchronized void close() throws IOException {
if (closed) {
throw new IOException("Stream closed");
@@ -116,14 +122,17 @@ public synchronized void close() throws IOException {
// Not supported.
+ @Override
public boolean markSupported() {
return false;
}
+ @Override
public void mark(int readLimit) {
// Do nothing
}
+ @Override
public void reset() throws IOException {
throw new IOException("Mark not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
index 88b28ed434..0d77a78c87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
@@ -50,22 +50,27 @@ public KFSImpl(String metaServerHost, int metaServerPort,
statistics = stats;
}
+ @Override
public boolean exists(String path) throws IOException {
return kfsAccess.kfs_exists(path);
}
+ @Override
public boolean isDirectory(String path) throws IOException {
return kfsAccess.kfs_isDirectory(path);
}
+ @Override
public boolean isFile(String path) throws IOException {
return kfsAccess.kfs_isFile(path);
}
+ @Override
public String[] readdir(String path) throws IOException {
return kfsAccess.kfs_readdir(path);
}
+ @Override
public FileStatus[] readdirplus(Path path) throws IOException {
String srep = path.toUri().getPath();
KfsFileAttr[] fattr = kfsAccess.kfs_readdirplus(srep);
@@ -100,52 +105,64 @@ public FileStatus[] readdirplus(Path path) throws IOException {
}
+ @Override
public int mkdirs(String path) throws IOException {
return kfsAccess.kfs_mkdirs(path);
}
+ @Override
public int rename(String source, String dest) throws IOException {
return kfsAccess.kfs_rename(source, dest);
}
+ @Override
public int rmdir(String path) throws IOException {
return kfsAccess.kfs_rmdir(path);
}
+ @Override
public int remove(String path) throws IOException {
return kfsAccess.kfs_remove(path);
}
+ @Override
public long filesize(String path) throws IOException {
return kfsAccess.kfs_filesize(path);
}
+ @Override
public short getReplication(String path) throws IOException {
return kfsAccess.kfs_getReplication(path);
}
+ @Override
public short setReplication(String path, short replication) throws IOException {
return kfsAccess.kfs_setReplication(path, replication);
}
+ @Override
public String[][] getDataLocation(String path, long start, long len) throws IOException {
return kfsAccess.kfs_getDataLocation(path, start, len);
}
+ @Override
public long getModificationTime(String path) throws IOException {
return kfsAccess.kfs_getModificationTime(path);
}
+ @Override
public FSDataInputStream open(String path, int bufferSize) throws IOException {
return new FSDataInputStream(new KFSInputStream(kfsAccess, path,
statistics));
}
+ @Override
public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, replication, false, progress),
statistics);
}
+ @Override
public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
// when opening for append, # of replicas is ignored
return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, (short) 1, true, progress),
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
index 04c937b848..492230f064 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
@@ -53,6 +53,7 @@ public KFSInputStream(KfsAccess kfsAccess, String path,
this.fsize = 0;
}
+ @Override
public long getPos() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -60,6 +61,7 @@ public long getPos() throws IOException {
return kfsChannel.tell();
}
+ @Override
public synchronized int available() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -67,6 +69,7 @@ public synchronized int available() throws IOException {
return (int) (this.fsize - getPos());
}
+ @Override
public synchronized void seek(long targetPos) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -74,10 +77,12 @@ public synchronized void seek(long targetPos) throws IOException {
kfsChannel.seek(targetPos);
}
+ @Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
+ @Override
public synchronized int read() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -93,6 +98,7 @@ public synchronized int read() throws IOException {
return -1;
}
+ @Override
public synchronized int read(byte b[], int off, int len) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -109,6 +115,7 @@ public synchronized int read(byte b[], int off, int len) throws IOException {
return res;
}
+ @Override
public synchronized void close() throws IOException {
if (kfsChannel == null) {
return;
@@ -118,14 +125,17 @@ public synchronized void close() throws IOException {
kfsChannel = null;
}
+ @Override
public boolean markSupported() {
return false;
}
+ @Override
public void mark(int readLimit) {
// Do nothing
}
+ @Override
public void reset() throws IOException {
throw new IOException("Mark not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
index 59cea357e6..a50f750733 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
@@ -20,15 +20,10 @@
package org.apache.hadoop.fs.kfs;
import java.io.*;
-import java.net.*;
-import java.util.*;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.util.Progressable;
import org.kosmix.kosmosfs.access.KfsAccess;
@@ -60,6 +55,7 @@ public long getPos() throws IOException {
return kfsChannel.tell();
}
+ @Override
public void write(int v) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -70,6 +66,7 @@ public void write(int v) throws IOException {
write(b, 0, 1);
}
+ @Override
public void write(byte b[], int off, int len) throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -80,6 +77,7 @@ public void write(byte b[], int off, int len) throws IOException {
kfsChannel.write(ByteBuffer.wrap(b, off, len));
}
+ @Override
public void flush() throws IOException {
if (kfsChannel == null) {
throw new IOException("File closed");
@@ -89,6 +87,7 @@ public void flush() throws IOException {
kfsChannel.sync();
}
+ @Override
public synchronized void close() throws IOException {
if (kfsChannel == null) {
return;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index af3d5148d5..972a410b53 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -40,6 +40,7 @@ public class FsPermission implements Writable {
private static final Log LOG = LogFactory.getLog(FsPermission.class);
static final WritableFactory FACTORY = new WritableFactory() {
+ @Override
public Writable newInstance() { return new FsPermission(); }
};
static { // register a ctor
@@ -124,12 +125,12 @@ public void fromShort(short n) {
set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7], (((n >>> 9) & 1) == 1) );
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
out.writeShort(toShort());
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
fromShort(in.readShort());
}
@@ -155,7 +156,7 @@ public short toShort() {
return (short)s;
}
- /** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (obj instanceof FsPermission) {
FsPermission that = (FsPermission)obj;
@@ -167,10 +168,10 @@ public boolean equals(Object obj) {
return false;
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {return toShort();}
- /** {@inheritDoc} */
+ @Override
public String toString() {
String str = useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL;
if(stickyBit) {
@@ -300,9 +301,11 @@ private static class ImmutableFsPermission extends FsPermission {
public ImmutableFsPermission(short permission) {
super(permission);
}
+ @Override
public FsPermission applyUMask(FsPermission umask) {
throw new UnsupportedOperationException();
}
+ @Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
index f47226f1e2..bc9e392a87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
@@ -32,6 +32,7 @@
@InterfaceStability.Unstable
public class PermissionStatus implements Writable {
static final WritableFactory FACTORY = new WritableFactory() {
+ @Override
public Writable newInstance() { return new PermissionStatus(); }
};
static { // register a ctor
@@ -42,9 +43,11 @@ public class PermissionStatus implements Writable {
public static PermissionStatus createImmutable(
String user, String group, FsPermission permission) {
return new PermissionStatus(user, group, permission) {
+ @Override
public PermissionStatus applyUMask(FsPermission umask) {
throw new UnsupportedOperationException();
}
+ @Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
@@ -82,14 +85,14 @@ public PermissionStatus applyUMask(FsPermission umask) {
return this;
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
username = Text.readString(in, Text.DEFAULT_MAX_LEN);
groupname = Text.readString(in, Text.DEFAULT_MAX_LEN);
permission = FsPermission.read(in);
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
write(out, username, groupname, permission);
}
@@ -115,7 +118,7 @@ public static void write(DataOutput out,
permission.write(out);
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
return username + ":" + groupname + ":" + permission;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
index 6667d62189..4adc306633 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
@@ -83,6 +83,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
private static final Log LOG =
LogFactory.getLog(Jets3tFileSystemStore.class.getName());
+ @Override
public void initialize(URI uri, Configuration conf) throws IOException {
this.conf = conf;
@@ -108,6 +109,7 @@ public void initialize(URI uri, Configuration conf) throws IOException {
);
}
+ @Override
public String getVersion() throws IOException {
return FILE_SYSTEM_VERSION_VALUE;
}
@@ -123,14 +125,17 @@ private void delete(String key) throws IOException {
}
}
+ @Override
public void deleteINode(Path path) throws IOException {
delete(pathToKey(path));
}
+ @Override
public void deleteBlock(Block block) throws IOException {
delete(blockToKey(block));
}
+ @Override
public boolean inodeExists(Path path) throws IOException {
InputStream in = get(pathToKey(path), true);
if (in == null) {
@@ -140,6 +145,7 @@ public boolean inodeExists(Path path) throws IOException {
return true;
}
+ @Override
public boolean blockExists(long blockId) throws IOException {
InputStream in = get(blockToKey(blockId), false);
if (in == null) {
@@ -203,10 +209,12 @@ private void checkMetadata(S3Object object) throws S3FileSystemException,
}
}
+ @Override
public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path), true));
}
+ @Override
public File retrieveBlock(Block block, long byteRangeStart)
throws IOException {
File fileBlock = null;
@@ -249,6 +257,7 @@ private File newBackupFile() throws IOException {
return result;
}
+ @Override
  public Set<Path> listSubPaths(Path path) throws IOException {
try {
String prefix = pathToKey(path);
@@ -270,6 +279,7 @@ public Set<Path> listSubPaths(Path path) throws IOException {
}
}
+ @Override
  public Set<Path> listDeepSubPaths(Path path) throws IOException {
try {
String prefix = pathToKey(path);
@@ -311,10 +321,12 @@ private void put(String key, InputStream in, long length, boolean storeMetadata)
}
}
+ @Override
public void storeINode(Path path, INode inode) throws IOException {
put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true);
}
+ @Override
public void storeBlock(Block block, File file) throws IOException {
BufferedInputStream in = null;
try {
@@ -354,6 +366,7 @@ private String blockToKey(Block block) {
return blockToKey(block.getId());
}
+ @Override
public void purge() throws IOException {
try {
S3Object[] objects = s3Service.listObjects(bucket);
@@ -368,6 +381,7 @@ public void purge() throws IOException {
}
}
+ @Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Filesystem, ");
sb.append(bucket.getName()).append("\n");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
index f82755781e..416bfb17c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
@@ -61,6 +61,7 @@ public static void main(String[] args) throws Exception {
System.exit(res);
}
+ @Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
@@ -195,6 +196,7 @@ interface Store {
class UnversionedStore implements Store {
+ @Override
    public Set<Path> listAllPaths() throws IOException {
try {
String prefix = urlEncode(Path.SEPARATOR);
@@ -212,6 +214,7 @@ public Set<Path> listAllPaths() throws IOException {
}
}
+ @Override
public void deleteINode(Path path) throws IOException {
delete(pathToKey(path));
}
@@ -227,6 +230,7 @@ private void delete(String key) throws IOException {
}
}
+ @Override
public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path)));
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
index 5a5d628adb..81ef31446e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
@@ -206,6 +206,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
}
/** This optional operation is not yet supported. */
+ @Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
@@ -298,6 +299,7 @@ private boolean renameRecursive(Path src, Path dst) throws IOException {
return true;
}
+ @Override
public boolean delete(Path path, boolean recursive) throws IOException {
Path absolutePath = makeAbsolute(path);
INode inode = store.retrieveINode(absolutePath);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
index c2293ba682..400419c110 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
@@ -49,6 +49,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
private S3Service s3Service;
private S3Bucket bucket;
+ @Override
public void initialize(URI uri, Configuration conf) throws IOException {
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
@@ -63,6 +64,7 @@ public void initialize(URI uri, Configuration conf) throws IOException {
bucket = new S3Bucket(uri.getHost());
}
+ @Override
public void storeFile(String key, File file, byte[] md5Hash)
throws IOException {
@@ -90,6 +92,7 @@ public void storeFile(String key, File file, byte[] md5Hash)
}
}
+ @Override
public void storeEmptyFile(String key) throws IOException {
try {
S3Object object = new S3Object(key);
@@ -102,6 +105,7 @@ public void storeEmptyFile(String key) throws IOException {
}
}
+ @Override
public FileMetadata retrieveMetadata(String key) throws IOException {
try {
S3Object object = s3Service.getObjectDetails(bucket, key);
@@ -117,6 +121,7 @@ public FileMetadata retrieveMetadata(String key) throws IOException {
}
}
+ @Override
public InputStream retrieve(String key) throws IOException {
try {
S3Object object = s3Service.getObject(bucket, key);
@@ -127,6 +132,7 @@ public InputStream retrieve(String key) throws IOException {
}
}
+ @Override
public InputStream retrieve(String key, long byteRangeStart)
throws IOException {
try {
@@ -139,11 +145,13 @@ public InputStream retrieve(String key, long byteRangeStart)
}
}
+ @Override
public PartialListing list(String prefix, int maxListingLength)
throws IOException {
return list(prefix, maxListingLength, null, false);
}
+ @Override
public PartialListing list(String prefix, int maxListingLength, String priorLastKey,
boolean recurse) throws IOException {
@@ -175,6 +183,7 @@ private PartialListing list(String prefix, String delimiter,
}
}
+ @Override
public void delete(String key) throws IOException {
try {
s3Service.deleteObject(bucket, key);
@@ -183,6 +192,7 @@ public void delete(String key) throws IOException {
}
}
+ @Override
public void copy(String srcKey, String dstKey) throws IOException {
try {
s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
@@ -192,6 +202,7 @@ public void copy(String srcKey, String dstKey) throws IOException {
}
}
+ @Override
public void purge(String prefix) throws IOException {
try {
S3Object[] objects = s3Service.listObjects(bucket, prefix, null);
@@ -203,6 +214,7 @@ public void purge(String prefix) throws IOException {
}
}
+ @Override
public void dump() throws IOException {
StringBuilder sb = new StringBuilder("S3 Native Filesystem, ");
sb.append(bucket.getName()).append("\n");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index eea429a97e..e1aeea94ac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -150,6 +150,7 @@ protected IllegalNumberOfArgumentsException(int want, int got) {
actual = got;
}
+ @Override
public String getMessage() {
return "expected " + expected + " but got " + actual;
}
@@ -165,6 +166,7 @@ public TooManyArgumentsException(int expected, int actual) {
super(expected, actual);
}
+ @Override
public String getMessage() {
return "Too many arguments: " + super.getMessage();
}
@@ -180,6 +182,7 @@ public NotEnoughArgumentsException(int expected, int actual) {
super(expected, actual);
}
+ @Override
public String getMessage() {
return "Not enough arguments: " + super.getMessage();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
index 71bfc9510d..bc1d8af951 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
@@ -114,6 +114,7 @@ private boolean moveToTrash(PathData item) throws IOException {
static class Rmr extends Rm {
public static final String NAME = "rmr";
+ @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
args.addFirst("-r");
super.processOptions(args);
@@ -136,6 +137,7 @@ static class Rmdir extends FsCommand {
private boolean ignoreNonEmpty = false;
+ @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(
1, Integer.MAX_VALUE, "-ignore-fail-on-non-empty");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index 5ae0d67c57..8d598012ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -161,6 +161,7 @@ public TextRecordInputStream(FileStatus f) throws IOException {
outbuf = new DataOutputBuffer();
}
+ @Override
public int read() throws IOException {
int ret;
if (null == inbuf || -1 == (ret = inbuf.read())) {
@@ -180,6 +181,7 @@ public int read() throws IOException {
return ret;
}
+ @Override
public void close() throws IOException {
r.close();
super.close();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 3f397327de..2541be393b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -73,6 +73,7 @@ public String getCommandName() {
// abstract method that normally is invoked by runall() which is
// overridden below
+ @Override
protected void run(Path path) throws IOException {
throw new RuntimeException("not supposed to get here");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
index b53d2820de..04574cf673 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
@@ -380,6 +380,7 @@ private static int findLongestDirPrefix(String cwd, String path, boolean isDir)
* as given on the commandline, or the full path
* @return String of the path
*/
+ @Override
public String toString() {
String scheme = uri.getScheme();
// No interpretation of symbols. Just decode % escaped chars.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 85426fa4ff..95d0a2d456 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -102,6 +102,7 @@ public ChRootedFileSystem(final URI uri, Configuration conf)
* for this FileSystem
* @param conf the configuration
*/
+ @Override
public void initialize(final URI name, final Configuration conf)
throws IOException {
super.initialize(name, conf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
index f92108cfe7..143ce68ebb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
@@ -20,10 +20,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-
import org.apache.hadoop.fs.Path;
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 1c0c8dac4d..6031daf118 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -164,6 +164,7 @@ public String getScheme() {
* this FileSystem
* @param conf the configuration
*/
+ @Override
public void initialize(final URI theUri, final Configuration conf)
throws IOException {
super.initialize(theUri, conf);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
index 871e3d8a63..e0f62e453b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
@@ -42,7 +42,8 @@ public boolean equals(Object o) {
return super.equals(o);
}
- public int hashCode() {
+ @Override
+ public int hashCode() {
return super.hashCode();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index a4ed255deb..5287581073 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -892,6 +892,7 @@ private String createWithRetries(final String path, final byte[] data,
      final List<ACL> acl, final CreateMode mode)
throws InterruptedException, KeeperException {
    return zkDoWithRetries(new ZKAction<String>() {
+ @Override
public String run() throws KeeperException, InterruptedException {
return zkClient.create(path, data, acl, mode);
}
@@ -901,6 +902,7 @@ public String run() throws KeeperException, InterruptedException {
private byte[] getDataWithRetries(final String path, final boolean watch,
final Stat stat) throws InterruptedException, KeeperException {
    return zkDoWithRetries(new ZKAction<byte[]>() {
+ @Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(path, watch, stat);
}
@@ -910,6 +912,7 @@ public byte[] run() throws KeeperException, InterruptedException {
private Stat setDataWithRetries(final String path, final byte[] data,
final int version) throws InterruptedException, KeeperException {
    return zkDoWithRetries(new ZKAction<Stat>() {
+ @Override
public Stat run() throws KeeperException, InterruptedException {
return zkClient.setData(path, data, version);
}
@@ -919,6 +922,7 @@ public Stat run() throws KeeperException, InterruptedException {
private void deleteWithRetries(final String path, final int version)
throws KeeperException, InterruptedException {
    zkDoWithRetries(new ZKAction<Void>() {
+ @Override
public Void run() throws KeeperException, InterruptedException {
zkClient.delete(path, version);
return null;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index d4ae0899fb..85912c7c76 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -56,6 +56,7 @@ public enum HAServiceState {
this.name = name;
}
+ @Override
public String toString() {
return name;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
index 06fb648f42..4898b38726 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
@@ -184,6 +184,7 @@ private FenceMethodWithArg(FenceMethod method, String arg) {
this.arg = arg;
}
+ @Override
public String toString() {
return method.getClass().getCanonicalName() + "(" + arg + ")";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 537fba942d..343693e95c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -274,6 +274,7 @@ private static class LogAdapter implements com.jcraft.jsch.Logger {
static final Log LOG = LogFactory.getLog(
SshFenceByTcpPort.class.getName() + ".jsch");
+ @Override
public boolean isEnabled(int level) {
switch (level) {
case com.jcraft.jsch.Logger.DEBUG:
@@ -291,6 +292,7 @@ public boolean isEnabled(int level) {
}
}
+ @Override
public void log(int level, String message) {
switch (level) {
case com.jcraft.jsch.Logger.DEBUG:
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 7bf3c16e8c..77e9e1601a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -474,7 +474,7 @@ public void addInternalServlet(String name, String pathSpec,
}
}
- /** {@inheritDoc} */
+ @Override
public void addFilter(String name, String classname,
      Map<String, String> parameters) {
@@ -494,7 +494,7 @@ public void addFilter(String name, String classname,
filterNames.add(name);
}
- /** {@inheritDoc} */
+ @Override
public void addGlobalFilter(String name, String classname,
      Map<String, String> parameters) {
final String[] ALL_URLS = { "/*" };
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
index bb2f163fe4..6bd9efc689 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
@@ -164,16 +164,18 @@ protected AbstractMapWritable() {
}
/** @return the conf */
+ @Override
public Configuration getConf() {
return conf.get();
}
/** @param conf the conf to set */
+ @Override
public void setConf(Configuration conf) {
this.conf.set(conf);
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
// First write out the size of the class table and any classes that are
@@ -187,7 +189,7 @@ public void write(DataOutput out) throws IOException {
}
}
- /** {@inheritDoc} */
+ @Override
public void readFields(DataInput in) throws IOException {
// Get the number of "unknown" classes
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
index 875d6efdc2..122aa5ca1e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
@@ -88,6 +88,7 @@ public Object toArray() {
public Writable[] get() { return values; }
+ @Override
public void readFields(DataInput in) throws IOException {
values = new Writable[in.readInt()]; // construct values
for (int i = 0; i < values.length; i++) {
@@ -97,6 +98,7 @@ public void readFields(DataInput in) throws IOException {
}
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(values.length); // write values
for (int i = 0; i < values.length; i++) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
index 71279b4f6d..0079079a79 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
@@ -57,12 +57,14 @@ public boolean get() {
/**
*/
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readBoolean();
}
/**
*/
+ @Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
index ff926c11c1..ffcdea2c9a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
@@ -39,10 +39,12 @@ public ByteWritable() {}
/** Return the value of this ByteWritable. */
public byte get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readByte();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeByte(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
index 012a3bc9d7..7e42a36cb7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
@@ -81,6 +81,7 @@ public byte[] copyBytes() {
* if you need the returned array to be precisely the length of the data.
* @return The data is only valid between 0 and getLength() - 1.
*/
+ @Override
public byte[] getBytes() {
return bytes;
}
@@ -97,6 +98,7 @@ public byte[] get() {
/**
* Get the current size of the buffer.
*/
+ @Override
public int getLength() {
return size;
}
@@ -171,6 +173,7 @@ public void set(byte[] newData, int offset, int length) {
}
// inherit javadoc
+ @Override
public void readFields(DataInput in) throws IOException {
setSize(0); // clear the old data
setSize(in.readInt());
@@ -178,6 +181,7 @@ public void readFields(DataInput in) throws IOException {
}
// inherit javadoc
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(size);
out.write(bytes, 0, size);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
index ad3164b2d2..6550e1f2fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
@@ -45,6 +45,7 @@ public abstract class CompressedWritable implements Writable {
public CompressedWritable() {}
+ @Override
public final void readFields(DataInput in) throws IOException {
compressed = new byte[in.readInt()];
in.readFully(compressed, 0, compressed.length);
@@ -70,6 +71,7 @@ protected void ensureInflated() {
protected abstract void readFieldsCompressed(DataInput in)
throws IOException;
+ @Override
public final void write(DataOutput out) throws IOException {
if (compressed == null) {
ByteArrayOutputStream deflated = new ByteArrayOutputStream();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
index 469d3ff863..2cd59d75dc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
@@ -21,8 +21,6 @@
import java.io.DataInputStream;
import java.io.InputStream;
import java.nio.ByteBuffer;
-import java.util.LinkedList;
-import java.util.List;
public class DataInputByteBuffer extends DataInputStream {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
index 6cd1f49722..2b8e259464 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
@@ -72,6 +72,7 @@ public DefaultStringifier(Configuration conf, Class<T> c) {
}
}
+ @Override
public T fromString(String str) throws IOException {
try {
byte[] bytes = Base64.decodeBase64(str.getBytes("UTF-8"));
@@ -83,6 +84,7 @@ public T fromString(String str) throws IOException {
}
}
+ @Override
public String toString(T obj) throws IOException {
outBuf.reset();
serializer.serialize(obj);
@@ -91,6 +93,7 @@ public String toString(T obj) throws IOException {
return new String(Base64.encodeBase64(buf));
}
+ @Override
public void close() throws IOException {
inBuf.close();
outBuf.close();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
index a984cd4ef5..5cc326fe3c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
@@ -42,10 +42,12 @@ public DoubleWritable(double value) {
set(value);
}
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readDouble();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeDouble(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
index c1ff1ca3bf..dc430cc29c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
@@ -23,7 +23,6 @@
import java.io.IOException;
import java.util.EnumSet;
import java.util.Iterator;
-import java.util.Collection;
import java.util.AbstractCollection;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -46,8 +45,11 @@ public class EnumSetWritable<E extends Enum<E>> extends AbstractCollection<E>
EnumSetWritable() {
}
+ @Override
  public Iterator<E> iterator() { return value.iterator(); }
+ @Override
public int size() { return value.size(); }
+ @Override
public boolean add(E e) {
if (value == null) {
value = EnumSet.of(e);
@@ -109,7 +111,7 @@ public EnumSet<E> get() {
return value;
}
- /** {@inheritDoc} */
+ @Override
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
@@ -127,7 +129,7 @@ else if (length == 0) {
}
}
- /** {@inheritDoc} */
+ @Override
public void write(DataOutput out) throws IOException {
if (this.value == null) {
out.writeInt(-1);
@@ -152,6 +154,7 @@ public void write(DataOutput out) throws IOException {
   * Returns true if <code>o</code> is an EnumSetWritable with the same value,
* or both are null.
*/
+ @Override
public boolean equals(Object o) {
if (o == null) {
throw new IllegalArgumentException("null argument passed in equal().");
@@ -180,27 +183,25 @@ public Class<E> getElementType() {
return elementType;
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
if (value == null)
return 0;
return (int) value.hashCode();
}
- /** {@inheritDoc} */
+ @Override
public String toString() {
if (value == null)
return "(null)";
return value.toString();
}
- /** {@inheritDoc} */
@Override
public Configuration getConf() {
return this.conf;
}
- /** {@inheritDoc} */
@Override
public void setConf(Configuration conf) {
this.conf = conf;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
index 4ade2c4d62..21e4cc4f5b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
@@ -39,10 +39,12 @@ public FloatWritable() {}
/** Return the value of this FloatWritable. */
public float get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readFloat();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeFloat(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
index 8268a5a915..7cfeed7f93 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
@@ -114,11 +114,13 @@ public Writable get() {
return instance;
}
+ @Override
public String toString() {
return "GW[" + (instance != null ? ("class=" + instance.getClass().getName() +
",value=" + instance.toString()) : "(null)") + "]";
}
+ @Override
public void readFields(DataInput in) throws IOException {
type = in.readByte();
    Class<? extends Writable> clazz = getTypes()[type & 0xff];
@@ -131,6 +133,7 @@ public void readFields(DataInput in) throws IOException {
instance.readFields(in);
}
+ @Override
public void write(DataOutput out) throws IOException {
if (type == NOT_SET || instance == null)
throw new IOException("The GenericWritable has NOT been set correctly. type="
@@ -145,10 +148,12 @@ public void write(DataOutput out) throws IOException {
*/
  abstract protected Class<? extends Writable>[] getTypes();
+ @Override
public Configuration getConf() {
return conf;
}
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 819f075812..a3315a869e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -272,9 +272,11 @@ public static void closeSocket(Socket sock) {
* The /dev/null of OutputStreams.
*/
public static class NullOutputStream extends OutputStream {
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
}
+ @Override
public void write(int b) throws IOException {
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
index 6a44d81db6..f656d028cb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
@@ -42,10 +42,12 @@ public IntWritable() {}
/** Return the value of this IntWritable. */
public int get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readInt();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
index b9d64d904d..6dec4aa618 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
@@ -42,15 +42,18 @@ public LongWritable() {}
/** Return the value of this LongWritable. */
public long get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = in.readLong();
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeLong(value);
}
  /** Returns true iff <code>o</code> is a LongWritable with the same value. */
+ @Override
public boolean equals(Object o) {
if (!(o instanceof LongWritable))
return false;
@@ -58,17 +61,20 @@ public boolean equals(Object o) {
return this.value == other.value;
}
+ @Override
public int hashCode() {
return (int)value;
}
/** Compares two LongWritables. */
+ @Override
public int compareTo(LongWritable o) {
long thisValue = this.value;
long thatValue = o.value;
    return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
  }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
@@ public class MD5Hash implements WritableComparable<MD5Hash> {
public static final int MD5_LEN = 16;
  private static ThreadLocal<MessageDigest> DIGESTER_FACTORY = new ThreadLocal<MessageDigest>() {
+ @Override
protected MessageDigest initialValue() {
try {
return MessageDigest.getInstance("MD5");
@@ -65,6 +66,7 @@ public MD5Hash(byte[] digest) {
}
// javadoc from Writable
+ @Override
public void readFields(DataInput in) throws IOException {
in.readFully(digest);
}
@@ -77,6 +79,7 @@ public static MD5Hash read(DataInput in) throws IOException {
}
// javadoc from Writable
+ @Override
public void write(DataOutput out) throws IOException {
out.write(digest);
}
@@ -155,6 +158,7 @@ public int quarterDigest() {
/** Returns true iff <code>o</code> is an MD5Hash whose digest contains the
* same values. */
+ @Override
public boolean equals(Object o) {
if (!(o instanceof MD5Hash))
return false;
@@ -165,12 +169,14 @@ public boolean equals(Object o) {
/** Returns a hash code value for this object.
* Only uses the first 4 bytes, since md5s are evenly distributed.
*/
+ @Override
public int hashCode() {
return quarterDigest();
}
/** Compares this object with the specified object for order.*/
+ @Override
public int compareTo(MD5Hash that) {
return WritableComparator.compareBytes(this.digest, 0, MD5_LEN,
that.digest, 0, MD5_LEN);
@@ -182,6 +188,7 @@ public Comparator() {
super(MD5Hash.class);
}
+ @Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1, MD5_LEN, b2, s2, MD5_LEN);
@@ -196,6 +203,7 @@ public int compare(byte[] b1, int s1, int l1,
{'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
/** Returns a string representation of this object. */
+ @Override
public String toString() {
StringBuilder buf = new StringBuilder(MD5_LEN*2);
for (int i = 0; i < MD5_LEN; i++) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 9c14402d75..7e7d855f82 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -296,6 +296,7 @@ public static void setIndexInterval(Configuration conf, int interval) {
}
/** Close the map. */
+ @Override
public synchronized void close() throws IOException {
data.close();
index.close();
@@ -723,6 +724,7 @@ public synchronized WritableComparable getClosest(WritableComparable key,
}
/** Close the map. */
+ @Override
public synchronized void close() throws IOException {
if (!indexClosed) {
index.close();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
index 377c9c1656..72c7098d7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
@@ -55,27 +55,27 @@ public MapWritable(MapWritable other) {
copy(other);
}
- /** {@inheritDoc} */
+ @Override
public void clear() {
instance.clear();
}
- /** {@inheritDoc} */
+ @Override
public boolean containsKey(Object key) {
return instance.containsKey(key);
}
- /** {@inheritDoc} */
+ @Override
public boolean containsValue(Object value) {
return instance.containsValue(value);
}
- /** {@inheritDoc} */
+ @Override
public Set<Map.Entry<Writable, Writable>> entrySet() {
return instance.entrySet();
}
- /** {@inheritDoc} */
+ @Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
@@ -93,27 +93,27 @@ public boolean equals(Object obj) {
return false;
}
- /** {@inheritDoc} */
+ @Override
public Writable get(Object key) {
return instance.get(key);
}
- /** {@inheritDoc} */
+ @Override
public int hashCode() {
return 1 + this.instance.hashCode();
}
- /** {@inheritDoc} */
+ @Override
public boolean isEmpty() {
return instance.isEmpty();
}
- /** {@inheritDoc} */
+ @Override
public Set<Writable> keySet() {
return instance.keySet();
}
- /** {@inheritDoc} */
+ @Override
@SuppressWarnings("unchecked")
public Writable put(Writable key, Writable value) {
addToMap(key.getClass());
@@ -121,31 +121,30 @@ public Writable put(Writable key, Writable value) {
return instance.put(key, value);
}
- /** {@inheritDoc} */
+ @Override
public void putAll(Map<? extends Writable, ? extends Writable> t) {
for (Map.Entry<? extends Writable, ? extends Writable> e: t.entrySet()) {
put(e.getKey(), e.getValue());
}
}
- /** {@inheritDoc} */
+ @Override
public Writable remove(Object key) {
return instance.remove(key);
}
- /** {@inheritDoc} */
+ @Override
public int size() {
return instance.size();
}
- /** {@inheritDoc} */
+ @Override
public Collection<Writable> values() {
return instance.values();
}
// Writable
- /** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
@@ -164,7 +163,6 @@ public void write(DataOutput out) throws IOException {
}
}
- /** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
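MapWritable implements java.util.Map<Writable, Writable>, which is why the Map methods above switch from {@inheritDoc} comments to @Override. A small usage sketch (illustrative only):

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.MapWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;

    public class MapWritableDemo {
      public static void main(String[] args) {
        MapWritable m = new MapWritable();
        m.put(new Text("count"), new IntWritable(7));   // Map.put
        Writable v = m.get(new Text("count"));          // Map.get
        System.out.println(((IntWritable) v).get());    // prints 7
      }
    }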
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
index beb7b17ce7..77c590fdb6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
@@ -35,6 +35,7 @@ private NullWritable() {} // no public ctor
/** Returns the single instance of this class. */
public static NullWritable get() { return THIS; }
+ @Override
public String toString() {
return "(null)";
}
@@ -46,8 +47,11 @@ public String toString() {
public int compareTo(NullWritable other) {
return 0;
}
+ @Override
public boolean equals(Object other) { return other instanceof NullWritable; }
+ @Override
public void readFields(DataInput in) throws IOException {}
+ @Override
public void write(DataOutput out) throws IOException {}
/** A Comparator "optimized" for NullWritable. */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
index c555111097..0f0f5c7405 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
@@ -66,15 +66,18 @@ public void set(Object instance) {
this.instance = instance;
}
+ @Override
public String toString() {
return "OW[class=" + declaredClass + ",value=" + instance + "]";
}
+ @Override
public void readFields(DataInput in) throws IOException {
readObject(in, this, this.conf);
}
+ @Override
public void write(DataOutput out) throws IOException {
writeObject(out, instance, declaredClass, conf);
}
@@ -99,6 +102,7 @@ public NullInstance(Class declaredClass, Configuration conf) {
super(conf);
this.declaredClass = declaredClass;
}
+ @Override
public void readFields(DataInput in) throws IOException {
String className = UTF8.readString(in);
declaredClass = PRIMITIVE_NAMES.get(className);
@@ -110,6 +114,7 @@ public void readFields(DataInput in) throws IOException {
}
}
}
+ @Override
public void write(DataOutput out) throws IOException {
UTF8.writeString(out, declaredClass.getName());
}
@@ -375,10 +380,12 @@ public static Class<?> loadClass(Configuration conf, String className) {
return declaredClass;
}
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
+ @Override
public Configuration getConf() {
return this.conf;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
index b7605db9a9..15a396dc2b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
@@ -50,6 +50,7 @@ public class OutputBuffer extends FilterOutputStream {
private static class Buffer extends ByteArrayOutputStream {
public byte[] getData() { return buf; }
public int getLength() { return count; }
+ @Override
public void reset() { count = 0; }
public void write(InputStream in, int len) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index 046d9e4b73..f1545b69c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -194,6 +194,7 @@ private ReadaheadRequestImpl(String identifier, FileDescriptor fd, long off, lon
this.len = len;
}
+ @Override
public void run() {
if (canceled) return;
// There's a very narrow race here that the file will close right at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
index 6bc798e7e3..b30c4a4da4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
@@ -24,7 +24,6 @@
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 293fdbbb93..8a14860773 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -625,15 +625,18 @@ private void reset(DataInputStream in, int length) throws IOException {
dataSize = length;
}
+ @Override
public int getSize() {
return dataSize;
}
+ @Override
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
outStream.write(data, 0, dataSize);
}
+ @Override
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
throw
@@ -666,10 +669,12 @@ private void reset(DataInputStream in, int length) throws IOException {
dataSize = length;
}
+ @Override
public int getSize() {
return dataSize;
}
+ @Override
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
if (decompressedStream == null) {
@@ -687,6 +692,7 @@ public void writeUncompressedBytes(DataOutputStream outStream)
}
}
+ @Override
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
outStream.write(data, 0, dataSize);
@@ -728,6 +734,7 @@ public TreeMap<Text, Text> getMetadata() {
return new TreeMap<Text, Text>(this.theMetadata);
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(this.theMetadata.size());
Iterator<Map.Entry<Text, Text>> iter =
@@ -739,6 +746,7 @@ public void write(DataOutput out) throws IOException {
}
}
+ @Override
public void readFields(DataInput in) throws IOException {
int sz = in.readInt();
if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object");
@@ -752,6 +760,7 @@ public void readFields(DataInput in) throws IOException {
}
}
+ @Override
public boolean equals(Object other) {
if (other == null) {
return false;
@@ -788,11 +797,13 @@ public boolean equals(Metadata other) {
return true;
}
+ @Override
public int hashCode() {
assert false : "hashCode not designed";
return 42; // any arbitrary constant will do
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("size: ").append(this.theMetadata.size()).append("\n");
@@ -1250,6 +1261,7 @@ public void hflush() throws IOException {
Configuration getConf() { return conf; }
/** Close the file. */
+ @Override
public synchronized void close() throws IOException {
keySerializer.close();
uncompressedValSerializer.close();
@@ -1360,6 +1372,7 @@ static class RecordCompressWriter extends Writer {
}
/** Append a key/value pair. */
+ @Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
@@ -1392,6 +1405,7 @@ public synchronized void append(Object key, Object val)
}
/** Append a key/value pair. */
+ @Override
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
@@ -1449,6 +1463,7 @@ void writeBuffer(DataOutputBuffer uncompressedDataBuffer)
}
/** Compress and flush contents to dfs */
+ @Override
public synchronized void sync() throws IOException {
if (noBufferedRecords > 0) {
super.sync();
@@ -1478,6 +1493,7 @@ public synchronized void sync() throws IOException {
}
/** Close the file. */
+ @Override
public synchronized void close() throws IOException {
if (out != null) {
sync();
@@ -1486,6 +1502,7 @@ public synchronized void close() throws IOException {
}
/** Append a key/value pair. */
+ @Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
@@ -1518,6 +1535,7 @@ public synchronized void append(Object key, Object val)
}
/** Append a key/value pair. */
+ @Override
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
@@ -1960,6 +1978,7 @@ private Deserializer getDeserializer(SerializationFactory sf, Class c) {
}
/** Close the file. */
+ @Override
public synchronized void close() throws IOException {
// Return the decompressors to the pool
CodecPool.returnDecompressor(keyLenDecompressor);
@@ -2618,6 +2637,7 @@ public synchronized long getPosition() throws IOException {
}
/** Returns the name of the file. */
+ @Override
public String toString() {
return filename;
}
@@ -2948,6 +2968,7 @@ private void sort(int count) {
mergeSort.mergeSort(pointersCopy, pointers, 0, count);
}
class SeqFileComparator implements Comparator<IntWritable> {
+ @Override
public int compare(IntWritable I, IntWritable J) {
return comparator.compare(rawBuffer, keyOffsets[I.get()],
keyLengths[I.get()], rawBuffer,
@@ -3221,6 +3242,7 @@ public MergeQueue(List<SegmentDescriptor> segments,
this.tmpDir = tmpDir;
this.progress = progress;
}
+ @Override
protected boolean lessThan(Object a, Object b) {
// indicate we're making progress
if (progress != null) {
@@ -3232,6 +3254,7 @@ protected boolean lessThan(Object a, Object b) {
msa.getKey().getLength(), msb.getKey().getData(), 0,
msb.getKey().getLength()) < 0;
}
+ @Override
public void close() throws IOException {
SegmentDescriptor ms; // close inputs
while ((ms = (SegmentDescriptor)pop()) != null) {
@@ -3239,12 +3262,15 @@ public void close() throws IOException {
}
minSegment = null;
}
+ @Override
public DataOutputBuffer getKey() throws IOException {
return rawKey;
}
+ @Override
public ValueBytes getValue() throws IOException {
return rawValue;
}
+ @Override
public boolean next() throws IOException {
if (size() == 0)
return false;
@@ -3272,6 +3298,7 @@ public boolean next() throws IOException {
return true;
}
+ @Override
public Progress getProgress() {
return mergeProgress;
}
@@ -3469,6 +3496,7 @@ public boolean shouldPreserveInput() {
return preserveInput;
}
+ @Override
public int compareTo(Object o) {
SegmentDescriptor that = (SegmentDescriptor)o;
if (this.segmentLength != that.segmentLength) {
@@ -3481,6 +3509,7 @@ public int compareTo(Object o) {
compareTo(that.segmentPathName.toString());
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof SegmentDescriptor)) {
return false;
@@ -3495,6 +3524,7 @@ public boolean equals(Object o) {
return false;
}
+ @Override
public int hashCode() {
return 37 * 17 + (int) (segmentOffset^(segmentOffset>>>32));
}
@@ -3584,12 +3614,14 @@ public LinkedSegmentsDescriptor (long segmentOffset, long segmentLength,
/** The default cleanup. Subclasses can override this with a custom
* cleanup
*/
+ @Override
public void cleanup() throws IOException {
super.close();
if (super.shouldPreserveInput()) return;
parentContainer.cleanup();
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof LinkedSegmentsDescriptor)) {
return false;
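The SequenceFile Writer/Reader methods gaining @Override above (append, sync, close, next, ...) are exercised by ordinary read/write usage; a sketch (illustrative only, using the classic createWriter overload):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SequenceFileDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path file = new Path("/tmp/demo.seq");

        SequenceFile.Writer writer =
            SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class);
        writer.append(new IntWritable(1), new Text("one"));
        writer.close();

        SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
        IntWritable key = new IntWritable();
        Text value = new Text();
        while (reader.next(key, value)) {
          System.out.println(key + "\t" + value);
        }
        reader.close();
      }
    }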
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
index 9ba0023190..068ca9d40e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
@@ -87,6 +87,7 @@ public Reader(FileSystem fs, String dirName, WritableComparator comparator, Conf
}
// javadoc inherited
+ @Override
public boolean seek(WritableComparable key)
throws IOException {
return super.seek(key);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
index d870a5fd84..eee744ec6a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
@@ -57,86 +57,86 @@ public SortedMapWritable(SortedMapWritable other) {
copy(other);
}
- /** {@inheritDoc} */
+ @Override
public Comparator<? super WritableComparable> comparator() {
// Returning null means we use the natural ordering of the keys
return null;
}
- /** {@inheritDoc} */
+ @Override
public WritableComparable firstKey() {
return instance.firstKey();
}
- /** {@inheritDoc} */
+ @Override
public SortedMap<WritableComparable, Writable>
headMap(WritableComparable toKey) {
return instance.headMap(toKey);
}
- /** {@inheritDoc} */
+ @Override
public WritableComparable lastKey() {
return instance.lastKey();
}
- /** {@inheritDoc} */
+ @Override
public SortedMap<WritableComparable, Writable>
subMap(WritableComparable fromKey, WritableComparable toKey) {
return instance.subMap(fromKey, toKey);
}
- /** {@inheritDoc} */
+ @Override
public SortedMap<WritableComparable, Writable>
tailMap(WritableComparable fromKey) {
return instance.tailMap(fromKey);
}
- /** {@inheritDoc} */
+ @Override
public void clear() {
instance.clear();
}
- /** {@inheritDoc} */
+ @Override
public boolean containsKey(Object key) {
return instance.containsKey(key);
}
- /** {@inheritDoc} */
+ @Override
public boolean containsValue(Object value) {
return instance.containsValue(value);
}
- /** {@inheritDoc} */
+ @Override
public Set<Map.Entry<WritableComparable, Writable>> entrySet() {
return instance.entrySet();
}
- /** {@inheritDoc} */
+ @Override
public Writable get(Object key) {
return instance.get(key);
}
- /** {@inheritDoc} */
+ @Override
public boolean isEmpty() {
return instance.isEmpty();
}
- /** {@inheritDoc} */
+ @Override
public Set<WritableComparable> keySet() {
return instance.keySet();
}
- /** {@inheritDoc} */
+ @Override
public Writable put(WritableComparable key, Writable value) {
addToMap(key.getClass());
addToMap(value.getClass());
return instance.put(key, value);
}
- /** {@inheritDoc} */
+ @Override
public void putAll(Map<? extends WritableComparable, ? extends Writable> t) {
for (Map.Entry<? extends WritableComparable, ? extends Writable> e:
t.entrySet()) {
@@ -145,22 +145,21 @@ public void putAll(Map<? extends WritableComparable, ? extends Writable> t) {
}
}
- /** {@inheritDoc} */
+ @Override
public Writable remove(Object key) {
return instance.remove(key);
}
- /** {@inheritDoc} */
+ @Override
public int size() {
return instance.size();
}
- /** {@inheritDoc} */
+ @Override
public Collection<Writable> values() {
return instance.values();
}
- /** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
@@ -187,7 +186,6 @@ public void readFields(DataInput in) throws IOException {
}
}
- /** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
index a7ee6876d4..949b14ae57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
@@ -54,6 +54,7 @@ public interface Stringifier<T> extends java.io.Closeable {
* Closes this object.
* @throws IOException if an I/O error occurs
* */
+ @Override
public void close() throws IOException;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index a4f80ea886..95fb174a9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -55,6 +55,7 @@ public class Text extends BinaryComparable
private static ThreadLocal<CharsetEncoder> ENCODER_FACTORY =
new ThreadLocal<CharsetEncoder>() {
+ @Override
protected CharsetEncoder initialValue() {
return Charset.forName("UTF-8").newEncoder().
onMalformedInput(CodingErrorAction.REPORT).
@@ -64,6 +65,7 @@ protected CharsetEncoder initialValue() {
private static ThreadLocal<CharsetDecoder> DECODER_FACTORY =
new ThreadLocal<CharsetDecoder>() {
+ @Override
protected CharsetDecoder initialValue() {
return Charset.forName("UTF-8").newDecoder().
onMalformedInput(CodingErrorAction.REPORT).
@@ -112,11 +114,13 @@ public byte[] copyBytes() {
* valid. Please use {@link #copyBytes()} if you
* need the returned array to be precisely the length of the data.
*/
+ @Override
public byte[] getBytes() {
return bytes;
}
/** Returns the number of bytes in the byte array */
+ @Override
public int getLength() {
return length;
}
@@ -281,6 +285,7 @@ public String toString() {
/** deserialize
*/
+ @Override
public void readFields(DataInput in) throws IOException {
int newLength = WritableUtils.readVInt(in);
setCapacity(newLength, false);
@@ -313,6 +318,7 @@ public static void skip(DataInput in) throws IOException {
* length uses zero-compressed encoding
* @see Writable#write(DataOutput)
*/
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, length);
out.write(bytes, 0, length);
@@ -329,6 +335,7 @@ public void write(DataOutput out, int maxLength) throws IOException {
}
/** Returns true iff <code>o</code> is a Text with the same contents. */
+ @Override
public boolean equals(Object o) {
if (o instanceof Text)
return super.equals(o);
@@ -346,6 +353,7 @@ public Comparator() {
super(Text.class);
}
+ @Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
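As the javadoc above notes, getBytes() exposes the backing array, which is only valid up to getLength(); copyBytes() returns an exact-length copy. Illustrative only:

    import org.apache.hadoop.io.Text;

    public class TextBytesDemo {
      public static void main(String[] args) {
        Text t = new Text("hadoop");
        byte[] raw = t.getBytes();     // backing array; may be longer than the data
        int len = t.getLength();       // only raw[0..len) holds valid bytes
        byte[] exact = t.copyBytes();  // exact.length == len
        System.out.println(len + " " + exact.length);
      }
    }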
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
index 76304623ee..cf8947d32d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
@@ -57,6 +57,7 @@ public Object toArray() {
public Writable[][] get() { return values; }
+ @Override
public void readFields(DataInput in) throws IOException {
// construct matrix
values = new Writable[in.readInt()][];
@@ -81,6 +82,7 @@ public void readFields(DataInput in) throws IOException {
}
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeInt(values.length); // write values
for (int i = 0; i < values.length; i++) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
index 6a0f88673f..ef7512996c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
@@ -110,6 +110,7 @@ public void set(UTF8 other) {
System.arraycopy(other.bytes, 0, bytes, 0, length);
}
+ @Override
public void readFields(DataInput in) throws IOException {
length = in.readUnsignedShort();
if (bytes == null || bytes.length < length)
@@ -123,6 +124,7 @@ public static void skip(DataInput in) throws IOException {
WritableUtils.skipFully(in, length);
}
+ @Override
public void write(DataOutput out) throws IOException {
out.writeShort(length);
out.write(bytes, 0, length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
index e37b144dbf..f537524c4b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
@@ -43,10 +43,12 @@ public VIntWritable() {}
/** Return the value of this VIntWritable. */
public int get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = WritableUtils.readVInt(in);
}
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
index 869bf43914..a9fac30605 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
@@ -43,10 +43,12 @@ public VLongWritable() {}
/** Return the value of this LongWritable. */
public long get() { return value; }
+ @Override
public void readFields(DataInput in) throws IOException {
value = WritableUtils.readVLong(in);
}
+ @Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, value);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
index 162374be21..a72be58832 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
@@ -39,6 +39,7 @@ public VersionMismatchException(byte expectedVersionIn, byte foundVersionIn){
}
/** Returns a string representation of this object. */
+ @Override
public String toString(){
return "A record version mismatch occured. Expecting v"
+ expectedVersion + ", found v" + foundVersion;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
index a197fd2e4f..c2db55520c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
@@ -40,11 +40,13 @@ public abstract class VersionedWritable implements Writable {
public abstract byte getVersion();
// javadoc from Writable
+ @Override
public void write(DataOutput out) throws IOException {
out.writeByte(getVersion()); // store version
}
// javadoc from Writable
+ @Override
public void readFields(DataInput in) throws IOException {
byte version = in.readByte(); // read version
if (version != getVersion())
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
index 6eb3a21443..eb3c8d322c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
@@ -120,6 +120,7 @@ public WritableComparable newKey() {
* Writable#readFields(DataInput)}, then calls {@link
* #compare(WritableComparable,WritableComparable)}.
*/
+ @Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
buffer.reset(b1, s1, l1); // parse key1
@@ -144,6 +145,7 @@ public int compare(WritableComparable a, WritableComparable b) {
return a.compareTo(b);
}
+ @Override
public int compare(Object a, Object b) {
return compare((WritableComparable)a, (WritableComparable)b);
}
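The byte-level compare() overridden above is the hot path the framework uses on serialized keys; the object-level overloads delegate to compareTo(). A small sketch (illustrative only):

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.WritableComparator;

    public class ComparatorDemo {
      public static void main(String[] args) {
        WritableComparator cmp = WritableComparator.get(LongWritable.class);
        int c = cmp.compare(new LongWritable(1L), new LongWritable(2L));
        System.out.println(c < 0);   // true
      }
    }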
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index a7a925f35a..35f7cb43ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -63,6 +63,7 @@ public BZip2Codec() { }
* @throws java.io.IOException
* Throws IO exception
*/
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return new BZip2CompressionOutputStream(out);
@@ -74,6 +75,7 @@ public CompressionOutputStream createOutputStream(OutputStream out)
* @return CompressionOutputStream
@throws java.io.IOException
*/
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return createOutputStream(out);
@@ -84,6 +86,7 @@ public CompressionOutputStream createOutputStream(OutputStream out,
*
* @return BZip2DummyCompressor.class
*/
+ @Override
public Class<? extends org.apache.hadoop.io.compress.Compressor> getCompressorType() {
return BZip2DummyCompressor.class;
}
@@ -93,6 +96,7 @@ public Class<? extends org.apache.hadoop.io.compress.Compressor> getCompressorTy
*
* @return Compressor
*/
+ @Override
public Compressor createCompressor() {
return new BZip2DummyCompressor();
}
@@ -106,6 +110,7 @@ public Compressor createCompressor() {
* @throws java.io.IOException
* Throws IOException
*/
+ @Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return new BZip2CompressionInputStream(in);
@@ -116,6 +121,7 @@ public CompressionInputStream createInputStream(InputStream in)
*
* @return CompressionInputStream
*/
+ @Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return createInputStream(in);
@@ -133,6 +139,7 @@ public CompressionInputStream createInputStream(InputStream in,
*
* @return CompressionInputStream for BZip2 aligned at block boundaries
*/
+ @Override
public SplitCompressionInputStream createInputStream(InputStream seekableIn,
Decompressor decompressor, long start, long end, READ_MODE readMode)
throws IOException {
@@ -181,6 +188,7 @@ public SplitCompressionInputStream createInputStream(InputStream seekableIn,
*
* @return BZip2DummyDecompressor.class
*/
+ @Override
public Class<? extends org.apache.hadoop.io.compress.Decompressor> getDecompressorType() {
return BZip2DummyDecompressor.class;
}
@@ -190,6 +198,7 @@ public Class<? extends org.apache.hadoop.io.compress.Decompressor> getDecompress
*
* @return Decompressor
*/
+ @Override
public Decompressor createDecompressor() {
return new BZip2DummyDecompressor();
}
@@ -199,6 +208,7 @@ public Decompressor createDecompressor() {
*
* @return A String telling the default bzip2 file extension
*/
+ @Override
public String getDefaultExtension() {
return ".bz2";
}
@@ -226,6 +236,7 @@ private void writeStreamHeader() throws IOException {
}
}
+ @Override
public void finish() throws IOException {
if (needsReset) {
// In the case that nothing is written to this stream, we still need to
@@ -245,12 +256,14 @@ private void internalReset() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
// Cannot write to out at this point because out might not be ready
// yet, as in SequenceFile.Writer implementation.
needsReset = true;
}
+ @Override
public void write(int b) throws IOException {
if (needsReset) {
internalReset();
@@ -258,6 +271,7 @@ public void write(int b) throws IOException {
this.output.write(b);
}
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
if (needsReset) {
internalReset();
@@ -265,6 +279,7 @@ public void write(byte[] b, int off, int len) throws IOException {
this.output.write(b, off, len);
}
+ @Override
public void close() throws IOException {
if (needsReset) {
// In the case that nothing is written to this stream, we still need to
@@ -382,6 +397,7 @@ private BufferedInputStream readStreamHeader() throws IOException {
}// end of method
+ @Override
public void close() throws IOException {
if (!needsReset) {
input.close();
@@ -417,6 +433,7 @@ public void close() throws IOException {
*
*/
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
if (needsReset) {
internalReset();
@@ -440,6 +457,7 @@ public int read(byte[] b, int off, int len) throws IOException {
}
+ @Override
public int read() throws IOException {
byte b[] = new byte[1];
int result = this.read(b, 0, 1);
@@ -454,6 +472,7 @@ private void internalReset() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
// Cannot read from bufferedIn at this point because bufferedIn
// might not be ready
@@ -461,6 +480,7 @@ public void resetState() throws IOException {
needsReset = true;
}
+ @Override
public long getPos() {
return this.compressedStreamPosition;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
index 5d854861f2..434183bbc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
@@ -78,6 +78,7 @@ public BlockCompressorStream(OutputStream out, Compressor compressor) {
* Each block contains the uncompressed length for the block, followed by
* one or more length-prefixed blocks of compressed data.
*/
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
// Sanity checks
if (compressor.finished()) {
@@ -132,6 +133,7 @@ public void write(byte[] b, int off, int len) throws IOException {
}
}
+ @Override
public void finish() throws IOException {
if (!compressor.finished()) {
rawWriteInt((int)compressor.getBytesRead());
@@ -142,6 +144,7 @@ public void finish() throws IOException {
}
}
+ @Override
protected void compress() throws IOException {
int len = compressor.compress(buffer, 0, buffer.length);
if (len > 0) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
index 42ade89019..7d2504e3e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
@@ -65,6 +65,7 @@ protected BlockDecompressorStream(InputStream in) throws IOException {
super(in);
}
+ @Override
protected int decompress(byte[] b, int off, int len) throws IOException {
// Check if we are the beginning of a block
if (noUncompressedBytes == originalBlockSize) {
@@ -104,6 +105,7 @@ protected int decompress(byte[] b, int off, int len) throws IOException {
return n;
}
+ @Override
protected int getCompressedData() throws IOException {
checkStream();
@@ -126,6 +128,7 @@ protected int getCompressedData() throws IOException {
return len;
}
+ @Override
public void resetState() throws IOException {
originalBlockSize = 0;
noUncompressedBytes = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index dc95e9e999..57fb366bdd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -75,6 +75,7 @@ private void addCodec(CompressionCodec codec) {
/**
* Print the extension map out as a string.
*/
+ @Override
public String toString() {
StringBuilder buf = new StringBuilder();
Iterator<Map.Entry<String, CompressionCodec>> itr =
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
index 4f7757dfed..4491819d72 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
@@ -55,6 +55,7 @@ protected CompressionInputStream(InputStream in) throws IOException {
this.in = in;
}
+ @Override
public void close() throws IOException {
in.close();
}
@@ -63,6 +64,7 @@ public void close() throws IOException {
* Read bytes from the stream.
* Made abstract to prevent leakage to underlying stream.
*/
+ @Override
public abstract int read(byte[] b, int off, int len) throws IOException;
/**
@@ -76,6 +78,7 @@ public void close() throws IOException {
*
* @return Current position in stream as a long
*/
+ @Override
public long getPos() throws IOException {
if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)){
//This way of getting the current position will not work for file
@@ -95,6 +98,7 @@ public long getPos() throws IOException {
* @throws UnsupportedOperationException
*/
+ @Override
public void seek(long pos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
@@ -104,6 +108,7 @@ public void seek(long pos) throws UnsupportedOperationException {
*
* @throws UnsupportedOperationException
*/
+ @Override
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
index b4a47946b2..9bd6b84f98 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
@@ -44,11 +44,13 @@ protected CompressionOutputStream(OutputStream out) {
this.out = out;
}
+ @Override
public void close() throws IOException {
finish();
out.close();
}
+ @Override
public void flush() throws IOException {
out.flush();
}
@@ -57,6 +59,7 @@ public void flush() throws IOException {
* Write compressed bytes to the stream.
* Made abstract to prevent leakage to underlying stream.
*/
+ @Override
public abstract void write(byte[] b, int off, int len) throws IOException;
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
index 4cd7425ba6..84f1b2f179 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
@@ -59,6 +59,7 @@ protected CompressorStream(OutputStream out) {
super(out);
}
+ @Override
public void write(byte[] b, int off, int len) throws IOException {
// Sanity checks
if (compressor.finished()) {
@@ -83,6 +84,7 @@ protected void compress() throws IOException {
}
}
+ @Override
public void finish() throws IOException {
if (!compressor.finished()) {
compressor.finish();
@@ -92,10 +94,12 @@ public void finish() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
compressor.reset();
}
+ @Override
public void close() throws IOException {
if (!closed) {
finish();
@@ -105,6 +109,7 @@ public void close() throws IOException {
}
private byte[] oneByte = new byte[1];
+ @Override
public void write(int b) throws IOException {
oneByte[0] = (byte)(b & 0xff);
write(oneByte, 0, oneByte.length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
index d0ef6ee6d3..16e0ad763a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
@@ -66,11 +66,13 @@ protected DecompressorStream(InputStream in) throws IOException {
}
private byte[] oneByte = new byte[1];
+ @Override
public int read() throws IOException {
checkStream();
return (read(oneByte, 0, oneByte.length) == -1) ? -1 : (oneByte[0] & 0xff);
}
+ @Override
public int read(byte[] b, int off, int len) throws IOException {
checkStream();
@@ -163,11 +165,13 @@ protected void checkStream() throws IOException {
}
}
+ @Override
public void resetState() throws IOException {
decompressor.reset();
}
private byte[] skipBytes = new byte[512];
+ @Override
public long skip(long n) throws IOException {
// Sanity checks
if (n < 0) {
@@ -189,11 +193,13 @@ public long skip(long n) throws IOException {
return skipped;
}
+ @Override
public int available() throws IOException {
checkStream();
return (eof) ? 0 : 1;
}
+ @Override
public void close() throws IOException {
if (!closed) {
in.close();
@@ -201,13 +207,16 @@ public void close() throws IOException {
}
}
+ @Override
public boolean markSupported() {
return false;
}
+ @Override
public synchronized void mark(int readlimit) {
}
+ @Override
public synchronized void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index 1be28bfce3..ea7df20de3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -37,14 +37,17 @@ public class DefaultCodec implements Configurable, CompressionCodec {
Configuration conf;
+ @Override
public void setConf(Configuration conf) {
this.conf = conf;
}
+ @Override
public Configuration getConf() {
return conf;
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
// This may leak memory if called in a loop. The createCompressor() call
@@ -57,6 +60,7 @@ public CompressionOutputStream createOutputStream(OutputStream out)
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
@@ -64,20 +68,24 @@ public CompressionOutputStream createOutputStream(OutputStream out,
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public Class<? extends Compressor> getCompressorType() {
return ZlibFactory.getZlibCompressorType(conf);
}
+ @Override
public Compressor createCompressor() {
return ZlibFactory.getZlibCompressor(conf);
}
+ @Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return new DecompressorStream(in, createDecompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
@@ -85,14 +93,17 @@ public CompressionInputStream createInputStream(InputStream in,
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public Class<? extends Decompressor> getDecompressorType() {
return ZlibFactory.getZlibDecompressorType(conf);
}
+ @Override
public Decompressor createDecompressor() {
return ZlibFactory.getZlibDecompressor(conf);
}
+ @Override
public String getDefaultExtension() {
return ".deflate";
}
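The factory methods above (createOutputStream, createCompressor, ...) are how callers normally obtain compression streams. A usage sketch (illustrative only):

    import java.io.FileOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionOutputStream;
    import org.apache.hadoop.io.compress.DefaultCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class CodecDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);
        CompressionOutputStream out =
            codec.createOutputStream(new FileOutputStream("demo" + codec.getDefaultExtension()));
        out.write("hello, codec".getBytes("UTF-8"));
        out.finish();   // flush remaining compressed data
        out.close();
      }
    }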
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index b17fe4b39e..520205e166 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -20,15 +20,11 @@
import java.io.*;
import java.util.zip.GZIPOutputStream;
-import java.util.zip.GZIPInputStream;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.zlib.*;
-import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
-import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
/**
* This class creates gzip compressors/decompressors.
@@ -66,32 +62,39 @@ protected GzipOutputStream(CompressorStream out) {
super(out);
}
+ @Override
public void close() throws IOException {
out.close();
}
+ @Override
public void flush() throws IOException {
out.flush();
}
+ @Override
public void write(int b) throws IOException {
out.write(b);
}
+ @Override
public void write(byte[] data, int offset, int length)
throws IOException {
out.write(data, offset, length);
}
+ @Override
public void finish() throws IOException {
((ResetableGZIPOutputStream) out).finish();
}
+ @Override
public void resetState() throws IOException {
((ResetableGZIPOutputStream) out).resetState();
}
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return (ZlibFactory.isNativeZlibLoaded(conf)) ?
@@ -100,6 +103,7 @@ public CompressionOutputStream createOutputStream(OutputStream out)
new GzipOutputStream(out);
}
+ @Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
@@ -110,23 +114,27 @@ public CompressionOutputStream createOutputStream(OutputStream out,
createOutputStream(out);
}
+ @Override
public Compressor createCompressor() {
return (ZlibFactory.isNativeZlibLoaded(conf))
? new GzipZlibCompressor(conf)
: null;
}
+ @Override
public Class<? extends Compressor> getCompressorType() {
return ZlibFactory.isNativeZlibLoaded(conf)
? GzipZlibCompressor.class
: null;
}
+ @Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, null);
}
+ @Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
@@ -137,18 +145,21 @@ public CompressionInputStream createInputStream(InputStream in,
conf.getInt("io.file.buffer.size", 4*1024));
}
+ @Override
public Decompressor createDecompressor() {
return (ZlibFactory.isNativeZlibLoaded(conf))
? new GzipZlibDecompressor()
: new BuiltInGzipDecompressor();
}
+ @Override
public Class<? extends Decompressor> getDecompressorType() {
return ZlibFactory.isNativeZlibLoaded(conf)
? GzipZlibDecompressor.class
: BuiltInGzipDecompressor.class;
}
+ @Override
public String getDefaultExtension() {
return ".gz";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
index 14cc9d5b82..00e892d845 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
@@ -338,6 +338,7 @@ private void changeStateToProcessABlock() throws IOException {
}
+ @Override
public int read() throws IOException {
if (this.in != null) {
@@ -372,6 +373,7 @@ public int read() throws IOException {
*/
+ @Override
public int read(final byte[] dest, final int offs, final int len)
throws IOException {
if (offs < 0) {
@@ -574,6 +576,7 @@ private void complete() throws IOException {
}
}
+ @Override
public void close() throws IOException {
InputStream inShadow = this.in;
if (inShadow != null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
index 3060eb924f..ca4e5cd0df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
@@ -639,6 +639,7 @@ public CBZip2OutputStream(final OutputStream out, final int blockSize)
init();
}
+ @Override
public void write(final int b) throws IOException {
if (this.out != null) {
write0(b);
@@ -704,6 +705,7 @@ private void writeRun() throws IOException {
/**
* Overriden to close the stream.
*/
+ @Override
protected void finalize() throws Throwable {
finish();
super.finalize();
@@ -726,6 +728,7 @@ public void finish() throws IOException {
}
}
+ @Override
public void close() throws IOException {
if (out != null) {
OutputStream outShadow = this.out;
@@ -739,6 +742,7 @@ public void close() throws IOException {
}
}
+ @Override
public void flush() throws IOException {
OutputStream outShadow = this.out;
if (outShadow != null) {
@@ -849,6 +853,7 @@ public final int getBlockSize() {
return this.blockSize100k;
}
+ @Override
public void write(final byte[] buf, int offs, final int len)
throws IOException {
if (offs < 0) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
index 0cf65e5144..22a3118f5f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
@@ -258,6 +258,7 @@ public synchronized int getRemaining() {
return 0;
}
+ @Override
public synchronized void reset() {
finished = false;
compressedDirectBufLen = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
index baf864094e..4620092f08 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
@@ -257,6 +257,7 @@ public synchronized int getRemaining() {
return 0;
}
+ @Override
public synchronized void reset() {
finished = false;
compressedDirectBufLen = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
index 1e5525e743..41f8036fda 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
@@ -122,7 +122,7 @@ public BuiltInGzipDecompressor() {
// in the first buffer load? (But how else would one do it?)
}
- /** {@inheritDoc} */
+ @Override
public synchronized boolean needsInput() {
if (state == GzipStateLabel.DEFLATE_STREAM) { // most common case
return inflater.needsInput();
@@ -144,6 +144,7 @@ public synchronized boolean needsInput() {
* the bulk deflate stream, which is a performance hit we don't want
* to absorb. (Decompressor now documents this requirement.)
*/
+ @Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
@@ -175,6 +176,7 @@ public synchronized void setInput(byte[] b, int off, int len) {
* methods below), the deflate stream is never copied; Inflater operates
* directly on the user's buffer.
*/
+ @Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
int numAvailBytes = 0;
@@ -421,16 +423,17 @@ public synchronized long getBytesRead() {
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
+ @Override
public synchronized int getRemaining() {
return userBufLen;
}
- /** {@inheritDoc} */
+ @Override
public synchronized boolean needsDictionary() {
return inflater.needsDictionary();
}
- /** {@inheritDoc} */
+ @Override
public synchronized void setDictionary(byte[] b, int off, int len) {
inflater.setDictionary(b, off, len);
}
@@ -439,6 +442,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) {
* Returns true if the end of the gzip substream (single "member") has been
* reached.
*/
+ @Override
public synchronized boolean finished() {
return (state == GzipStateLabel.FINISHED);
}
@@ -447,6 +451,7 @@ public synchronized boolean finished() {
* Resets everything, including the input buffer, regardless of whether the
* current gzip substream is finished.
*/
+ @Override
public synchronized void reset() {
// could optionally emit INFO message if state != GzipStateLabel.FINISHED
inflater.reset();
@@ -463,7 +468,7 @@ public synchronized void reset() {
hasHeaderCRC = false;
}
- /** {@inheritDoc} */
+ @Override
public synchronized void end() {
inflater.end();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
index b269d557b7..509456e834 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
@@ -48,6 +48,7 @@ public BuiltInZlibDeflater() {
super();
}
+ @Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
return super.deflate(b, off, len);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
index 0223587ad0..4fda6723b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
@@ -39,6 +39,7 @@ public BuiltInZlibInflater() {
super();
}
+ @Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
try {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
index 8839bc98fa..c0d0d699a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
@@ -259,6 +259,7 @@ public synchronized void reinit(Configuration conf) {
}
}
+ @Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b== null) {
throw new NullPointerException();
@@ -287,6 +288,7 @@ synchronized void setInputFromSavedData() {
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
+ @Override
public synchronized void setDictionary(byte[] b, int off, int len) {
if (stream == 0 || b == null) {
throw new NullPointerException();
@@ -297,6 +299,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) {
setDictionary(stream, b, off, len);
}
+ @Override
public synchronized boolean needsInput() {
// Consume remaining compressed data?
if (compressedDirectBuf.remaining() > 0) {
@@ -325,16 +328,19 @@ public synchronized boolean needsInput() {
return false;
}
+ @Override
public synchronized void finish() {
finish = true;
}
+ @Override
public synchronized boolean finished() {
// Check if 'zlib' says its 'finished' and
// all compressed data has been consumed
return (finished && compressedDirectBuf.remaining() == 0);
}
+ @Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
@@ -385,6 +391,7 @@ public synchronized int compress(byte[] b, int off, int len)
*
* @return the total (non-negative) number of compressed bytes output so far
*/
+ @Override
public synchronized long getBytesWritten() {
checkStream();
return getBytesWritten(stream);
@@ -395,11 +402,13 @@ public synchronized long getBytesWritten() {
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
+ @Override
public synchronized long getBytesRead() {
checkStream();
return getBytesRead(stream);
}
+ @Override
public synchronized void reset() {
checkStream();
reset(stream);
@@ -413,6 +422,7 @@ public synchronized void reset() {
userBufOff = userBufLen = 0;
}
+ @Override
public synchronized void end() {
if (stream != 0) {
end(stream);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
index 2db70551e8..ba67571998 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
@@ -118,6 +118,7 @@ public ZlibDecompressor() {
this(CompressionHeader.DEFAULT_HEADER, DEFAULT_DIRECT_BUFFER_SIZE);
}
+ @Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
@@ -154,6 +155,7 @@ synchronized void setInputFromSavedData() {
userBufLen -= compressedDirectBufLen;
}
+ @Override
public synchronized void setDictionary(byte[] b, int off, int len) {
if (stream == 0 || b == null) {
throw new NullPointerException();
@@ -165,6 +167,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) {
needDict = false;
}
+ @Override
public synchronized boolean needsInput() {
// Consume remaining compressed data?
if (uncompressedDirectBuf.remaining() > 0) {
@@ -184,16 +187,19 @@ public synchronized boolean needsInput() {
return false;
}
+ @Override
public synchronized boolean needsDictionary() {
return needDict;
}
+ @Override
public synchronized boolean finished() {
// Check if 'zlib' says it's 'finished' and
// all compressed data has been consumed
return (finished && uncompressedDirectBuf.remaining() == 0);
}
+ @Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
@@ -255,6 +261,7 @@ public synchronized long getBytesRead() {
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
+ @Override
public synchronized int getRemaining() {
checkStream();
return userBufLen + getRemaining(stream); // userBuf + compressedDirectBuf
@@ -263,6 +270,7 @@ public synchronized int getRemaining() {
/**
* Resets everything including the input buffers (user and direct).
*/
+ @Override
public synchronized void reset() {
checkStream();
reset(stream);
@@ -274,6 +282,7 @@ public synchronized void reset() {
userBufOff = userBufLen = 0;
}
+ @Override
public synchronized void end() {
if (stream != 0) {
end(stream);
@@ -281,6 +290,7 @@ public synchronized void end() {
}
}
+ @Override
protected void finalize() {
end();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
index 6b4fdd89aa..ce93266574 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
@@ -300,6 +300,7 @@ public Writer(FSDataOutputStream fout, String compressionName,
* Close the BCFile Writer. Attempting to use the Writer after calling
* deserializer)
this.deserializer.open(buffer);
}
+ @Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
index 61d6f171c9..f08d0008c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
@@ -24,11 +24,8 @@
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Serializable;
-import java.util.Map;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.RawComparator;
/**
*
@@ -45,6 +42,7 @@ static class JavaSerializationDeserializer
private ObjectInputStream ois;
+ @Override
public void open(InputStream in) throws IOException {
ois = new ObjectInputStream(in) {
@Override protected void readStreamHeader() {
@@ -53,6 +51,7 @@ public void open(InputStream in) throws IOException {
};
}
+ @Override
@SuppressWarnings("unchecked")
public T deserialize(T object) throws IOException {
try {
@@ -63,6 +62,7 @@ public T deserialize(T object) throws IOException {
}
}
+ @Override
public void close() throws IOException {
ois.close();
}
@@ -74,6 +74,7 @@ static class JavaSerializationSerializer
private ObjectOutputStream oos;
+ @Override
public void open(OutputStream out) throws IOException {
oos = new ObjectOutputStream(out) {
@Override protected void writeStreamHeader() {
@@ -82,27 +83,32 @@ public void open(OutputStream out) throws IOException {
};
}
+ @Override
public void serialize(Serializable object) throws IOException {
oos.reset(); // clear (class) back-references
oos.writeObject(object);
}
+ @Override
public void close() throws IOException {
oos.close();
}
}
+ @Override
@InterfaceAudience.Private
public boolean accept(Class<?> c) {
return Serializable.class.isAssignableFrom(c);
}
+ @Override
@InterfaceAudience.Private
public Deserializer<Serializable> getDeserializer(Class<Serializable> c) {
return new JavaSerializationDeserializer<Serializable>();
}
+ @Override
@InterfaceAudience.Private
public Serializer<Serializable> getSerializer(Class<Serializable> c) {
return new JavaSerializationSerializer();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
index 12927bea14..f9bf692f1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
@@ -44,6 +44,7 @@ public JavaSerializationComparator() throws IOException {
super(new JavaSerialization.JavaSerializationDeserializer<T>());
}
+ @Override
@InterfaceAudience.Private
public int compare(T o1, T o2) {
return o1.compareTo(o2);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
index 8511d25bcd..ad965d6b2f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
@@ -23,8 +23,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.util.Map;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
index 1d5c068886..f340cb3a98 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
@@ -47,11 +47,13 @@ public abstract class AvroSerialization<T> extends Configured
@InterfaceAudience.Private
public static final String AVRO_SCHEMA_KEY = "Avro-Schema";
+ @Override
@InterfaceAudience.Private
public Deserializer<T> getDeserializer(Class<T> c) {
return new AvroDeserializer(c);
}
+ @Override
@InterfaceAudience.Private
public Serializer<T> getSerializer(Class<T> c) {
return new AvroSerializer(c);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index b0f5c93f75..de7af1b6b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -364,6 +364,7 @@ private void handleTimeout(SocketTimeoutException e) throws IOException {
* until a byte is read.
* @throws IOException for any IO problem other than socket timeout
*/
+ @Override
public int read() throws IOException {
do {
try {
@@ -380,6 +381,7 @@ public int read() throws IOException {
*
* @return the total number of bytes read; -1 if the connection is closed.
*/
+ @Override
public int read(byte[] buf, int off, int len) throws IOException {
do {
try {
@@ -510,6 +512,7 @@ private synchronized void handleSaslConnectionFailure(
final Random rand, final UserGroupInformation ugi) throws IOException,
InterruptedException {
ugi.doAs(new PrivilegedExceptionAction<Object>() {